/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion_priv.h"
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	optional device-specific ioctl handler
 * @clients:		an rb tree of all the clients attached to the device
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};
/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		used for debugging
 * @debug_root:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
};
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (buffer->flags & ION_FLAG_CACHED)
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings must have pagewise sg_lists\n",
			       __func__);
			heap->ops->unmap_dma(heap, buffer);
			kfree(buffer);
			return ERR_PTR(-EINVAL);
		}

	ret = ion_buffer_alloc_dirty(buffer);
	if (ret) {
		heap->ops->unmap_dma(heap, buffer);
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(ret);
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * This will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	ion_buffer_add(dev, buffer);
	return buffer;
}
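
/*
 * kref release callback: tears the buffer down once the last reference
 * (from handles and exported dma-bufs) has been dropped.
 */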
static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer->dirty);
	kfree(buffer);
}
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}
static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&client->lock);

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);
	mutex_unlock(&client->lock);

	ion_buffer_put(buffer);
	kfree(handle);
}
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}
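
/*
 * Find an existing handle for @buffer in this client, if any.
 * Must be called with client->lock held.
 */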
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}
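
/*
 * ion_alloc - allocate a buffer and return a handle to it
 *
 * Illustrative in-kernel usage (sketch only; "my_idev" stands for an
 * ion_device created elsewhere with ion_device_create()):
 *
 *	struct ion_client *client = ion_client_create(my_idev, -1, "example");
 *	struct ion_handle *handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *					      ION_HEAP_SYSTEM_MASK, 0);
 *	void *vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */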
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
		 len, align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
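
/*
 * Drop the client's reference to @handle; the buffer itself is freed once
 * every handle and exported dma-buf referencing it is gone.
 */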
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
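
/*
 * Return the physical address and size of a buffer; only works for heaps
 * that implement the phys op (i.e. physically contiguous heaps).
 */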
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
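
/*
 * Kernel-mapping helpers: the buffer keeps one global kernel mapping with a
 * reference count, and each handle tracks its own count on top of it.  All
 * of these must be called with buffer->lock held.
 */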
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
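
/* Map the buffer behind @handle into the kernel and return its vaddr. */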
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
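
/*
 * Create a client for the given device.  @heap_mask restricts which heap
 * types the client may allocate from; @name is only used for debugfs.
 */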
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
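
/* Return the sg_table describing the pages backing @handle's buffer. */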
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);
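
/*
 * dma-buf exporter callbacks: ion exports every shared buffer as a dma-buf,
 * so importers see the sg_table that was built at allocation time.
 */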
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}
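
/*
 * Allocate one dirty bit per page-sized sg entry; the bits track which pages
 * have been faulted in through a cached userspace mapping.
 */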
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1) / BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!(buffer->flags & ION_FLAG_CACHED))
		return;

	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}
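
/*
 * Fault handler for cached userspace mappings: pages are inserted lazily,
 * marked dirty, and synced back to the device in
 * ion_buffer_sync_for_device().
 */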
int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}
struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
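
/*
 * mmap callback for the exported dma-buf.  Cached buffers are mapped lazily
 * through the fault handler above; uncached buffers are mapped up front with
 * writecombine protection.
 */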
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_CACHED) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	/* dma-buf kmap offsets are given in pages, not bytes */
	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}
struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
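
/* Export @handle's buffer as a dma-buf and return an fd referencing it. */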
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);
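
/*
 * Turn a dma-buf fd (previously exported by ion) back into a handle for
 * @client, reusing an existing handle for the same buffer when possible.
 */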
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
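
/*
 * Flush any dirty CPU cache lines for the buffer behind @fd so the device
 * sees up-to-date data (backs the ION_IOC_SYNC ioctl).
 */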
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;
	ion_buffer_sync_for_device(buffer, NULL, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}
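
/*
 * ioctl interface used by userspace through /dev/ion.  Illustrative sketch of
 * the typical ALLOC + SHARE sequence (not part of this file; field names
 * follow the ion_allocation_data/ion_fd_data definitions in ion.h):
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096,
 *		.heap_mask = ION_HEAP_SYSTEM_MASK, .flags = 0,
 *	};
 *	struct ion_fd_data share;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
 */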
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_SYNC:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		ion_sync_for_device(client, data.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}
static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};
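
/* Sum the sizes of all of @client's handles that live on heaps of @type. */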
static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
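
/*
 * Register a heap with the device, keyed by heap id in the heaps rbtree,
 * and expose per-heap statistics in debugfs.
 */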
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
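
/*
 * Reserve carveout memory for the heaps described in the platform data; must
 * be called early in boot, while memblock is still the boot-time allocator.
 */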
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		int ret;

		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}