3 * drivers/gpu/ion/ion.c
5 * Copyright (C) 2011 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <asm-generic/dma-contiguous.h>
43 #include "compat_ion.h"
46 * struct ion_device - the metadata of the ion device node
47 * @dev: the actual misc device
48 * @buffers: an rb tree of all the existing buffers
49 * @buffer_lock: lock protecting the tree of buffers
50 * @lock: rwsem protecting the tree of heaps and clients
51 * @heaps: list of all the heaps in the system
52 * @clients: rb tree of all the clients attached to the device
55 struct miscdevice dev;
56 struct rb_root buffers;
57 struct mutex buffer_lock;
58 struct rw_semaphore lock;
59 struct plist_head heaps;
60 long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
62 struct rb_root clients;
63 struct dentry *debug_root;
64 struct dentry *heaps_debug_root;
65 struct dentry *clients_debug_root;
69 * struct ion_client - a process/hw block local address space
70 * @node: node in the tree of all clients
71 * @dev: backpointer to ion device
72 * @handles: an rb tree of all the handles in this client
73 * @idr: an idr space for allocating handle ids
74 * @lock: lock protecting the tree of handles
75 * @name: used for debugging
76 * @display_name: used for debugging (unique version of @name)
77 * @display_serial: used for debugging (to make display_name unique)
78 * @task: used for debugging
80 * A client represents a list of buffers this client may access.
81 * The mutex stored here is used to protect both the handles tree
82 * and the handles themselves, and should be held while modifying either.
86 struct ion_device *dev;
87 struct rb_root handles;
93 struct task_struct *task;
95 struct dentry *debug_root;
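/*
 * Illustrative sketch (not part of the original driver): a minimal in-kernel
 * user of the client API defined later in this file.  "my_ion_dev" and the
 * heap id are placeholders; error handling is trimmed for brevity.
 */
#if 0	/* example only, not compiled */
static int example_use_ion_client(struct ion_device *my_ion_dev)
{
	struct ion_client *client;
	struct ion_handle *handle;

	client = ion_client_create(my_ion_dev, "example-client");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* one page from heap id 0 (placeholder), uncached */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0, 0);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}
#endif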
99 * ion_handle - a client local reference to a buffer
100 * @ref: reference count
101 * @client: back pointer to the client the buffer resides in
102 * @buffer: pointer to the buffer
103 * @node: node in the client's handle rbtree
104 * @kmap_cnt: count of times this client has mapped to kernel
105 * @id: client-unique id allocated by client->idr
107 * Modifications to node, map_cnt or mapping should be protected by the
108 * lock in the client. Other fields are never changed after initialization.
112 struct ion_client *client;
113 struct ion_buffer *buffer;
115 unsigned int kmap_cnt;
119 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
121 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
123 return (buffer->flags & ION_FLAG_CACHED) &&
124 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
127 bool ion_buffer_cached(struct ion_buffer *buffer)
129 return !!(buffer->flags & ION_FLAG_CACHED);
132 static inline struct page *ion_buffer_page(struct page *page)
134 return (struct page *)((unsigned long)page & ~(1UL));
137 static inline bool ion_buffer_page_is_dirty(struct page *page)
139 return !!((unsigned long)page & 1UL);
142 static inline void ion_buffer_page_dirty(struct page **page)
144 *page = (struct page *)((unsigned long)(*page) | 1UL);
147 static inline void ion_buffer_page_clean(struct page **page)
149 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
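/*
 * The helpers above track per-page dirty state for faulted buffers by
 * stealing bit 0 of each struct page pointer stored in buffer->pages:
 * e.g. a pointer 0x...1000 becomes 0x...1001 once the page is dirtied,
 * and masking the low bit off again recovers the real struct page.
 * struct page pointers are always at least word aligned, so bit 0 is
 * otherwise unused.
 */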
152 /* this function should only be called while dev->lock is held */
153 static void ion_buffer_add(struct ion_device *dev,
154 struct ion_buffer *buffer)
156 struct rb_node **p = &dev->buffers.rb_node;
157 struct rb_node *parent = NULL;
158 struct ion_buffer *entry;
162 entry = rb_entry(parent, struct ion_buffer, node);
164 if (buffer < entry) {
166 } else if (buffer > entry) {
169 pr_err("%s: buffer already found.\n", __func__);
174 rb_link_node(&buffer->node, parent, p);
175 rb_insert_color(&buffer->node, &dev->buffers);
178 /* this function should only be called while dev->lock is held */
179 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
180 struct ion_device *dev,
185 struct ion_buffer *buffer;
186 struct sg_table *table;
187 struct scatterlist *sg;
190 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
192 return ERR_PTR(-ENOMEM);
195 buffer->flags = flags;
196 kref_init(&buffer->ref);
198 ret = heap->ops->allocate(heap, buffer, len, align, flags);
201 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
204 ion_heap_freelist_drain(heap, 0);
205 ret = heap->ops->allocate(heap, buffer, len, align,
214 table = heap->ops->map_dma(heap, buffer);
215 if (WARN_ONCE(table == NULL,
216 "heap->ops->map_dma should return ERR_PTR on error"))
217 table = ERR_PTR(-EINVAL);
219 heap->ops->free(buffer);
221 return ERR_PTR(PTR_ERR(table));
223 buffer->sg_table = table;
224 if (ion_buffer_fault_user_mappings(buffer)) {
225 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
226 struct scatterlist *sg;
229 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
230 if (!buffer->pages) {
235 for_each_sg(table->sgl, sg, table->nents, i) {
236 struct page *page = sg_page(sg);
238 for (j = 0; j < sg->length / PAGE_SIZE; j++)
239 buffer->pages[k++] = page++;
248 INIT_LIST_HEAD(&buffer->vmas);
249 mutex_init(&buffer->lock);
250 /* this will set up dma addresses for the sglist -- it is not
251 technically correct as per the dma api -- a specific
252 device isn't really taking ownership here. However, in practice on
253 our systems the only dma_address space is physical addresses.
254 Additionally, we can't afford the overhead of invalidating every
255 allocation via dma_map_sg. The implicit contract here is that
256 memory coming from the heaps is ready for dma, i.e. if it has a
257 cached mapping that mapping has been invalidated */
258 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
259 sg_dma_address(sg) = sg_phys(sg);
260 mutex_lock(&dev->buffer_lock);
261 ion_buffer_add(dev, buffer);
262 mutex_unlock(&dev->buffer_lock);
266 heap->ops->unmap_dma(heap, buffer);
267 heap->ops->free(buffer);
270 vfree(buffer->pages);
276 void ion_buffer_destroy(struct ion_buffer *buffer)
278 if (WARN_ON(buffer->kmap_cnt > 0))
279 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
280 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
281 #ifdef CONFIG_ROCKCHIP_IOMMU
282 ion_iommu_force_unmap(buffer);
284 buffer->heap->ops->free(buffer);
286 vfree(buffer->pages);
290 static void _ion_buffer_destroy(struct kref *kref)
292 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
293 struct ion_heap *heap = buffer->heap;
294 struct ion_device *dev = buffer->dev;
296 mutex_lock(&dev->buffer_lock);
297 rb_erase(&buffer->node, &dev->buffers);
298 mutex_unlock(&dev->buffer_lock);
300 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
301 ion_heap_freelist_add(heap, buffer);
303 ion_buffer_destroy(buffer);
306 static void ion_buffer_get(struct ion_buffer *buffer)
308 kref_get(&buffer->ref);
311 static int ion_buffer_put(struct ion_buffer *buffer)
313 return kref_put(&buffer->ref, _ion_buffer_destroy);
316 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
318 mutex_lock(&buffer->lock);
319 buffer->handle_count++;
320 mutex_unlock(&buffer->lock);
323 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
326 * when a buffer is removed from a handle, if it is not in
327 * any other handles, copy the taskcomm and the pid of the
328 * process it's being removed from into the buffer. At this
329 * point there will be no way to track what processes this buffer is
330 * being used by; it only exists as a dma_buf file descriptor.
331 * The taskcomm and pid can provide a debug hint as to where this fd
332 * is in the system.
334 mutex_lock(&buffer->lock);
335 buffer->handle_count--;
336 BUG_ON(buffer->handle_count < 0);
337 if (!buffer->handle_count) {
338 struct task_struct *task;
340 task = current->group_leader;
341 get_task_comm(buffer->task_comm, task);
342 buffer->pid = task_pid_nr(task);
344 mutex_unlock(&buffer->lock);
347 static struct ion_handle *ion_handle_create(struct ion_client *client,
348 struct ion_buffer *buffer)
350 struct ion_handle *handle;
352 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
354 return ERR_PTR(-ENOMEM);
355 kref_init(&handle->ref);
356 RB_CLEAR_NODE(&handle->node);
357 handle->client = client;
358 ion_buffer_get(buffer);
359 ion_buffer_add_to_handle(buffer);
360 handle->buffer = buffer;
365 static void ion_handle_kmap_put(struct ion_handle *);
367 static void ion_handle_destroy(struct kref *kref)
369 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
370 struct ion_client *client = handle->client;
371 struct ion_buffer *buffer = handle->buffer;
373 mutex_lock(&buffer->lock);
374 while (handle->kmap_cnt)
375 ion_handle_kmap_put(handle);
376 mutex_unlock(&buffer->lock);
378 idr_remove(&client->idr, handle->id);
379 if (!RB_EMPTY_NODE(&handle->node))
380 rb_erase(&handle->node, &client->handles);
382 ion_buffer_remove_from_handle(buffer);
383 ion_buffer_put(buffer);
388 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
390 return handle->buffer;
393 static void ion_handle_get(struct ion_handle *handle)
395 kref_get(&handle->ref);
398 int ion_handle_put(struct ion_handle *handle)
400 struct ion_client *client = handle->client;
403 mutex_lock(&client->lock);
404 ret = kref_put(&handle->ref, ion_handle_destroy);
405 mutex_unlock(&client->lock);
410 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
411 struct ion_buffer *buffer)
413 struct rb_node *n = client->handles.rb_node;
416 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
417 if (buffer < entry->buffer)
419 else if (buffer > entry->buffer)
424 return ERR_PTR(-EINVAL);
427 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
430 struct ion_handle *handle;
432 mutex_lock(&client->lock);
433 handle = idr_find(&client->idr, id);
435 ion_handle_get(handle);
436 mutex_unlock(&client->lock);
438 return handle ? handle : ERR_PTR(-EINVAL);
441 static bool ion_handle_validate(struct ion_client *client,
442 struct ion_handle *handle)
444 WARN_ON(!mutex_is_locked(&client->lock));
445 return (idr_find(&client->idr, handle->id) == handle);
448 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
451 struct rb_node **p = &client->handles.rb_node;
452 struct rb_node *parent = NULL;
453 struct ion_handle *entry;
455 id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
463 entry = rb_entry(parent, struct ion_handle, node);
465 if (handle->buffer < entry->buffer)
467 else if (handle->buffer > entry->buffer)
470 WARN(1, "%s: buffer already found.", __func__);
473 rb_link_node(&handle->node, parent, p);
474 rb_insert_color(&handle->node, &client->handles);
479 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
480 size_t align, unsigned int heap_id_mask,
483 struct ion_handle *handle;
484 struct ion_device *dev = client->dev;
485 struct ion_buffer *buffer = NULL;
486 struct ion_heap *heap;
489 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
490 len, align, heap_id_mask, flags);
492 * traverse the list of heaps available in this system in priority
493 * order. If the heap type is supported by the client, and matches the
494 * request of the caller, allocate from it. Repeat until allocation has
495 * succeeded or all heaps have been tried
497 len = PAGE_ALIGN(len);
500 return ERR_PTR(-EINVAL);
502 down_read(&dev->lock);
503 plist_for_each_entry(heap, &dev->heaps, node) {
504 /* if the caller didn't specify this heap id */
505 if (!((1 << heap->id) & heap_id_mask))
507 buffer = ion_buffer_create(heap, dev, len, align, flags);
514 return ERR_PTR(-ENODEV);
517 return ERR_PTR(PTR_ERR(buffer));
519 handle = ion_handle_create(client, buffer);
522 * ion_buffer_create will create a buffer with a ref_cnt of 1,
523 * and ion_handle_create will take a second reference; drop one here
525 ion_buffer_put(buffer);
530 mutex_lock(&client->lock);
531 ret = ion_handle_add(client, handle);
532 mutex_unlock(&client->lock);
534 ion_handle_put(handle);
535 handle = ERR_PTR(ret);
540 EXPORT_SYMBOL(ion_alloc);
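/*
 * Note on usage: heap_id_mask is a bitmask of heap *ids* as registered with
 * ion_device_add_heap(), not of heap types.  Passing e.g.
 * (1 << 2) | (1 << 0) lets the allocation fall back from heap id 2 to heap
 * id 0; the plist walk above tries the highest-priority matching heap first.
 * ION_FLAG_CACHED in @flags requests a cpu-cached buffer.
 */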
542 void ion_free(struct ion_client *client, struct ion_handle *handle)
546 BUG_ON(client != handle->client);
548 mutex_lock(&client->lock);
549 valid_handle = ion_handle_validate(client, handle);
552 WARN(1, "%s: invalid handle passed to free.\n", __func__);
553 mutex_unlock(&client->lock);
556 mutex_unlock(&client->lock);
557 ion_handle_put(handle);
559 EXPORT_SYMBOL(ion_free);
561 int ion_phys(struct ion_client *client, struct ion_handle *handle,
562 ion_phys_addr_t *addr, size_t *len)
564 struct ion_buffer *buffer;
567 mutex_lock(&client->lock);
568 if (!ion_handle_validate(client, handle)) {
569 mutex_unlock(&client->lock);
573 buffer = handle->buffer;
575 if (!buffer->heap->ops->phys) {
576 pr_err("%s: ion_phys is not implemented by this heap.\n",
578 mutex_unlock(&client->lock);
581 mutex_unlock(&client->lock);
582 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
585 EXPORT_SYMBOL(ion_phys);
587 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
591 if (buffer->kmap_cnt) {
593 return buffer->vaddr;
595 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
596 if (WARN_ONCE(vaddr == NULL,
597 "heap->ops->map_kernel should return ERR_PTR on error"))
598 return ERR_PTR(-EINVAL);
601 buffer->vaddr = vaddr;
606 static void *ion_handle_kmap_get(struct ion_handle *handle)
608 struct ion_buffer *buffer = handle->buffer;
611 if (handle->kmap_cnt) {
613 return buffer->vaddr;
615 vaddr = ion_buffer_kmap_get(buffer);
622 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
625 if (!buffer->kmap_cnt) {
626 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
627 buffer->vaddr = NULL;
631 static void ion_handle_kmap_put(struct ion_handle *handle)
633 struct ion_buffer *buffer = handle->buffer;
636 if (!handle->kmap_cnt)
637 ion_buffer_kmap_put(buffer);
640 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
642 struct ion_buffer *buffer;
645 mutex_lock(&client->lock);
646 if (!ion_handle_validate(client, handle)) {
647 pr_err("%s: invalid handle passed to map_kernel.\n",
649 mutex_unlock(&client->lock);
650 return ERR_PTR(-EINVAL);
653 buffer = handle->buffer;
655 if (!handle->buffer->heap->ops->map_kernel) {
656 pr_err("%s: map_kernel is not implemented by this heap.\n",
658 mutex_unlock(&client->lock);
659 return ERR_PTR(-ENODEV);
662 mutex_lock(&buffer->lock);
663 vaddr = ion_handle_kmap_get(handle);
664 mutex_unlock(&buffer->lock);
665 mutex_unlock(&client->lock);
668 EXPORT_SYMBOL(ion_map_kernel);
670 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
672 struct ion_buffer *buffer;
674 mutex_lock(&client->lock);
675 buffer = handle->buffer;
676 mutex_lock(&buffer->lock);
677 ion_handle_kmap_put(handle);
678 mutex_unlock(&buffer->lock);
679 mutex_unlock(&client->lock);
681 EXPORT_SYMBOL(ion_unmap_kernel);
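/*
 * Illustrative sketch (not part of the original driver): ion_map_kernel()
 * and ion_unmap_kernel() are reference counted per handle, so a temporary
 * CPU view of a buffer is bracketed as below.  The caller's client/handle
 * are placeholders.
 */
#if 0	/* example only, not compiled */
static int example_cpu_touch(struct ion_client *client,
			     struct ion_handle *handle)
{
	void *vaddr = ion_map_kernel(client, handle);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	memset(vaddr, 0, PAGE_SIZE);	/* touch the buffer through the CPU */
	ion_unmap_kernel(client, handle);
	return 0;
}
#endif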
683 #ifdef CONFIG_ROCKCHIP_IOMMU
684 static void ion_iommu_add(struct ion_buffer *buffer,
685 struct ion_iommu_map *iommu)
687 struct rb_node **p = &buffer->iommu_maps.rb_node;
688 struct rb_node *parent = NULL;
689 struct ion_iommu_map *entry;
693 entry = rb_entry(parent, struct ion_iommu_map, node);
695 if (iommu->key < entry->key) {
697 } else if (iommu->key > entry->key) {
700 pr_err("%s: buffer %p already has mapping for domainid %x\n",
708 rb_link_node(&iommu->node, parent, p);
709 rb_insert_color(&iommu->node, &buffer->iommu_maps);
712 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
715 struct rb_node **p = &buffer->iommu_maps.rb_node;
716 struct rb_node *parent = NULL;
717 struct ion_iommu_map *entry;
721 entry = rb_entry(parent, struct ion_iommu_map, node);
723 if (key < entry->key)
725 else if (key > entry->key)
734 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
735 struct device *iommu_dev, unsigned long *iova)
737 struct ion_iommu_map *data;
740 data = kmalloc(sizeof(*data), GFP_ATOMIC);
743 return ERR_PTR(-ENOMEM);
745 data->buffer = buffer;
746 data->key = (uint32_t)iommu_dev;
748 ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
749 buffer->size, buffer->flags);
753 kref_init(&data->ref);
754 *iova = data->iova_addr;
756 ion_iommu_add(buffer, data);
765 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
766 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
768 struct ion_buffer *buffer;
769 struct ion_iommu_map *iommu_map;
772 mutex_lock(&client->lock);
773 if (!ion_handle_validate(client, handle)) {
774 pr_err("%s: invalid handle passed to map_iommu.\n",
776 mutex_unlock(&client->lock);
780 buffer = handle->buffer;
781 pr_debug("%s: map buffer(%p)\n", __func__, buffer);
783 mutex_lock(&buffer->lock);
785 if (ION_IS_CACHED(buffer->flags)) {
786 pr_err("%s: Cannot map iommu as cached.\n", __func__);
791 if (!handle->buffer->heap->ops->map_iommu) {
792 pr_err("%s: map_iommu is not implemented by this heap.\n",
798 if (buffer->size & ~PAGE_MASK) {
799 pr_debug("%s: buffer size %zx is not aligned to %lx\n", __func__,
800 buffer->size, PAGE_SIZE);
805 iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
807 pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
808 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
810 pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
811 if (iommu_map->mapped_size != buffer->size) {
812 pr_err("%s: handle %p is already mapped with length"
813 " %x, trying to map with length %x\n",
814 __func__, handle, iommu_map->mapped_size, buffer->size);
817 kref_get(&iommu_map->ref);
818 *iova = iommu_map->iova_addr;
822 buffer->iommu_map_cnt++;
823 *size = buffer->size;
825 mutex_unlock(&buffer->lock);
826 mutex_unlock(&client->lock);
829 EXPORT_SYMBOL(ion_map_iommu);
831 static void ion_iommu_release(struct kref *kref)
833 struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
835 struct ion_buffer *buffer = map->buffer;
837 rb_erase(&map->node, &buffer->iommu_maps);
838 buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
843 * Unmap any outstanding mappings which would otherwise have been leaked.
845 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
847 struct ion_iommu_map *iommu_map;
848 struct rb_node *node;
849 const struct rb_root *rb = &(buffer->iommu_maps);
851 pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);
853 mutex_lock(&buffer->lock);
855 while ((node = rb_first(rb)) != 0) {
856 iommu_map = rb_entry(node, struct ion_iommu_map, node);
857 /* set ref count to 1 to force release */
858 kref_init(&iommu_map->ref);
859 kref_put(&iommu_map->ref, ion_iommu_release);
862 mutex_unlock(&buffer->lock);
865 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
866 struct ion_handle *handle)
868 struct ion_iommu_map *iommu_map;
869 struct ion_buffer *buffer;
871 mutex_lock(&client->lock);
872 buffer = handle->buffer;
873 pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
875 mutex_lock(&buffer->lock);
877 iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
880 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
885 kref_put(&iommu_map->ref, ion_iommu_release);
887 buffer->iommu_map_cnt--;
890 mutex_unlock(&buffer->lock);
891 mutex_unlock(&client->lock);
893 EXPORT_SYMBOL(ion_unmap_iommu);
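/*
 * Illustrative sketch (not part of the original driver): the Rockchip IOMMU
 * path maps a buffer into a device's IO address space and is balanced by
 * ion_unmap_iommu().  "iommu_dev", "client" and "handle" are placeholders.
 */
#if 0	/* example only, not compiled */
static int example_map_for_device(struct device *iommu_dev,
				  struct ion_client *client,
				  struct ion_handle *handle)
{
	unsigned long iova, size;
	int ret = ion_map_iommu(iommu_dev, client, handle, &iova, &size);

	if (ret)
		return ret;
	/* program the device with @iova / @size, run the job, then: */
	ion_unmap_iommu(iommu_dev, client, handle);
	return 0;
}
#endif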
896 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
898 struct ion_client *client = s->private;
900 ion_phys_addr_t addr;
903 seq_printf(s, "----------------------------------------------------\n");
904 seq_printf(s, "%16s: %12s %8s %4s %4s %4s\n", "heap_name", "addr",
905 "size", "HC", "IBR", "IHR");
906 mutex_lock(&client->lock);
907 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
908 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
909 struct ion_buffer *buffer = handle->buffer;
910 if (buffer->heap->ops->phys) {
911 buffer->heap->ops->phys(buffer->heap, buffer, &addr, &len);
912 seq_printf(s, "%16.16s: 0x%08lx %8zuKB %4d %4d %4d\n",
913 buffer->heap->name, addr, len>>10, buffer->handle_count,
914 atomic_read(&buffer->ref.refcount),
915 atomic_read(&handle->ref.refcount));
918 mutex_unlock(&client->lock);
923 static int ion_debug_client_show(struct seq_file *s, void *unused)
925 struct ion_client *client = s->private;
927 size_t sizes[ION_NUM_HEAP_IDS] = {0};
928 const char *names[ION_NUM_HEAP_IDS] = {NULL};
931 mutex_lock(&client->lock);
932 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
933 struct ion_handle *handle = rb_entry(n, struct ion_handle,
935 unsigned int id = handle->buffer->heap->id;
938 names[id] = handle->buffer->heap->name;
939 sizes[id] += handle->buffer->size;
941 mutex_unlock(&client->lock);
943 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
944 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
947 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
949 ion_debug_client_show_buffer(s, unused);
953 static int ion_debug_client_open(struct inode *inode, struct file *file)
955 return single_open(file, ion_debug_client_show, inode->i_private);
958 static const struct file_operations debug_client_fops = {
959 .open = ion_debug_client_open,
962 .release = single_release,
965 static int ion_get_client_serial(const struct rb_root *root,
966 const unsigned char *name)
969 struct rb_node *node;
970 for (node = rb_first(root); node; node = rb_next(node)) {
971 struct ion_client *client = rb_entry(node, struct ion_client,
973 if (strcmp(client->name, name))
975 serial = max(serial, client->display_serial);
980 struct ion_client *ion_client_create(struct ion_device *dev,
983 struct ion_client *client;
984 struct task_struct *task;
986 struct rb_node *parent = NULL;
987 struct ion_client *entry;
991 pr_err("%s: Name cannot be null\n", __func__);
992 return ERR_PTR(-EINVAL);
995 get_task_struct(current->group_leader);
996 task_lock(current->group_leader);
997 pid = task_pid_nr(current->group_leader);
998 /* don't bother to store task struct for kernel threads,
999 they can't be killed anyway */
1000 if (current->group_leader->flags & PF_KTHREAD) {
1001 put_task_struct(current->group_leader);
1004 task = current->group_leader;
1006 task_unlock(current->group_leader);
1008 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1010 goto err_put_task_struct;
1013 client->handles = RB_ROOT;
1014 idr_init(&client->idr);
1015 mutex_init(&client->lock);
1016 client->task = task;
1018 client->name = kstrdup(name, GFP_KERNEL);
1020 goto err_free_client;
1022 down_write(&dev->lock);
1023 client->display_serial = ion_get_client_serial(&dev->clients, name);
1024 client->display_name = kasprintf(
1025 GFP_KERNEL, "%s-%d", name, client->display_serial);
1026 if (!client->display_name) {
1027 up_write(&dev->lock);
1028 goto err_free_client_name;
1030 p = &dev->clients.rb_node;
1033 entry = rb_entry(parent, struct ion_client, node);
1037 else if (client > entry)
1038 p = &(*p)->rb_right;
1040 rb_link_node(&client->node, parent, p);
1041 rb_insert_color(&client->node, &dev->clients);
1043 client->debug_root = debugfs_create_file(client->display_name, 0664,
1044 dev->clients_debug_root,
1045 client, &debug_client_fops);
1046 if (!client->debug_root) {
1047 char buf[256], *path;
1048 path = dentry_path(dev->clients_debug_root, buf, 256);
1049 pr_err("Failed to create client debugfs at %s/%s\n",
1050 path, client->display_name);
1053 up_write(&dev->lock);
1057 err_free_client_name:
1058 kfree(client->name);
1061 err_put_task_struct:
1063 put_task_struct(current->group_leader);
1064 return ERR_PTR(-ENOMEM);
1066 EXPORT_SYMBOL(ion_client_create);
1068 void ion_client_destroy(struct ion_client *client)
1070 struct ion_device *dev = client->dev;
1073 pr_debug("%s: %d\n", __func__, __LINE__);
1074 while ((n = rb_first(&client->handles))) {
1075 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1077 ion_handle_destroy(&handle->ref);
1080 idr_destroy(&client->idr);
1082 down_write(&dev->lock);
1084 put_task_struct(client->task);
1085 rb_erase(&client->node, &dev->clients);
1086 debugfs_remove_recursive(client->debug_root);
1087 up_write(&dev->lock);
1089 kfree(client->display_name);
1090 kfree(client->name);
1093 EXPORT_SYMBOL(ion_client_destroy);
1095 struct sg_table *ion_sg_table(struct ion_client *client,
1096 struct ion_handle *handle)
1098 struct ion_buffer *buffer;
1099 struct sg_table *table;
1101 mutex_lock(&client->lock);
1102 if (!ion_handle_validate(client, handle)) {
1103 pr_err("%s: invalid handle passed to map_dma.\n",
1105 mutex_unlock(&client->lock);
1106 return ERR_PTR(-EINVAL);
1108 buffer = handle->buffer;
1109 table = buffer->sg_table;
1110 mutex_unlock(&client->lock);
1113 EXPORT_SYMBOL(ion_sg_table);
1115 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1117 enum dma_data_direction direction);
1119 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1120 enum dma_data_direction direction)
1122 struct dma_buf *dmabuf = attachment->dmabuf;
1123 struct ion_buffer *buffer = dmabuf->priv;
1125 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1126 return buffer->sg_table;
1129 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1130 struct sg_table *table,
1131 enum dma_data_direction direction)
1135 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1136 size_t size, enum dma_data_direction dir)
1138 struct scatterlist sg;
1140 sg_init_table(&sg, 1);
1141 sg_set_page(&sg, page, size, 0);
1143 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1144 * for the targeted device, but this works on the currently targeted
1147 sg_dma_address(&sg) = page_to_phys(page);
1148 dma_sync_sg_for_device(dev, &sg, 1, dir);
1151 struct ion_vma_list {
1152 struct list_head list;
1153 struct vm_area_struct *vma;
1156 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1158 enum dma_data_direction dir)
1160 struct ion_vma_list *vma_list;
1161 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1164 pr_debug("%s: syncing for device %s\n", __func__,
1165 dev ? dev_name(dev) : "null");
1167 if (!ion_buffer_fault_user_mappings(buffer))
1170 mutex_lock(&buffer->lock);
1171 for (i = 0; i < pages; i++) {
1172 struct page *page = buffer->pages[i];
1174 if (ion_buffer_page_is_dirty(page))
1175 ion_pages_sync_for_device(dev, ion_buffer_page(page),
1178 ion_buffer_page_clean(buffer->pages + i);
1180 list_for_each_entry(vma_list, &buffer->vmas, list) {
1181 struct vm_area_struct *vma = vma_list->vma;
1183 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1186 mutex_unlock(&buffer->lock);
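/*
 * Faulted buffers (ION_FLAG_CACHED without ION_FLAG_CACHED_NEEDS_SYNC) are
 * kept coherent lazily: ion_vm_fault() below installs pages one at a time
 * and marks them dirty, ion_buffer_sync_for_device() above flushes only the
 * dirty pages before DMA, and the zap of the userspace mappings forces the
 * next CPU access to fault and re-mark its page.
 */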
1189 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1191 struct ion_buffer *buffer = vma->vm_private_data;
1195 mutex_lock(&buffer->lock);
1196 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1197 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1199 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1200 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1201 mutex_unlock(&buffer->lock);
1203 return VM_FAULT_ERROR;
1205 return VM_FAULT_NOPAGE;
1208 static void ion_vm_open(struct vm_area_struct *vma)
1210 struct ion_buffer *buffer = vma->vm_private_data;
1211 struct ion_vma_list *vma_list;
1213 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1216 vma_list->vma = vma;
1217 mutex_lock(&buffer->lock);
1218 list_add(&vma_list->list, &buffer->vmas);
1219 mutex_unlock(&buffer->lock);
1220 pr_debug("%s: adding %p\n", __func__, vma);
1223 static void ion_vm_close(struct vm_area_struct *vma)
1225 struct ion_buffer *buffer = vma->vm_private_data;
1226 struct ion_vma_list *vma_list, *tmp;
1228 pr_debug("%s\n", __func__);
1229 mutex_lock(&buffer->lock);
1230 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1231 if (vma_list->vma != vma)
1233 list_del(&vma_list->list);
1235 pr_debug("%s: deleting %p\n", __func__, vma);
1238 mutex_unlock(&buffer->lock);
1241 static struct vm_operations_struct ion_vma_ops = {
1242 .open = ion_vm_open,
1243 .close = ion_vm_close,
1244 .fault = ion_vm_fault,
1247 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1249 struct ion_buffer *buffer = dmabuf->priv;
1252 if (!buffer->heap->ops->map_user) {
1253 pr_err("%s: this heap does not define a method for mapping "
1254 "to userspace\n", __func__);
1258 if (ion_buffer_fault_user_mappings(buffer)) {
1259 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1261 vma->vm_private_data = buffer;
1262 vma->vm_ops = &ion_vma_ops;
1267 if (!(buffer->flags & ION_FLAG_CACHED))
1268 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1270 mutex_lock(&buffer->lock);
1271 /* now map it to userspace */
1272 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1273 mutex_unlock(&buffer->lock);
1276 pr_err("%s: failure mapping buffer to userspace\n",
1282 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1284 struct ion_buffer *buffer = dmabuf->priv;
1285 ion_buffer_put(buffer);
1288 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1290 struct ion_buffer *buffer = dmabuf->priv;
1291 return buffer->vaddr + offset * PAGE_SIZE;
1294 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1300 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1302 enum dma_data_direction direction)
1304 struct ion_buffer *buffer = dmabuf->priv;
1307 if (!buffer->heap->ops->map_kernel) {
1308 pr_err("%s: map kernel is not implemented by this heap.\n",
1313 mutex_lock(&buffer->lock);
1314 vaddr = ion_buffer_kmap_get(buffer);
1315 mutex_unlock(&buffer->lock);
1317 return PTR_ERR(vaddr);
1321 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1323 enum dma_data_direction direction)
1325 struct ion_buffer *buffer = dmabuf->priv;
1327 mutex_lock(&buffer->lock);
1328 ion_buffer_kmap_put(buffer);
1329 mutex_unlock(&buffer->lock);
1332 static struct dma_buf_ops dma_buf_ops = {
1333 .map_dma_buf = ion_map_dma_buf,
1334 .unmap_dma_buf = ion_unmap_dma_buf,
1336 .release = ion_dma_buf_release,
1337 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1338 .end_cpu_access = ion_dma_buf_end_cpu_access,
1339 .kmap_atomic = ion_dma_buf_kmap,
1340 .kunmap_atomic = ion_dma_buf_kunmap,
1341 .kmap = ion_dma_buf_kmap,
1342 .kunmap = ion_dma_buf_kunmap,
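/*
 * Illustrative sketch (not part of the original driver): any other kernel
 * driver can consume the dma-buf exported below through the standard
 * dma-buf API.  "importer_dev" and "fd" are placeholders; error handling
 * is trimmed.
 */
#if 0	/* example only, not compiled */
static int example_import(struct device *importer_dev, int fd)
{
	struct dma_buf *buf = dma_buf_get(fd);
	struct dma_buf_attachment *att = dma_buf_attach(buf, importer_dev);
	struct sg_table *sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);

	/* ... hand @sgt to the importing device, run the job ... */

	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(buf, att);
	dma_buf_put(buf);
	return 0;
}
#endif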
1345 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1346 struct ion_handle *handle)
1348 struct ion_buffer *buffer;
1349 struct dma_buf *dmabuf;
1352 mutex_lock(&client->lock);
1353 valid_handle = ion_handle_validate(client, handle);
1354 if (!valid_handle) {
1355 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1356 mutex_unlock(&client->lock);
1357 return ERR_PTR(-EINVAL);
1359 buffer = handle->buffer;
1360 ion_buffer_get(buffer);
1361 mutex_unlock(&client->lock);
1363 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1364 if (IS_ERR(dmabuf)) {
1365 ion_buffer_put(buffer);
1371 EXPORT_SYMBOL(ion_share_dma_buf);
1373 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1375 struct dma_buf *dmabuf;
1378 dmabuf = ion_share_dma_buf(client, handle);
1380 return PTR_ERR(dmabuf);
1382 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1384 dma_buf_put(dmabuf);
1388 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1390 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1392 struct dma_buf *dmabuf;
1393 struct ion_buffer *buffer;
1394 struct ion_handle *handle;
1397 dmabuf = dma_buf_get(fd);
1399 return ERR_PTR(PTR_ERR(dmabuf));
1400 /* if this memory came from ion */
1402 if (dmabuf->ops != &dma_buf_ops) {
1403 pr_err("%s: can not import dmabuf from another exporter\n",
1405 dma_buf_put(dmabuf);
1406 return ERR_PTR(-EINVAL);
1408 buffer = dmabuf->priv;
1410 mutex_lock(&client->lock);
1411 /* if a handle exists for this buffer just take a reference to it */
1412 handle = ion_handle_lookup(client, buffer);
1413 if (!IS_ERR(handle)) {
1414 ion_handle_get(handle);
1415 mutex_unlock(&client->lock);
1418 mutex_unlock(&client->lock);
1420 handle = ion_handle_create(client, buffer);
1424 mutex_lock(&client->lock);
1425 ret = ion_handle_add(client, handle);
1426 mutex_unlock(&client->lock);
1428 ion_handle_put(handle);
1429 handle = ERR_PTR(ret);
1433 dma_buf_put(dmabuf);
1436 EXPORT_SYMBOL(ion_import_dma_buf);
1438 static int ion_sync_for_device(struct ion_client *client, int fd)
1440 struct dma_buf *dmabuf;
1441 struct ion_buffer *buffer;
1443 dmabuf = dma_buf_get(fd);
1445 return PTR_ERR(dmabuf);
1447 /* if this memory came from ion */
1448 if (dmabuf->ops != &dma_buf_ops) {
1449 pr_err("%s: can not sync dmabuf from another exporter\n",
1451 dma_buf_put(dmabuf);
1454 buffer = dmabuf->priv;
1456 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1457 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1458 dma_buf_put(dmabuf);
1462 /* fix up the cases where the ioctl direction bits are incorrect */
1463 static unsigned int ion_ioctl_dir(unsigned int cmd)
1468 case ION_IOC_CUSTOM:
1471 return _IOC_DIR(cmd);
1475 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1477 struct ion_client *client = filp->private_data;
1478 struct ion_device *dev = client->dev;
1479 struct ion_handle *cleanup_handle = NULL;
1484 struct ion_fd_data fd;
1485 struct ion_allocation_data allocation;
1486 struct ion_handle_data handle;
1487 struct ion_custom_data custom;
1490 dir = ion_ioctl_dir(cmd);
1492 if (_IOC_SIZE(cmd) > sizeof(data))
1495 if (dir & _IOC_WRITE)
1496 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1502 struct ion_handle *handle;
1504 handle = ion_alloc(client, data.allocation.len,
1505 data.allocation.align,
1506 data.allocation.heap_id_mask,
1507 data.allocation.flags);
1509 return PTR_ERR(handle);
1511 data.allocation.handle = handle->id;
1513 cleanup_handle = handle;
1518 struct ion_handle *handle;
1520 handle = ion_handle_get_by_id(client, data.handle.handle);
1522 return PTR_ERR(handle);
1523 ion_free(client, handle);
1524 ion_handle_put(handle);
1530 struct ion_handle *handle;
1532 handle = ion_handle_get_by_id(client, data.handle.handle);
1534 return PTR_ERR(handle);
1535 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1536 ion_handle_put(handle);
1541 case ION_IOC_IMPORT:
1543 struct ion_handle *handle;
1544 handle = ion_import_dma_buf(client, data.fd.fd);
1546 ret = PTR_ERR(handle);
1548 data.handle.handle = handle->id;
1553 ret = ion_sync_for_device(client, data.fd.fd);
1556 case ION_IOC_CUSTOM:
1558 if (!dev->custom_ioctl)
1560 ret = dev->custom_ioctl(client, data.custom.cmd,
1568 if (dir & _IOC_READ) {
1569 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1571 ion_free(client, cleanup_handle);
1578 static int ion_release(struct inode *inode, struct file *file)
1580 struct ion_client *client = file->private_data;
1582 pr_debug("%s: %d\n", __func__, __LINE__);
1583 ion_client_destroy(client);
1587 static int ion_open(struct inode *inode, struct file *file)
1589 struct miscdevice *miscdev = file->private_data;
1590 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1591 struct ion_client *client;
1592 char debug_name[64];
1594 pr_debug("%s: %d\n", __func__, __LINE__);
1595 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1596 client = ion_client_create(dev, debug_name);
1598 return PTR_ERR(client);
1599 file->private_data = client;
1604 static const struct file_operations ion_fops = {
1605 .owner = THIS_MODULE,
1607 .release = ion_release,
1608 .unlocked_ioctl = ion_ioctl,
1609 .compat_ioctl = compat_ion_ioctl,
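/*
 * Illustrative userspace sketch (not part of this driver), assuming the
 * standard ION uapi ioctls from <linux/ion.h>; heap id and flags are
 * placeholders:
 *
 *	int ion = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096,
 *		.heap_id_mask = 1 << 0, .flags = 0,
 *	};
 *	ioctl(ion, ION_IOC_ALLOC, &alloc);
 *
 *	struct ion_fd_data share = { .handle = alloc.handle };
 *	ioctl(ion, ION_IOC_SHARE, &share);    (share.fd is now a dma-buf fd)
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, share.fd, 0);
 *
 *	struct ion_handle_data free_data = { .handle = alloc.handle };
 *	ioctl(ion, ION_IOC_FREE, &free_data);
 */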
1612 int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
1613 void *uaddr, unsigned long offset, unsigned long len,
1616 struct ion_buffer *buffer;
1619 mutex_lock(&client->lock);
1620 if (!ion_handle_validate(client, handle)) {
1621 pr_err("%s: invalid handle passed to do_cache_op.\n",
1623 mutex_unlock(&client->lock);
1626 buffer = handle->buffer;
1627 mutex_lock(&buffer->lock);
1629 if (!ION_IS_CACHED(buffer->flags)) {
1634 if (!handle->buffer->heap->ops->cache_op) {
1635 pr_err("%s: cache_op is not implemented by this heap.\n",
1642 ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
1646 mutex_unlock(&buffer->lock);
1647 mutex_unlock(&client->lock);
1651 EXPORT_SYMBOL(ion_do_cache_op);
1653 static size_t ion_debug_heap_total(struct ion_client *client,
1659 mutex_lock(&client->lock);
1660 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1661 struct ion_handle *handle = rb_entry(n,
1664 if (handle->buffer->heap->id == id)
1665 size += handle->buffer->size;
1667 mutex_unlock(&client->lock);
1671 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1673 struct ion_heap *heap = s->private;
1674 struct ion_device *dev = heap->dev;
1676 size_t total_size = 0;
1677 size_t total_orphaned_size = 0;
1679 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1680 seq_printf(s, "----------------------------------------------------\n");
1682 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1683 struct ion_client *client = rb_entry(n, struct ion_client,
1685 size_t size = ion_debug_heap_total(client, heap->id);
1689 char task_comm[TASK_COMM_LEN];
1691 get_task_comm(task_comm, client->task);
1692 seq_printf(s, "%16s %16u %16zu\n", task_comm,
1695 seq_printf(s, "%16s %16u %16zu\n", client->name,
1699 seq_printf(s, "----------------------------------------------------\n");
1700 seq_printf(s, "orphaned allocations (info is from last known client):"
1702 mutex_lock(&dev->buffer_lock);
1703 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1704 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1706 if (buffer->heap->id != heap->id)
1708 total_size += buffer->size;
1709 if (!buffer->handle_count) {
1710 seq_printf(s, "%16s %16u %16zu %d %d\n",
1711 buffer->task_comm, buffer->pid,
1712 buffer->size, buffer->kmap_cnt,
1713 atomic_read(&buffer->ref.refcount));
1714 total_orphaned_size += buffer->size;
1717 mutex_unlock(&dev->buffer_lock);
1718 seq_printf(s, "----------------------------------------------------\n");
1719 seq_printf(s, "%16s %16zu\n", "total orphaned",
1720 total_orphaned_size);
1721 seq_printf(s, "%16s %16zu\n", "total ", total_size);
1722 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1723 seq_printf(s, "%16s %16zu\n", "deferred free",
1724 heap->free_list_size);
1725 seq_printf(s, "----------------------------------------------------\n");
1727 if (heap->debug_show)
1728 heap->debug_show(heap, s, unused);
1733 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1735 return single_open(file, ion_debug_heap_show, inode->i_private);
1738 static const struct file_operations debug_heap_fops = {
1739 .open = ion_debug_heap_open,
1741 .llseek = seq_lseek,
1742 .release = single_release,
1745 #ifdef DEBUG_HEAP_SHRINKER
1746 static int debug_shrink_set(void *data, u64 val)
1748 struct ion_heap *heap = data;
1749 struct shrink_control sc;
1758 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1759 sc.nr_to_scan = objs;
1761 heap->shrinker.shrink(&heap->shrinker, &sc);
1765 static int debug_shrink_get(void *data, u64 *val)
1767 struct ion_heap *heap = data;
1768 struct shrink_control sc;
1774 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1779 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1780 debug_shrink_set, "%llu\n");
1784 /* struct "cma" copied from drivers/base/dma-contiguous.c */
1786 unsigned long base_pfn;
1787 unsigned long count;
1788 unsigned long *bitmap;
1791 /* struct "ion_cma_heap" copied from drivers/staging/android/ion/ion_cma_heap.c */
1792 struct ion_cma_heap {
1793 struct ion_heap heap;
1797 static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
1799 struct ion_heap *heap = s->private;
1800 struct ion_cma_heap *cma_heap = container_of(heap,
1801 struct ion_cma_heap,
1803 struct device *dev = cma_heap->dev;
1804 struct cma *cma = dev_get_cma_area(dev);
1806 int rows = cma->count/(SZ_1M >> PAGE_SHIFT);
1807 phys_addr_t base = __pfn_to_phys(cma->base_pfn);
1809 seq_printf(s, "%s Heap bitmap:\n", heap->name);
1811 for (i = rows - 1; i >= 0; i--) {
1812 seq_printf(s, "%.4uM@0x%08x: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
1813 i+1, base+(i)*SZ_1M,
1814 cma->bitmap[i*8 + 7],
1815 cma->bitmap[i*8 + 6],
1816 cma->bitmap[i*8 + 5],
1817 cma->bitmap[i*8 + 4],
1818 cma->bitmap[i*8 + 3],
1819 cma->bitmap[i*8 + 2],
1820 cma->bitmap[i*8 + 1],
1823 seq_printf(s, "Heap size: %luM, Heap base: 0x%08x\n",
1824 (cma->count)>>8, base);
1829 static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
1831 return single_open(file, ion_cma_heap_debug_show, inode->i_private);
1834 static const struct file_operations debug_heap_bitmap_fops = {
1835 .open = ion_debug_heap_bitmap_open,
1837 .llseek = seq_lseek,
1838 .release = single_release,
1842 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1844 struct dentry *debug_file;
1846 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1847 !heap->ops->unmap_dma)
1848 pr_err("%s: can not add heap with invalid ops struct.\n",
1851 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1852 ion_heap_init_deferred_free(heap);
1854 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1855 ion_heap_init_shrinker(heap);
1858 down_write(&dev->lock);
1859 /* use negative heap->id to reverse the priority -- when traversing
1860 the list later attempt higher id numbers first */
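/* e.g. a heap registered with id 3 is inserted at priority -3 and is
   therefore tried before a heap registered with id 1 (priority -1) */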
1861 plist_node_init(&heap->node, -heap->id);
1862 plist_add(&heap->node, &dev->heaps);
1863 debug_file = debugfs_create_file(heap->name, 0664,
1864 dev->heaps_debug_root, heap,
1868 char buf[256], *path;
1869 path = dentry_path(dev->heaps_debug_root, buf, 256);
1870 pr_err("Failed to create heap debugfs at %s/%s\n",
1874 #ifdef DEBUG_HEAP_SHRINKER
1875 if (heap->shrinker.shrink) {
1876 char debug_name[64];
1878 snprintf(debug_name, 64, "%s_shrink", heap->name);
1879 debug_file = debugfs_create_file(
1880 debug_name, 0644, dev->heaps_debug_root, heap,
1881 &debug_shrink_fops);
1883 char buf[256], *path;
1884 path = dentry_path(dev->heaps_debug_root, buf, 256);
1885 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1891 if (heap->type == ION_HEAP_TYPE_DMA) {
1892 char *heap_bitmap_name = kasprintf(
1893 GFP_KERNEL, "%s-bitmap", heap->name);
1894 debug_file = debugfs_create_file(heap_bitmap_name, 0664,
1895 dev->heaps_debug_root, heap,
1896 &debug_heap_bitmap_fops);
1898 char buf[256], *path;
1899 path = dentry_path(dev->heaps_debug_root, buf, 256);
1900 pr_err("Failed to create heap debugfs at %s/%s\n",
1901 path, heap_bitmap_name);
1903 kfree(heap_bitmap_name);
1906 up_write(&dev->lock);
1909 struct ion_device *ion_device_create(long (*custom_ioctl)
1910 (struct ion_client *client,
1914 struct ion_device *idev;
1917 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1919 return ERR_PTR(-ENOMEM);
1921 idev->dev.minor = MISC_DYNAMIC_MINOR;
1922 idev->dev.name = "ion";
1923 idev->dev.fops = &ion_fops;
1924 idev->dev.parent = NULL;
1925 ret = misc_register(&idev->dev);
1927 pr_err("ion: failed to register misc device.\n");
1928 return ERR_PTR(ret);
1931 idev->debug_root = debugfs_create_dir("ion", NULL);
1932 if (!idev->debug_root) {
1933 pr_err("ion: failed to create debugfs root directory.\n");
1936 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1937 if (!idev->heaps_debug_root) {
1938 pr_err("ion: failed to create debugfs heaps directory.\n");
1941 idev->clients_debug_root = debugfs_create_dir("clients",
1943 if (!idev->clients_debug_root)
1944 pr_err("ion: failed to create debugfs clients directory.\n");
1948 idev->custom_ioctl = custom_ioctl;
1949 idev->buffers = RB_ROOT;
1950 mutex_init(&idev->buffer_lock);
1951 init_rwsem(&idev->lock);
1952 plist_head_init(&idev->heaps);
1953 idev->clients = RB_ROOT;
1957 void ion_device_destroy(struct ion_device *dev)
1959 misc_deregister(&dev->dev);
1960 debugfs_remove_recursive(dev->debug_root);
1961 /* XXX need to free the heaps and clients ? */
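/*
 * Illustrative sketch (not part of the original driver): a platform driver
 * typically creates the ion device once and registers its heaps from an
 * ion_platform_data table, roughly as below.  ion_heap_create() is assumed
 * to be the helper from ion_heap.c; names and error handling are trimmed.
 */
#if 0	/* example only, not compiled */
static struct ion_device *example_idev;

static int example_ion_probe(struct ion_platform_data *pdata)
{
	int i;

	example_idev = ion_device_create(NULL /* no custom ioctl */);
	if (IS_ERR(example_idev))
		return PTR_ERR(example_idev);

	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (!IS_ERR(heap))
			ion_device_add_heap(example_idev, heap);
	}
	return 0;
}
#endif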
1965 void __init ion_reserve(struct ion_platform_data *data)
1969 for (i = 0; i < data->nr; i++) {
1970 if (data->heaps[i].size == 0)
1973 if (data->heaps[i].base == 0) {
1975 paddr = memblock_alloc_base(data->heaps[i].size,
1976 data->heaps[i].align,
1977 MEMBLOCK_ALLOC_ANYWHERE);
1979 pr_err("%s: error allocating memblock for "
1984 data->heaps[i].base = paddr;
1986 int ret = memblock_reserve(data->heaps[i].base,
1987 data->heaps[i].size);
1989 pr_err("memblock reserve of %zx@%lx failed\n",
1990 data->heaps[i].size,
1991 data->heaps[i].base);
1993 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1994 data->heaps[i].name,
1995 data->heaps[i].base,
1996 data->heaps[i].size);