3 * drivers/gpu/ion/ion.c
5 * Copyright (C) 2011 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <linux/dma-contiguous.h>
43 #include "compat_ion.h"
46 * struct ion_device - the metadata of the ion device node
47 * @dev: the actual misc device
48 * @buffers: an rb tree of all the existing buffers
49 * @buffer_lock: lock protecting the tree of buffers
50 * @lock: rwsem protecting the tree of heaps and clients
51 * @heaps: list of all the heaps in the system
52 * @clients: rb tree of all the clients (kernel and userspace) on this device
55 struct miscdevice dev;
56 struct rb_root buffers;
57 struct mutex buffer_lock;
58 struct rw_semaphore lock;
59 struct plist_head heaps;
60 long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
62 struct rb_root clients;
63 struct dentry *debug_root;
64 struct dentry *heaps_debug_root;
65 struct dentry *clients_debug_root;
69 * struct ion_client - a process/hw block local address space
70 * @node: node in the tree of all clients
71 * @dev: backpointer to ion device
72 * @handles: an rb tree of all the handles in this client
73 * @idr: an idr space for allocating handle ids
74 * @lock: lock protecting the tree of handles
75 * @name: used for debugging
76 * @display_name: used for debugging (unique version of @name)
77 * @display_serial: used for debugging (to make display_name unique)
78 * @task: used for debugging
80 * A client represents a list of buffers this client may access.
81 * The mutex stored here is used to protect both the handles tree
82 * and the handles themselves; it must be held while modifying either.
86 struct ion_device *dev;
87 struct rb_root handles;
93 struct task_struct *task;
95 struct dentry *debug_root;
99 * ion_handle - a client local reference to a buffer
100 * @ref: reference count
101 * @client: back pointer to the client the buffer resides in
102 * @buffer: pointer to the buffer
103 * @node: node in the client's handle rbtree
104 * @kmap_cnt: count of times this client has mapped to kernel
105 * @id: client-unique id allocated by client->idr
107 * Modifications to node or kmap_cnt should be protected by the
108 * lock in the client. Other fields are never changed after initialization.
112 struct ion_client *client;
113 struct ion_buffer *buffer;
115 unsigned int kmap_cnt;
119 #ifdef CONFIG_ROCKCHIP_IOMMU
120 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
122 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
123 extern char *rockchip_ion_snapshot_get(unsigned *size);
124 extern int rockchip_ion_snapshot_debugfs(struct dentry* root);
125 static int ion_snapshot_save(struct ion_device *idev, size_t len);
128 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
130 return (buffer->flags & ION_FLAG_CACHED) &&
131 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
134 bool ion_buffer_cached(struct ion_buffer *buffer)
136 return !!(buffer->flags & ION_FLAG_CACHED);
139 static inline struct page *ion_buffer_page(struct page *page)
141 return (struct page *)((unsigned long)page & ~(1UL));
144 static inline bool ion_buffer_page_is_dirty(struct page *page)
146 return !!((unsigned long)page & 1UL);
149 static inline void ion_buffer_page_dirty(struct page **page)
151 *page = (struct page *)((unsigned long)(*page) | 1UL);
154 static inline void ion_buffer_page_clean(struct page **page)
156 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
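/*
 * The low bit of each entry in buffer->pages[] doubles as a per-page dirty
 * flag for buffers that fault in their user mappings: struct page pointers
 * are always at least word aligned, so bit 0 is otherwise free.  A typical
 * (illustrative) sequence tags a page and strips the tag before use:
 *
 *	ion_buffer_page_dirty(buffer->pages + i);
 *	if (ion_buffer_page_is_dirty(buffer->pages[i]))
 *		page = ion_buffer_page(buffer->pages[i]);
 */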
159 /* this function should only be called while dev->lock is held */
160 static void ion_buffer_add(struct ion_device *dev,
161 struct ion_buffer *buffer)
163 struct rb_node **p = &dev->buffers.rb_node;
164 struct rb_node *parent = NULL;
165 struct ion_buffer *entry;
169 entry = rb_entry(parent, struct ion_buffer, node);
171 if (buffer < entry) {
173 } else if (buffer > entry) {
176 pr_err("%s: buffer already found.", __func__);
181 rb_link_node(&buffer->node, parent, p);
182 rb_insert_color(&buffer->node, &dev->buffers);
185 /* this function should only be called while dev->lock is held */
186 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
187 struct ion_device *dev,
192 struct ion_buffer *buffer;
193 struct sg_table *table;
194 struct scatterlist *sg;
197 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
199 return ERR_PTR(-ENOMEM);
202 buffer->flags = flags;
203 kref_init(&buffer->ref);
205 ret = heap->ops->allocate(heap, buffer, len, align, flags);
208 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
211 ion_heap_freelist_drain(heap, 0);
212 ret = heap->ops->allocate(heap, buffer, len, align,
221 table = heap->ops->map_dma(heap, buffer);
222 if (WARN_ONCE(table == NULL,
223 "heap->ops->map_dma should return ERR_PTR on error"))
224 table = ERR_PTR(-EINVAL);
226 heap->ops->free(buffer);
228 return ERR_CAST(table);
230 buffer->sg_table = table;
231 if (ion_buffer_fault_user_mappings(buffer)) {
232 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
233 struct scatterlist *sg;
236 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
237 if (!buffer->pages) {
242 for_each_sg(table->sgl, sg, table->nents, i) {
243 struct page *page = sg_page(sg);
245 for (j = 0; j < sg->length / PAGE_SIZE; j++)
246 buffer->pages[k++] = page++;
255 INIT_LIST_HEAD(&buffer->vmas);
256 mutex_init(&buffer->lock);
257 /* this will set up dma addresses for the sglist -- it is not
258 technically correct as per the dma api -- a specific
259 device isn't really taking ownership here. However, in practice on
260 our systems the only dma_address space is physical addresses.
261 Additionally, we can't afford the overhead of invalidating every
262 allocation via dma_map_sg. The implicit contract here is that
263 memory coming from the heaps is ready for dma, i.e. if it has a
264 cached mapping that mapping has been invalidated */
265 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
266 sg_dma_address(sg) = sg_phys(sg);
267 mutex_lock(&dev->buffer_lock);
268 ion_buffer_add(dev, buffer);
269 mutex_unlock(&dev->buffer_lock);
273 heap->ops->unmap_dma(heap, buffer);
274 heap->ops->free(buffer);
277 vfree(buffer->pages);
283 void ion_buffer_destroy(struct ion_buffer *buffer)
285 if (WARN_ON(buffer->kmap_cnt > 0))
286 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
287 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
288 #ifdef CONFIG_ROCKCHIP_IOMMU
289 ion_iommu_force_unmap(buffer);
291 buffer->heap->ops->free(buffer);
293 vfree(buffer->pages);
297 static void _ion_buffer_destroy(struct kref *kref)
299 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
300 struct ion_heap *heap = buffer->heap;
301 struct ion_device *dev = buffer->dev;
303 mutex_lock(&dev->buffer_lock);
304 rb_erase(&buffer->node, &dev->buffers);
305 mutex_unlock(&dev->buffer_lock);
307 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
308 ion_heap_freelist_add(heap, buffer);
310 ion_buffer_destroy(buffer);
313 static void ion_buffer_get(struct ion_buffer *buffer)
315 kref_get(&buffer->ref);
318 static int ion_buffer_put(struct ion_buffer *buffer)
320 return kref_put(&buffer->ref, _ion_buffer_destroy);
323 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
325 mutex_lock(&buffer->lock);
326 buffer->handle_count++;
327 mutex_unlock(&buffer->lock);
330 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
333 * When a buffer is removed from a handle, if it is not in
334 * any other handles, copy the taskcomm and the pid of the
335 * process it's being removed from into the buffer. At this
336 * point there will be no way to track what processes this buffer is
337 * being used by; it only exists as a dma_buf file descriptor.
338 * The taskcomm and pid can provide a debug hint as to where this fd is in the system.
341 mutex_lock(&buffer->lock);
342 buffer->handle_count--;
343 BUG_ON(buffer->handle_count < 0);
344 if (!buffer->handle_count) {
345 struct task_struct *task;
347 task = current->group_leader;
348 get_task_comm(buffer->task_comm, task);
349 buffer->pid = task_pid_nr(task);
351 mutex_unlock(&buffer->lock);
354 static struct ion_handle *ion_handle_create(struct ion_client *client,
355 struct ion_buffer *buffer)
357 struct ion_handle *handle;
359 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
361 return ERR_PTR(-ENOMEM);
362 kref_init(&handle->ref);
363 RB_CLEAR_NODE(&handle->node);
364 handle->client = client;
365 ion_buffer_get(buffer);
366 ion_buffer_add_to_handle(buffer);
367 handle->buffer = buffer;
372 static void ion_handle_kmap_put(struct ion_handle *);
374 static void ion_handle_destroy(struct kref *kref)
376 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
377 struct ion_client *client = handle->client;
378 struct ion_buffer *buffer = handle->buffer;
380 mutex_lock(&buffer->lock);
381 while (handle->kmap_cnt)
382 ion_handle_kmap_put(handle);
383 mutex_unlock(&buffer->lock);
385 idr_remove(&client->idr, handle->id);
386 if (!RB_EMPTY_NODE(&handle->node))
387 rb_erase(&handle->node, &client->handles);
389 ion_buffer_remove_from_handle(buffer);
390 ion_buffer_put(buffer);
395 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
397 return handle->buffer;
400 static void ion_handle_get(struct ion_handle *handle)
402 kref_get(&handle->ref);
405 int ion_handle_put(struct ion_handle *handle)
407 struct ion_client *client = handle->client;
410 mutex_lock(&client->lock);
411 ret = kref_put(&handle->ref, ion_handle_destroy);
412 mutex_unlock(&client->lock);
417 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
418 struct ion_buffer *buffer)
420 struct rb_node *n = client->handles.rb_node;
423 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
424 if (buffer < entry->buffer)
426 else if (buffer > entry->buffer)
431 return ERR_PTR(-EINVAL);
434 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
437 struct ion_handle *handle;
439 mutex_lock(&client->lock);
440 handle = idr_find(&client->idr, id);
442 ion_handle_get(handle);
443 mutex_unlock(&client->lock);
445 return handle ? handle : ERR_PTR(-EINVAL);
448 static bool ion_handle_validate(struct ion_client *client,
449 struct ion_handle *handle)
451 WARN_ON(!mutex_is_locked(&client->lock));
452 return (idr_find(&client->idr, handle->id) == handle);
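/*
 * A handle id looked up earlier (or passed in through the ioctl layer) may
 * already have been freed by the time it is used; callers therefore re-check
 * it with ion_handle_validate() while holding client->lock before
 * dereferencing it.
 */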
455 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
458 struct rb_node **p = &client->handles.rb_node;
459 struct rb_node *parent = NULL;
460 struct ion_handle *entry;
462 id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
470 entry = rb_entry(parent, struct ion_handle, node);
472 if (handle->buffer < entry->buffer)
474 else if (handle->buffer > entry->buffer)
477 WARN(1, "%s: buffer already found.", __func__);
480 rb_link_node(&handle->node, parent, p);
481 rb_insert_color(&handle->node, &client->handles);
486 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
487 size_t align, unsigned int heap_id_mask,
490 struct ion_handle *handle;
491 struct ion_device *dev = client->dev;
492 struct ion_buffer *buffer = NULL;
493 struct ion_heap *heap;
496 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
497 len, align, heap_id_mask, flags);
499 * traverse the list of heaps available in this system in priority
500 * order. If the heap type is supported by the client and matches the
501 * request of the caller, allocate from it. Repeat until allocate has
502 * succeeded or all heaps have been tried.
504 len = PAGE_ALIGN(len);
507 return ERR_PTR(-EINVAL);
509 down_read(&dev->lock);
510 plist_for_each_entry(heap, &dev->heaps, node) {
511 /* if the caller didn't specify this heap id */
512 if (!((1 << heap->id) & heap_id_mask))
514 buffer = ion_buffer_create(heap, dev, len, align, flags);
521 return ERR_PTR(-ENODEV);
523 if (IS_ERR(buffer)) {
524 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
525 ion_snapshot_save(client->dev, len);
527 return ERR_CAST(buffer);
530 handle = ion_handle_create(client, buffer);
533 * ion_buffer_create will create a buffer with a ref_cnt of 1,
534 * and ion_handle_create will take a second reference, drop one here
536 ion_buffer_put(buffer);
541 mutex_lock(&client->lock);
542 ret = ion_handle_add(client, handle);
543 mutex_unlock(&client->lock);
545 ion_handle_put(handle);
546 handle = ERR_PTR(ret);
551 EXPORT_SYMBOL(ion_alloc);
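/*
 * Illustrative use of ion_alloc()/ion_free() from a kernel client ("client"
 * and "heap_id" are placeholders for whatever the caller already set up):
 * allocate one page from a chosen heap and release it again.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE,
 *			   1 << heap_id, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */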
553 void ion_free(struct ion_client *client, struct ion_handle *handle)
557 BUG_ON(client != handle->client);
559 mutex_lock(&client->lock);
560 valid_handle = ion_handle_validate(client, handle);
563 WARN(1, "%s: invalid handle passed to free.\n", __func__);
564 mutex_unlock(&client->lock);
567 mutex_unlock(&client->lock);
568 ion_handle_put(handle);
570 EXPORT_SYMBOL(ion_free);
572 int ion_phys(struct ion_client *client, struct ion_handle *handle,
573 ion_phys_addr_t *addr, size_t *len)
575 struct ion_buffer *buffer;
578 mutex_lock(&client->lock);
579 if (!ion_handle_validate(client, handle)) {
580 mutex_unlock(&client->lock);
584 buffer = handle->buffer;
586 if (!buffer->heap->ops->phys) {
587 pr_err("%s: ion_phys is not implemented by this heap.\n",
589 mutex_unlock(&client->lock);
592 mutex_unlock(&client->lock);
593 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
596 EXPORT_SYMBOL(ion_phys);
598 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
602 if (buffer->kmap_cnt) {
604 return buffer->vaddr;
606 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
607 if (WARN_ONCE(vaddr == NULL,
608 "heap->ops->map_kernel should return ERR_PTR on error"))
609 return ERR_PTR(-EINVAL);
612 buffer->vaddr = vaddr;
617 static void *ion_handle_kmap_get(struct ion_handle *handle)
619 struct ion_buffer *buffer = handle->buffer;
622 if (handle->kmap_cnt) {
624 return buffer->vaddr;
626 vaddr = ion_buffer_kmap_get(buffer);
633 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
636 if (!buffer->kmap_cnt) {
637 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
638 buffer->vaddr = NULL;
642 static void ion_handle_kmap_put(struct ion_handle *handle)
644 struct ion_buffer *buffer = handle->buffer;
647 if (!handle->kmap_cnt)
648 ion_buffer_kmap_put(buffer);
651 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
653 struct ion_buffer *buffer;
656 mutex_lock(&client->lock);
657 if (!ion_handle_validate(client, handle)) {
658 pr_err("%s: invalid handle passed to map_kernel.\n",
660 mutex_unlock(&client->lock);
661 return ERR_PTR(-EINVAL);
664 buffer = handle->buffer;
666 if (!handle->buffer->heap->ops->map_kernel) {
667 pr_err("%s: map_kernel is not implemented by this heap.\n",
669 mutex_unlock(&client->lock);
670 return ERR_PTR(-ENODEV);
673 mutex_lock(&buffer->lock);
674 vaddr = ion_handle_kmap_get(handle);
675 mutex_unlock(&buffer->lock);
676 mutex_unlock(&client->lock);
679 EXPORT_SYMBOL(ion_map_kernel);
681 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
683 struct ion_buffer *buffer;
685 mutex_lock(&client->lock);
686 buffer = handle->buffer;
687 mutex_lock(&buffer->lock);
688 ion_handle_kmap_put(handle);
689 mutex_unlock(&buffer->lock);
690 mutex_unlock(&client->lock);
692 EXPORT_SYMBOL(ion_unmap_kernel);
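/*
 * The two exports above are meant to be paired.  A sketch, with "client",
 * "handle" and "len" standing in for the caller's state:
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 */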
694 #ifdef CONFIG_ROCKCHIP_IOMMU
695 static void ion_iommu_add(struct ion_buffer *buffer,
696 struct ion_iommu_map *iommu)
698 struct rb_node **p = &buffer->iommu_maps.rb_node;
699 struct rb_node *parent = NULL;
700 struct ion_iommu_map *entry;
704 entry = rb_entry(parent, struct ion_iommu_map, node);
706 if (iommu->key < entry->key) {
708 } else if (iommu->key > entry->key) {
711 pr_err("%s: buffer %p already has mapping for domainid %x\n",
719 rb_link_node(&iommu->node, parent, p);
720 rb_insert_color(&iommu->node, &buffer->iommu_maps);
723 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
726 struct rb_node **p = &buffer->iommu_maps.rb_node;
727 struct rb_node *parent = NULL;
728 struct ion_iommu_map *entry;
732 entry = rb_entry(parent, struct ion_iommu_map, node);
734 if (key < entry->key)
736 else if (key > entry->key)
745 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
746 struct device *iommu_dev, unsigned long *iova)
748 struct ion_iommu_map *data;
751 data = kmalloc(sizeof(*data), GFP_ATOMIC);
754 return ERR_PTR(-ENOMEM);
756 data->buffer = buffer;
757 data->key = (uint32_t)iommu_dev;
759 ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
760 buffer->size, buffer->flags);
764 kref_init(&data->ref);
765 *iova = data->iova_addr;
767 ion_iommu_add(buffer, data);
776 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
777 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
779 struct ion_buffer *buffer;
780 struct ion_iommu_map *iommu_map;
783 mutex_lock(&client->lock);
784 if (!ion_handle_validate(client, handle)) {
785 pr_err("%s: invalid handle passed to map_kernel.\n",
787 mutex_unlock(&client->lock);
791 buffer = handle->buffer;
792 pr_debug("%s: map buffer(%p)\n", __func__, buffer);
794 mutex_lock(&buffer->lock);
796 if (ION_IS_CACHED(buffer->flags)) {
797 pr_err("%s: Cannot map iommu as cached.\n", __func__);
802 if (!handle->buffer->heap->ops->map_iommu) {
803 pr_err("%s: map_iommu is not implemented by this heap.\n",
809 if (buffer->size & ~PAGE_MASK) {
810 pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
811 buffer->size, PAGE_SIZE);
816 iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
818 pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
819 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
821 pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
822 if (iommu_map->mapped_size != buffer->size) {
823 pr_err("%s: handle %p is already mapped with length"
824 " %x, trying to map with length %x\n",
825 __func__, handle, iommu_map->mapped_size, buffer->size);
828 kref_get(&iommu_map->ref);
829 *iova = iommu_map->iova_addr;
833 buffer->iommu_map_cnt++;
834 *size = buffer->size;
836 mutex_unlock(&buffer->lock);
837 mutex_unlock(&client->lock);
840 EXPORT_SYMBOL(ion_map_iommu);
842 static void ion_iommu_release(struct kref *kref)
844 struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
846 struct ion_buffer *buffer = map->buffer;
848 rb_erase(&map->node, &buffer->iommu_maps);
849 buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
854 * Unmap any outstanding mappings which would otherwise have been leaked.
856 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
858 struct ion_iommu_map *iommu_map;
859 struct rb_node *node;
860 const struct rb_root *rb = &(buffer->iommu_maps);
862 pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);
864 mutex_lock(&buffer->lock);
866 while ((node = rb_first(rb)) != 0) {
867 iommu_map = rb_entry(node, struct ion_iommu_map, node);
868 /* set ref count to 1 to force release */
869 kref_init(&iommu_map->ref);
870 kref_put(&iommu_map->ref, ion_iommu_release);
873 mutex_unlock(&buffer->lock);
876 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
877 struct ion_handle *handle)
879 struct ion_iommu_map *iommu_map;
880 struct ion_buffer *buffer;
882 mutex_lock(&client->lock);
883 buffer = handle->buffer;
884 pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
886 mutex_lock(&buffer->lock);
888 iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
891 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
896 kref_put(&iommu_map->ref, ion_iommu_release);
898 buffer->iommu_map_cnt--;
901 mutex_unlock(&buffer->lock);
902 mutex_unlock(&client->lock);
904 EXPORT_SYMBOL(ion_unmap_iommu);
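/*
 * Sketch of the Rockchip IOMMU path above ("iommu_dev" is the master device
 * behind the IOMMU, e.g. a display or video block; the other names are
 * placeholders).  Note the buffer must not have been allocated with
 * ION_FLAG_CACHED, per the check in ion_map_iommu():
 *
 *	unsigned long iova, size;
 *	int ret;
 *
 *	ret = ion_map_iommu(iommu_dev, client, handle, &iova, &size);
 *	if (ret)
 *		return ret;
 *	... program iova into the device ...
 *	ion_unmap_iommu(iommu_dev, client, handle);
 */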
906 static int ion_debug_client_show_buffer_map(struct seq_file *s, struct ion_buffer *buffer)
908 struct ion_iommu_map *iommu_map;
909 const struct rb_root *rb;
910 struct rb_node *node;
912 pr_debug("%s: buffer(%p)\n", __func__, buffer);
914 mutex_lock(&buffer->lock);
915 rb = &(buffer->iommu_maps);
918 while (node != NULL) {
919 iommu_map = rb_entry(node, struct ion_iommu_map, node);
920 seq_printf(s, "%16.16s: 0x%08lx 0x%08x %8zuKB %4d\n",
921 "<iommu>", iommu_map->iova_addr, 0, iommu_map->mapped_size>>10,
922 atomic_read(&iommu_map->ref.refcount));
924 node = rb_next(node);
927 mutex_unlock(&buffer->lock);
933 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
935 struct ion_client *client = s->private;
938 seq_printf(s, "----------------------------------------------------\n");
939 seq_printf(s, "%16.s: %12.s %12.s %10.s %4.s %4.s %4.s\n", "heap_name", "VA", "PA",
940 "size", "HC", "IBR", "IHR");
941 mutex_lock(&client->lock);
942 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
943 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
944 struct ion_buffer *buffer = handle->buffer;
945 ion_phys_addr_t pa = 0;
946 size_t len = buffer->size;
948 mutex_lock(&buffer->lock);
950 if (buffer->heap->ops->phys)
951 buffer->heap->ops->phys(buffer->heap, buffer, &pa, &len);
953 seq_printf(s, "%16.16s: 0x%08lx 0x%08lx %8zuKB %4d %4d %4d\n",
954 buffer->heap->name, (unsigned long)buffer->vaddr, pa, len>>10, buffer->handle_count,
955 atomic_read(&buffer->ref.refcount), atomic_read(&handle->ref.refcount));
957 mutex_unlock(&buffer->lock);
959 #ifdef CONFIG_ROCKCHIP_IOMMU
960 ion_debug_client_show_buffer_map(s, buffer);
963 mutex_unlock(&client->lock);
968 static int ion_debug_client_show(struct seq_file *s, void *unused)
970 struct ion_client *client = s->private;
972 size_t sizes[ION_NUM_HEAP_IDS] = {0};
973 const char *names[ION_NUM_HEAP_IDS] = {NULL};
976 mutex_lock(&client->lock);
977 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
978 struct ion_handle *handle = rb_entry(n, struct ion_handle,
980 unsigned int id = handle->buffer->heap->id;
983 names[id] = handle->buffer->heap->name;
984 sizes[id] += handle->buffer->size;
986 mutex_unlock(&client->lock);
988 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
989 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
992 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
994 ion_debug_client_show_buffer(s, unused);
998 static int ion_debug_client_open(struct inode *inode, struct file *file)
1000 return single_open(file, ion_debug_client_show, inode->i_private);
1003 static const struct file_operations debug_client_fops = {
1004 .open = ion_debug_client_open,
1006 .llseek = seq_lseek,
1007 .release = single_release,
1010 static int ion_get_client_serial(const struct rb_root *root,
1011 const unsigned char *name)
1014 struct rb_node *node;
1015 for (node = rb_first(root); node; node = rb_next(node)) {
1016 struct ion_client *client = rb_entry(node, struct ion_client,
1018 if (strcmp(client->name, name))
1020 serial = max(serial, client->display_serial);
1025 struct ion_client *ion_client_create(struct ion_device *dev,
1028 struct ion_client *client;
1029 struct task_struct *task;
1031 struct rb_node *parent = NULL;
1032 struct ion_client *entry;
1036 pr_err("%s: Name cannot be null\n", __func__);
1037 return ERR_PTR(-EINVAL);
1040 get_task_struct(current->group_leader);
1041 task_lock(current->group_leader);
1042 pid = task_pid_nr(current->group_leader);
1043 /* don't bother to store task struct for kernel threads,
1044 they can't be killed anyway */
1045 if (current->group_leader->flags & PF_KTHREAD) {
1046 put_task_struct(current->group_leader);
1049 task = current->group_leader;
1051 task_unlock(current->group_leader);
1053 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1055 goto err_put_task_struct;
1058 client->handles = RB_ROOT;
1059 idr_init(&client->idr);
1060 mutex_init(&client->lock);
1061 client->task = task;
1063 client->name = kstrdup(name, GFP_KERNEL);
1065 goto err_free_client;
1067 down_write(&dev->lock);
1068 client->display_serial = ion_get_client_serial(&dev->clients, name);
1069 client->display_name = kasprintf(
1070 GFP_KERNEL, "%s-%d", name, client->display_serial);
1071 if (!client->display_name) {
1072 up_write(&dev->lock);
1073 goto err_free_client_name;
1075 p = &dev->clients.rb_node;
1078 entry = rb_entry(parent, struct ion_client, node);
1082 else if (client > entry)
1083 p = &(*p)->rb_right;
1085 rb_link_node(&client->node, parent, p);
1086 rb_insert_color(&client->node, &dev->clients);
1088 client->debug_root = debugfs_create_file(client->display_name, 0664,
1089 dev->clients_debug_root,
1090 client, &debug_client_fops);
1091 if (!client->debug_root) {
1092 char buf[256], *path;
1093 path = dentry_path(dev->clients_debug_root, buf, 256);
1094 pr_err("Failed to create client debugfs at %s/%s\n",
1095 path, client->display_name);
1098 up_write(&dev->lock);
1102 err_free_client_name:
1103 kfree(client->name);
1106 err_put_task_struct:
1108 put_task_struct(current->group_leader);
1109 return ERR_PTR(-ENOMEM);
1111 EXPORT_SYMBOL(ion_client_create);
1113 void ion_client_destroy(struct ion_client *client)
1115 struct ion_device *dev = client->dev;
1118 pr_debug("%s: %d\n", __func__, __LINE__);
1119 while ((n = rb_first(&client->handles))) {
1120 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1122 ion_handle_destroy(&handle->ref);
1125 idr_destroy(&client->idr);
1127 down_write(&dev->lock);
1129 put_task_struct(client->task);
1130 rb_erase(&client->node, &dev->clients);
1131 debugfs_remove_recursive(client->debug_root);
1132 up_write(&dev->lock);
1134 kfree(client->display_name);
1135 kfree(client->name);
1138 EXPORT_SYMBOL(ion_client_destroy);
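/*
 * Lifecycle sketch for an in-kernel user ("idev" stands for the ion_device
 * the driver was handed at probe time): create one client per logical user,
 * allocate through it, and destroy it when done, which also drops any
 * handles the client still holds.
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 */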
1140 struct sg_table *ion_sg_table(struct ion_client *client,
1141 struct ion_handle *handle)
1143 struct ion_buffer *buffer;
1144 struct sg_table *table;
1146 mutex_lock(&client->lock);
1147 if (!ion_handle_validate(client, handle)) {
1148 pr_err("%s: invalid handle passed to map_dma.\n",
1150 mutex_unlock(&client->lock);
1151 return ERR_PTR(-EINVAL);
1153 buffer = handle->buffer;
1154 table = buffer->sg_table;
1155 mutex_unlock(&client->lock);
1158 EXPORT_SYMBOL(ion_sg_table);
1160 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1162 enum dma_data_direction direction);
1164 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1165 enum dma_data_direction direction)
1167 struct dma_buf *dmabuf = attachment->dmabuf;
1168 struct ion_buffer *buffer = dmabuf->priv;
1170 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1171 return buffer->sg_table;
1174 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1175 struct sg_table *table,
1176 enum dma_data_direction direction)
1180 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1181 size_t size, enum dma_data_direction dir)
1183 struct scatterlist sg;
1185 sg_init_table(&sg, 1);
1186 sg_set_page(&sg, page, size, 0);
1188 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1189 * for the targeted device, but this works on the currently targeted
1192 sg_dma_address(&sg) = page_to_phys(page);
1193 dma_sync_sg_for_device(dev, &sg, 1, dir);
1196 struct ion_vma_list {
1197 struct list_head list;
1198 struct vm_area_struct *vma;
1201 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1203 enum dma_data_direction dir)
1205 struct ion_vma_list *vma_list;
1206 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1209 pr_debug("%s: syncing for device %s\n", __func__,
1210 dev ? dev_name(dev) : "null");
1212 if (!ion_buffer_fault_user_mappings(buffer))
1215 mutex_lock(&buffer->lock);
1216 for (i = 0; i < pages; i++) {
1217 struct page *page = buffer->pages[i];
1219 if (ion_buffer_page_is_dirty(page))
1220 ion_pages_sync_for_device(dev, ion_buffer_page(page),
1223 ion_buffer_page_clean(buffer->pages + i);
1225 list_for_each_entry(vma_list, &buffer->vmas, list) {
1226 struct vm_area_struct *vma = vma_list->vma;
1228 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1231 mutex_unlock(&buffer->lock);
1234 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1236 struct ion_buffer *buffer = vma->vm_private_data;
1240 mutex_lock(&buffer->lock);
1241 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1242 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1244 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1245 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1246 mutex_unlock(&buffer->lock);
1248 return VM_FAULT_ERROR;
1250 return VM_FAULT_NOPAGE;
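/*
 * The fault handler above works together with ion_buffer_sync_for_device():
 * a cached buffer that faults its pages in has each faulted page tagged
 * dirty in buffer->pages[].  When a device is about to do DMA, the dirty
 * pages are flushed, the tags are cleared and every user mapping is zapped,
 * so the next CPU access faults again and re-marks the page.
 */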
1253 static void ion_vm_open(struct vm_area_struct *vma)
1255 struct ion_buffer *buffer = vma->vm_private_data;
1256 struct ion_vma_list *vma_list;
1258 vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1261 vma_list->vma = vma;
1262 mutex_lock(&buffer->lock);
1263 list_add(&vma_list->list, &buffer->vmas);
1264 mutex_unlock(&buffer->lock);
1265 pr_debug("%s: adding %p\n", __func__, vma);
1268 static void ion_vm_close(struct vm_area_struct *vma)
1270 struct ion_buffer *buffer = vma->vm_private_data;
1271 struct ion_vma_list *vma_list, *tmp;
1273 pr_debug("%s\n", __func__);
1274 mutex_lock(&buffer->lock);
1275 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1276 if (vma_list->vma != vma)
1278 list_del(&vma_list->list);
1280 pr_debug("%s: deleting %p\n", __func__, vma);
1283 mutex_unlock(&buffer->lock);
1286 static struct vm_operations_struct ion_vma_ops = {
1287 .open = ion_vm_open,
1288 .close = ion_vm_close,
1289 .fault = ion_vm_fault,
1292 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1294 struct ion_buffer *buffer = dmabuf->priv;
1297 if (!buffer->heap->ops->map_user) {
1298 pr_err("%s: this heap does not define a method for mapping "
1299 "to userspace\n", __func__);
1303 if (ion_buffer_fault_user_mappings(buffer)) {
1304 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1306 vma->vm_private_data = buffer;
1307 vma->vm_ops = &ion_vma_ops;
1312 if (!(buffer->flags & ION_FLAG_CACHED))
1313 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1315 mutex_lock(&buffer->lock);
1316 /* now map it to userspace */
1317 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1318 mutex_unlock(&buffer->lock);
1321 pr_err("%s: failure mapping buffer to userspace\n",
1327 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1329 struct ion_buffer *buffer = dmabuf->priv;
1330 ion_buffer_put(buffer);
1333 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1335 struct ion_buffer *buffer = dmabuf->priv;
1336 return buffer->vaddr + offset * PAGE_SIZE;
1339 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1345 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1347 enum dma_data_direction direction)
1349 struct ion_buffer *buffer = dmabuf->priv;
1352 if (!buffer->heap->ops->map_kernel) {
1353 pr_err("%s: map kernel is not implemented by this heap.\n",
1358 mutex_lock(&buffer->lock);
1359 vaddr = ion_buffer_kmap_get(buffer);
1360 mutex_unlock(&buffer->lock);
1362 return PTR_ERR(vaddr);
1366 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1368 enum dma_data_direction direction)
1370 struct ion_buffer *buffer = dmabuf->priv;
1372 mutex_lock(&buffer->lock);
1373 ion_buffer_kmap_put(buffer);
1374 mutex_unlock(&buffer->lock);
1377 static struct dma_buf_ops dma_buf_ops = {
1378 .map_dma_buf = ion_map_dma_buf,
1379 .unmap_dma_buf = ion_unmap_dma_buf,
1381 .release = ion_dma_buf_release,
1382 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1383 .end_cpu_access = ion_dma_buf_end_cpu_access,
1384 .kmap_atomic = ion_dma_buf_kmap,
1385 .kunmap_atomic = ion_dma_buf_kunmap,
1386 .kmap = ion_dma_buf_kmap,
1387 .kunmap = ion_dma_buf_kunmap,
1390 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1391 struct ion_handle *handle)
1393 struct ion_buffer *buffer;
1394 struct dma_buf *dmabuf;
1397 mutex_lock(&client->lock);
1398 valid_handle = ion_handle_validate(client, handle);
1399 if (!valid_handle) {
1400 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1401 mutex_unlock(&client->lock);
1402 return ERR_PTR(-EINVAL);
1404 buffer = handle->buffer;
1405 ion_buffer_get(buffer);
1406 mutex_unlock(&client->lock);
1408 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1409 if (IS_ERR(dmabuf)) {
1410 ion_buffer_put(buffer);
1416 EXPORT_SYMBOL(ion_share_dma_buf);
1418 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1420 struct dma_buf *dmabuf;
1423 dmabuf = ion_share_dma_buf(client, handle);
1425 return PTR_ERR(dmabuf);
1427 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1429 dma_buf_put(dmabuf);
1433 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1435 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1437 struct dma_buf *dmabuf;
1438 struct ion_buffer *buffer;
1439 struct ion_handle *handle;
1442 dmabuf = dma_buf_get(fd);
1444 return ERR_CAST(dmabuf);
1445 /* if this memory came from ion */
1447 if (dmabuf->ops != &dma_buf_ops) {
1448 pr_err("%s: can not import dmabuf from another exporter\n",
1450 dma_buf_put(dmabuf);
1451 return ERR_PTR(-EINVAL);
1453 buffer = dmabuf->priv;
1455 mutex_lock(&client->lock);
1456 /* if a handle exists for this buffer just take a reference to it */
1457 handle = ion_handle_lookup(client, buffer);
1458 if (!IS_ERR(handle)) {
1459 ion_handle_get(handle);
1460 mutex_unlock(&client->lock);
1463 mutex_unlock(&client->lock);
1465 handle = ion_handle_create(client, buffer);
1469 mutex_lock(&client->lock);
1470 ret = ion_handle_add(client, handle);
1471 mutex_unlock(&client->lock);
1473 ion_handle_put(handle);
1474 handle = ERR_PTR(ret);
1478 dma_buf_put(dmabuf);
1481 EXPORT_SYMBOL(ion_import_dma_buf);
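/*
 * Sharing sketch (client_a, client_b and handle_a are placeholders): one
 * client exports a handle as a dma-buf fd, which can then be handed to
 * userspace or another driver and re-imported as a new handle.
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *
 *	if (fd < 0)
 *		return fd;
 *	...
 *	handle_b = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR(handle_b))
 *		return PTR_ERR(handle_b);
 */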
1483 static int ion_sync_for_device(struct ion_client *client, int fd)
1485 struct dma_buf *dmabuf;
1486 struct ion_buffer *buffer;
1488 dmabuf = dma_buf_get(fd);
1490 return PTR_ERR(dmabuf);
1492 /* if this memory came from ion */
1493 if (dmabuf->ops != &dma_buf_ops) {
1494 pr_err("%s: can not sync dmabuf from another exporter\n",
1496 dma_buf_put(dmabuf);
1499 buffer = dmabuf->priv;
1501 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1502 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1503 dma_buf_put(dmabuf);
1507 /* fix up the cases where the ioctl direction bits are incorrect */
1508 static unsigned int ion_ioctl_dir(unsigned int cmd)
1513 case ION_IOC_CUSTOM:
1516 return _IOC_DIR(cmd);
1520 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1522 struct ion_client *client = filp->private_data;
1523 struct ion_device *dev = client->dev;
1524 struct ion_handle *cleanup_handle = NULL;
1529 struct ion_fd_data fd;
1530 struct ion_allocation_data allocation;
1531 struct ion_handle_data handle;
1532 struct ion_custom_data custom;
1535 dir = ion_ioctl_dir(cmd);
1537 if (_IOC_SIZE(cmd) > sizeof(data))
1540 if (dir & _IOC_WRITE)
1541 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1547 struct ion_handle *handle;
1549 handle = ion_alloc(client, data.allocation.len,
1550 data.allocation.align,
1551 data.allocation.heap_id_mask,
1552 data.allocation.flags);
1554 return PTR_ERR(handle);
1556 data.allocation.handle = handle->id;
1558 cleanup_handle = handle;
1563 struct ion_handle *handle;
1565 handle = ion_handle_get_by_id(client, data.handle.handle);
1567 return PTR_ERR(handle);
1568 ion_free(client, handle);
1569 ion_handle_put(handle);
1575 struct ion_handle *handle;
1577 handle = ion_handle_get_by_id(client, data.handle.handle);
1579 return PTR_ERR(handle);
1580 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1581 ion_handle_put(handle);
1586 case ION_IOC_IMPORT:
1588 struct ion_handle *handle;
1589 handle = ion_import_dma_buf(client, data.fd.fd);
1591 ret = PTR_ERR(handle);
1593 data.handle.handle = handle->id;
1598 ret = ion_sync_for_device(client, data.fd.fd);
1601 case ION_IOC_CUSTOM:
1603 if (!dev->custom_ioctl)
1605 ret = dev->custom_ioctl(client, data.custom.cmd,
1613 if (dir & _IOC_READ) {
1614 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1616 ion_free(client, cleanup_handle);
1623 static int ion_release(struct inode *inode, struct file *file)
1625 struct ion_client *client = file->private_data;
1627 pr_debug("%s: %d\n", __func__, __LINE__);
1628 ion_client_destroy(client);
1632 static int ion_open(struct inode *inode, struct file *file)
1634 struct miscdevice *miscdev = file->private_data;
1635 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1636 struct ion_client *client;
1637 char debug_name[64];
1639 pr_debug("%s: %d\n", __func__, __LINE__);
1640 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1641 client = ion_client_create(dev, debug_name);
1643 return PTR_ERR(client);
1644 file->private_data = client;
1649 static const struct file_operations ion_fops = {
1650 .owner = THIS_MODULE,
1652 .release = ion_release,
1653 .unlocked_ioctl = ion_ioctl,
1654 .compat_ioctl = compat_ion_ioctl,
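/*
 * Userspace reaches the driver through this misc device (/dev/ion).  A
 * minimal user-side sequence, assuming the usual ION_IOC_ALLOC and
 * ION_IOC_SHARE commands from the ion uapi header (error handling omitted,
 * values are placeholders):
 *
 *	int ion_fd = open("/dev/ion", O_RDWR);
 *	struct ion_allocation_data alloc = {
 *		.len = len,
 *		.align = 0,
 *		.heap_id_mask = 1 << heap_id,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       share.fd, 0);
 */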
1657 static size_t ion_debug_heap_total(struct ion_client *client,
1663 mutex_lock(&client->lock);
1664 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1665 struct ion_handle *handle = rb_entry(n,
1668 if (handle->buffer->heap->id == id)
1669 size += handle->buffer->size;
1671 mutex_unlock(&client->lock);
1675 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1677 struct ion_heap *heap = s->private;
1678 struct ion_device *dev = heap->dev;
1680 size_t total_size = 0;
1681 size_t total_orphaned_size = 0;
1683 seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
1684 seq_printf(s, "----------------------------------------------------\n");
1686 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1687 struct ion_client *client = rb_entry(n, struct ion_client,
1689 size_t size = ion_debug_heap_total(client, heap->id);
1693 char task_comm[TASK_COMM_LEN];
1695 get_task_comm(task_comm, client->task);
1696 seq_printf(s, "%16.s %16u %16zu\n", task_comm,
1699 seq_printf(s, "%16.s %16u %16zu\n", client->name,
1703 seq_printf(s, "----------------------------------------------------\n");
1704 seq_printf(s, "orphaned allocations (info is from last known client):"
1706 mutex_lock(&dev->buffer_lock);
1707 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1708 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1710 if (buffer->heap->id != heap->id)
1712 total_size += buffer->size;
1713 if (!buffer->handle_count) {
1714 seq_printf(s, "%16.s %16u %16zu %d %d\n",
1715 buffer->task_comm, buffer->pid,
1716 buffer->size, buffer->kmap_cnt,
1717 atomic_read(&buffer->ref.refcount));
1718 total_orphaned_size += buffer->size;
1721 mutex_unlock(&dev->buffer_lock);
1722 seq_printf(s, "----------------------------------------------------\n");
1723 seq_printf(s, "%16.s %16zu\n", "total orphaned",
1724 total_orphaned_size);
1725 seq_printf(s, "%16.s %16zu\n", "total ", total_size);
1726 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1727 seq_printf(s, "%16.s %16zu\n", "deferred free",
1728 heap->free_list_size);
1729 seq_printf(s, "----------------------------------------------------\n");
1731 if (heap->debug_show)
1732 heap->debug_show(heap, s, unused);
1737 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1739 return single_open(file, ion_debug_heap_show, inode->i_private);
1742 static const struct file_operations debug_heap_fops = {
1743 .open = ion_debug_heap_open,
1745 .llseek = seq_lseek,
1746 .release = single_release,
1749 #ifdef DEBUG_HEAP_SHRINKER
1750 static int debug_shrink_set(void *data, u64 val)
1752 struct ion_heap *heap = data;
1753 struct shrink_control sc;
1762 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1763 sc.nr_to_scan = objs;
1765 heap->shrinker.shrink(&heap->shrinker, &sc);
1769 static int debug_shrink_get(void *data, u64 *val)
1771 struct ion_heap *heap = data;
1772 struct shrink_control sc;
1778 objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1783 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1784 debug_shrink_set, "%llu\n");
1788 // struct "cma" quoted from drivers/base/dma-contiguous.c
1790 unsigned long base_pfn;
1791 unsigned long count;
1792 unsigned long *bitmap;
1795 // struct "ion_cma_heap" quoted from drivers/staging/android/ion/ion_cma_heap.c
1796 struct ion_cma_heap {
1797 struct ion_heap heap;
1801 static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
1803 struct ion_heap *heap = s->private;
1804 struct ion_cma_heap *cma_heap = container_of(heap,
1805 struct ion_cma_heap,
1807 struct device *dev = cma_heap->dev;
1808 struct cma *cma = dev_get_cma_area(dev);
1810 int rows = cma->count/(SZ_1M >> PAGE_SHIFT);
1811 phys_addr_t base = __pfn_to_phys(cma->base_pfn);
1813 seq_printf(s, "%s Heap bitmap:\n", heap->name);
1815 for (i = rows - 1; i >= 0; i--) {
1816 seq_printf(s, "%.4uM@0x%08x: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
1817 i+1, base+(i)*SZ_1M,
1818 cma->bitmap[i*8 + 7],
1819 cma->bitmap[i*8 + 6],
1820 cma->bitmap[i*8 + 5],
1821 cma->bitmap[i*8 + 4],
1822 cma->bitmap[i*8 + 3],
1823 cma->bitmap[i*8 + 2],
1824 cma->bitmap[i*8 + 1],
1827 seq_printf(s, "Heap size: %luM, Heap base: 0x%08x\n",
1828 (cma->count)>>8, base);
1833 static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
1835 return single_open(file, ion_cma_heap_debug_show, inode->i_private);
1838 static const struct file_operations debug_heap_bitmap_fops = {
1839 .open = ion_debug_heap_bitmap_open,
1841 .llseek = seq_lseek,
1842 .release = single_release,
1846 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1848 struct dentry *debug_file;
1850 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1851 !heap->ops->unmap_dma)
1852 pr_err("%s: can not add heap with invalid ops struct.\n",
1855 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1856 ion_heap_init_deferred_free(heap);
1858 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1859 ion_heap_init_shrinker(heap);
1862 down_write(&dev->lock);
1863 /* use negative heap->id to reverse the priority -- when traversing
1864 the list later attempt higher id numbers first */
1865 plist_node_init(&heap->node, -heap->id);
1866 plist_add(&heap->node, &dev->heaps);
1867 debug_file = debugfs_create_file(heap->name, 0664,
1868 dev->heaps_debug_root, heap,
1872 char buf[256], *path;
1873 path = dentry_path(dev->heaps_debug_root, buf, 256);
1874 pr_err("Failed to create heap debugfs at %s/%s\n",
1878 #ifdef DEBUG_HEAP_SHRINKER
1879 if (heap->shrinker.shrink) {
1880 char debug_name[64];
1882 snprintf(debug_name, 64, "%s_shrink", heap->name);
1883 debug_file = debugfs_create_file(
1884 debug_name, 0644, dev->heaps_debug_root, heap,
1885 &debug_shrink_fops);
1887 char buf[256], *path;
1888 path = dentry_path(dev->heaps_debug_root, buf, 256);
1889 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1895 if (ION_HEAP_TYPE_DMA == heap->type) {
1896 char *heap_bitmap_name = kasprintf(
1897 GFP_KERNEL, "%s-bitmap", heap->name);
1898 debug_file = debugfs_create_file(heap_bitmap_name, 0664,
1899 dev->heaps_debug_root, heap,
1900 &debug_heap_bitmap_fops);
1902 char buf[256], *path;
1903 path = dentry_path(dev->heaps_debug_root, buf, 256);
1904 pr_err("Failed to create heap debugfs at %s/%s\n",
1905 path, heap_bitmap_name);
1907 kfree(heap_bitmap_name);
1910 up_write(&dev->lock);
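/*
 * Registration sketch for a platform ion driver: the device is created once
 * with ion_device_create() (below) and each heap, typically built from
 * platform data elsewhere, is then added here ("pdev_heaps" and "num_heaps"
 * are placeholders):
 *
 *	idev = ion_device_create(NULL);
 *	for (i = 0; i < num_heaps; i++)
 *		ion_device_add_heap(idev, pdev_heaps[i]);
 */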
1913 struct ion_device *ion_device_create(long (*custom_ioctl)
1914 (struct ion_client *client,
1918 struct ion_device *idev;
1921 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1923 return ERR_PTR(-ENOMEM);
1925 idev->dev.minor = MISC_DYNAMIC_MINOR;
1926 idev->dev.name = "ion";
1927 idev->dev.fops = &ion_fops;
1928 idev->dev.parent = NULL;
1929 ret = misc_register(&idev->dev);
1931 pr_err("ion: failed to register misc device.\n");
1932 return ERR_PTR(ret);
1935 idev->debug_root = debugfs_create_dir("ion", NULL);
1936 if (!idev->debug_root) {
1937 pr_err("ion: failed to create debugfs root directory.\n");
1940 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1941 if (!idev->heaps_debug_root) {
1942 pr_err("ion: failed to create debugfs heaps directory.\n");
1945 idev->clients_debug_root = debugfs_create_dir("clients",
1947 if (!idev->clients_debug_root)
1948 pr_err("ion: failed to create debugfs clients directory.\n");
1950 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
1951 rockchip_ion_snapshot_debugfs(idev->debug_root);
1956 idev->custom_ioctl = custom_ioctl;
1957 idev->buffers = RB_ROOT;
1958 mutex_init(&idev->buffer_lock);
1959 init_rwsem(&idev->lock);
1960 plist_head_init(&idev->heaps);
1961 idev->clients = RB_ROOT;
1965 void ion_device_destroy(struct ion_device *dev)
1967 misc_deregister(&dev->dev);
1968 debugfs_remove_recursive(dev->debug_root);
1969 /* XXX need to free the heaps and clients ? */
1973 void __init ion_reserve(struct ion_platform_data *data)
1977 for (i = 0; i < data->nr; i++) {
1978 if (data->heaps[i].size == 0)
1981 if (data->heaps[i].base == 0) {
1983 paddr = memblock_alloc_base(data->heaps[i].size,
1984 data->heaps[i].align,
1985 MEMBLOCK_ALLOC_ANYWHERE);
1987 pr_err("%s: error allocating memblock for "
1992 data->heaps[i].base = paddr;
1994 int ret = memblock_reserve(data->heaps[i].base,
1995 data->heaps[i].size);
1997 pr_err("memblock reserve of %zx@%lx failed\n",
1998 data->heaps[i].size,
1999 data->heaps[i].base);
2001 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2002 data->heaps[i].name,
2003 data->heaps[i].base,
2004 data->heaps[i].size);
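/*
 * ion_reserve() is intended to run early (e.g. from a board file's reserve
 * hook), before the page allocator owns the memory.  A carveout entry in the
 * platform data might look like this (values are placeholders):
 *
 *	static struct ion_platform_heap heaps[] = {
 *		{
 *			.type = ION_HEAP_TYPE_CARVEOUT,
 *			.id   = 1,
 *			.name = "carveout",
 *			.base = 0,	(0 lets ion_reserve pick a base)
 *			.size = SZ_16M,
 *		},
 *	};
 *	static struct ion_platform_data pdata = {
 *		.nr    = ARRAY_SIZE(heaps),
 *		.heaps = heaps,
 *	};
 */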
2008 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2010 /* find the largest contiguous free area that can still be allocated */
2011 static unsigned long ion_find_max_zero_area(unsigned long *map, unsigned long size)
2013 unsigned long index, i, zero_sz, max_zero_sz, start;
2018 index = find_next_zero_bit(map, size, start);
2019 if (index >= size) break;
2021 i = find_next_bit(map, size, index);
2023 pr_debug("zero[%lx, %lx]\n", index, zero_sz);
2024 max_zero_sz = max(max_zero_sz, zero_sz);
2026 } while (start <= size);
2028 pr_debug("max_zero_sz=%lx\n", max_zero_sz);
2032 static int ion_snapshot_save(struct ion_device *idev, size_t len)
2034 static struct seq_file seqf;
2035 struct ion_heap *heap;
2038 seqf.buf = rockchip_ion_snapshot_get(&seqf.size);
2042 memset(seqf.buf, 0, seqf.size);
2044 pr_debug("%s: save snapshot 0x%x@0x%lx\n", __func__, seqf.size,
2047 seq_printf(&seqf, "call by comm: %s pid: %d, alloc: %uKB\n",
2048 current->comm, current->pid, len>>10);
2050 down_read(&idev->lock);
2052 plist_for_each_entry(heap, &idev->heaps, node) {
2053 seqf.private = (void *)heap;
2054 seq_printf(&seqf, "++++++++++++++++ HEAP: %s ++++++++++++++++\n",
2056 ion_debug_heap_show(&seqf, NULL);
2057 if (ION_HEAP_TYPE_DMA == heap->type) {
2058 struct ion_cma_heap *cma_heap = container_of(heap,
2059 struct ion_cma_heap,
2061 struct cma *cma = dev_get_cma_area(cma_heap->dev);
2062 seq_printf(&seqf, "\n");
2063 seq_printf(&seqf, "Maximum allocation of pages: %ld\n",
2064 ion_find_max_zero_area(cma->bitmap, cma->count));
2065 seq_printf(&seqf, "\n");
2069 up_read(&idev->lock);