rk: ion: add caller info in snapshot if alloc failed
[firefly-linux-kernel-4.4.55.git] / drivers / staging / android / ion / ion.c
1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <linux/dma-contiguous.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 /**
46  * struct ion_device - the metadata of the ion device node
47  * @dev:                the actual misc device
48  * @buffers:            an rb tree of all the existing buffers
49  * @buffer_lock:        lock protecting the tree of buffers
50  * @lock:               rwsem protecting the tree of heaps and clients
51  * @heaps:              list of all the heaps in the system
52  * @clients:            an rb tree of all the existing clients
53  */
54 struct ion_device {
55         struct miscdevice dev;
56         struct rb_root buffers;
57         struct mutex buffer_lock;
58         struct rw_semaphore lock;
59         struct plist_head heaps;
60         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
61                               unsigned long arg);
62         struct rb_root clients;
63         struct dentry *debug_root;
64         struct dentry *heaps_debug_root;
65         struct dentry *clients_debug_root;
66 };
67
68 /**
69  * struct ion_client - a process/hw block local address space
70  * @node:               node in the tree of all clients
71  * @dev:                backpointer to ion device
72  * @handles:            an rb tree of all the handles in this client
73  * @idr:                an idr space for allocating handle ids
74  * @lock:               lock protecting the tree of handles
75  * @name:               used for debugging
76  * @display_name:       used for debugging (unique version of @name)
77  * @display_serial:     used for debugging (to make display_name unique)
78  * @task:               used for debugging
79  *
80  * A client represents a list of buffers this client may access.
81  * The mutex stored here is used to protect both handles tree
82  * as well as the handles themselves, and should be held while modifying either.
83  */
84 struct ion_client {
85         struct rb_node node;
86         struct ion_device *dev;
87         struct rb_root handles;
88         struct idr idr;
89         struct mutex lock;
90         const char *name;
91         char *display_name;
92         int display_serial;
93         struct task_struct *task;
94         pid_t pid;
95         struct dentry *debug_root;
96 };
97
98 /**
99  * ion_handle - a client local reference to a buffer
100  * @ref:                reference count
101  * @client:             back pointer to the client the buffer resides in
102  * @buffer:             pointer to the buffer
103  * @node:               node in the client's handle rbtree
104  * @kmap_cnt:           count of times this client has mapped to kernel
105  * @id:                 client-unique id allocated by client->idr
106  *
107  * Modifications to node or kmap_cnt should be protected by the
108  * lock in the client.  Other fields are never changed after initialization.
109  */
110 struct ion_handle {
111         struct kref ref;
112         struct ion_client *client;
113         struct ion_buffer *buffer;
114         struct rb_node node;
115         unsigned int kmap_cnt;
116         int id;
117 };
118
119 #ifdef CONFIG_ROCKCHIP_IOMMU
120 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
121 #endif
122 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
123 extern char *rockchip_ion_snapshot_get(unsigned *size);
124 extern int rockchip_ion_snapshot_debugfs(struct dentry* root);
125 static int ion_snapshot_save(struct ion_device *idev, size_t len);
126 #endif
127
128 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
129 {
130         return (buffer->flags & ION_FLAG_CACHED) &&
131                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
132 }
133
134 bool ion_buffer_cached(struct ion_buffer *buffer)
135 {
136         return !!(buffer->flags & ION_FLAG_CACHED);
137 }
138
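/*
 * For buffers that fault in their user mappings, the low bit of each
 * struct page pointer stored in buffer->pages doubles as a "dirty" flag
 * (page pointers are at least word aligned, so bit 0 is otherwise
 * unused).  The helpers below mask, test, set and clear that bit.
 */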
139 static inline struct page *ion_buffer_page(struct page *page)
140 {
141         return (struct page *)((unsigned long)page & ~(1UL));
142 }
143
144 static inline bool ion_buffer_page_is_dirty(struct page *page)
145 {
146         return !!((unsigned long)page & 1UL);
147 }
148
149 static inline void ion_buffer_page_dirty(struct page **page)
150 {
151         *page = (struct page *)((unsigned long)(*page) | 1UL);
152 }
153
154 static inline void ion_buffer_page_clean(struct page **page)
155 {
156         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
157 }
158
159 /* this function should only be called while dev->lock is held */
160 static void ion_buffer_add(struct ion_device *dev,
161                            struct ion_buffer *buffer)
162 {
163         struct rb_node **p = &dev->buffers.rb_node;
164         struct rb_node *parent = NULL;
165         struct ion_buffer *entry;
166
167         while (*p) {
168                 parent = *p;
169                 entry = rb_entry(parent, struct ion_buffer, node);
170
171                 if (buffer < entry) {
172                         p = &(*p)->rb_left;
173                 } else if (buffer > entry) {
174                         p = &(*p)->rb_right;
175                 } else {
176                         pr_err("%s: buffer already found.", __func__);
177                         BUG();
178                 }
179         }
180
181         rb_link_node(&buffer->node, parent, p);
182         rb_insert_color(&buffer->node, &dev->buffers);
183 }
184
185 /* this function should only be called while dev->lock is held */
186 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
187                                      struct ion_device *dev,
188                                      unsigned long len,
189                                      unsigned long align,
190                                      unsigned long flags)
191 {
192         struct ion_buffer *buffer;
193         struct sg_table *table;
194         struct scatterlist *sg;
195         int i, ret;
196
197         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
198         if (!buffer)
199                 return ERR_PTR(-ENOMEM);
200
201         buffer->heap = heap;
202         buffer->flags = flags;
203         kref_init(&buffer->ref);
204
205         ret = heap->ops->allocate(heap, buffer, len, align, flags);
206
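        /*
         * A failed allocation on a deferred-free heap may just mean the
         * memory is still sitting on the heap's free list; drain it and
         * retry once before giving up.
         */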
207         if (ret) {
208                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
209                         goto err2;
210
211                 ion_heap_freelist_drain(heap, 0);
212                 ret = heap->ops->allocate(heap, buffer, len, align,
213                                           flags);
214                 if (ret)
215                         goto err2;
216         }
217
218         buffer->dev = dev;
219         buffer->size = len;
220
221         table = heap->ops->map_dma(heap, buffer);
222         if (WARN_ONCE(table == NULL,
223                         "heap->ops->map_dma should return ERR_PTR on error"))
224                 table = ERR_PTR(-EINVAL);
225         if (IS_ERR(table)) {
226                 heap->ops->free(buffer);
227                 kfree(buffer);
228                 return ERR_CAST(table);
229         }
230         buffer->sg_table = table;
231         if (ion_buffer_fault_user_mappings(buffer)) {
232                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
233                 struct scatterlist *sg;
234                 int i, j, k = 0;
235
236                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
237                 if (!buffer->pages) {
238                         ret = -ENOMEM;
239                         goto err1;
240                 }
241
242                 for_each_sg(table->sgl, sg, table->nents, i) {
243                         struct page *page = sg_page(sg);
244
245                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
246                                 buffer->pages[k++] = page++;
247                 }
251         }
252
255         INIT_LIST_HEAD(&buffer->vmas);
256         mutex_init(&buffer->lock);
257         /* this will set up dma addresses for the sglist -- it is not
258            technically correct as per the dma api -- a specific
259            device isn't really taking ownership here.  However, in practice on
260            our systems the only dma_address space is physical addresses.
261            Additionally, we can't afford the overhead of invalidating every
262            allocation via dma_map_sg. The implicit contract here is that
263            memory coming from the heaps is ready for dma, i.e. if it has a
264            cached mapping that mapping has been invalidated */
265         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
266                 sg_dma_address(sg) = sg_phys(sg);
267         mutex_lock(&dev->buffer_lock);
268         ion_buffer_add(dev, buffer);
269         mutex_unlock(&dev->buffer_lock);
270         return buffer;
271
272 err1:
273         heap->ops->unmap_dma(heap, buffer);
274         heap->ops->free(buffer);
275         if (buffer->pages)
276                 vfree(buffer->pages);
278 err2:
279         kfree(buffer);
280         return ERR_PTR(ret);
281 }
282
283 void ion_buffer_destroy(struct ion_buffer *buffer)
284 {
285         if (WARN_ON(buffer->kmap_cnt > 0))
286                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
287         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
288 #ifdef CONFIG_ROCKCHIP_IOMMU
289         ion_iommu_force_unmap(buffer);
290 #endif
291         buffer->heap->ops->free(buffer);
292         if (buffer->pages)
293                 vfree(buffer->pages);
294         kfree(buffer);
295 }
296
297 static void _ion_buffer_destroy(struct kref *kref)
298 {
299         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
300         struct ion_heap *heap = buffer->heap;
301         struct ion_device *dev = buffer->dev;
302
303         mutex_lock(&dev->buffer_lock);
304         rb_erase(&buffer->node, &dev->buffers);
305         mutex_unlock(&dev->buffer_lock);
306
307         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
308                 ion_heap_freelist_add(heap, buffer);
309         else
310                 ion_buffer_destroy(buffer);
311 }
312
313 static void ion_buffer_get(struct ion_buffer *buffer)
314 {
315         kref_get(&buffer->ref);
316 }
317
318 static int ion_buffer_put(struct ion_buffer *buffer)
319 {
320         return kref_put(&buffer->ref, _ion_buffer_destroy);
321 }
322
323 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
324 {
325         mutex_lock(&buffer->lock);
326         buffer->handle_count++;
327         mutex_unlock(&buffer->lock);
328 }
329
330 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
331 {
332         /*
333          * when a buffer is removed from a handle, if it is not in
334          * any other handles, copy the taskcomm and the pid of the
335          * process it's being removed from into the buffer.  At this
336          * point there will be no way to track what processes this buffer is
337          * being used by; it only exists as a dma_buf file descriptor.
338          * The taskcomm and pid can provide a debug hint as to where this fd
339          * is in the system.
340          */
341         mutex_lock(&buffer->lock);
342         buffer->handle_count--;
343         BUG_ON(buffer->handle_count < 0);
344         if (!buffer->handle_count) {
345                 struct task_struct *task;
346
347                 task = current->group_leader;
348                 get_task_comm(buffer->task_comm, task);
349                 buffer->pid = task_pid_nr(task);
350         }
351         mutex_unlock(&buffer->lock);
352 }
353
354 static struct ion_handle *ion_handle_create(struct ion_client *client,
355                                      struct ion_buffer *buffer)
356 {
357         struct ion_handle *handle;
358
359         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
360         if (!handle)
361                 return ERR_PTR(-ENOMEM);
362         kref_init(&handle->ref);
363         RB_CLEAR_NODE(&handle->node);
364         handle->client = client;
365         ion_buffer_get(buffer);
366         ion_buffer_add_to_handle(buffer);
367         handle->buffer = buffer;
368
369         return handle;
370 }
371
372 static void ion_handle_kmap_put(struct ion_handle *);
373
374 static void ion_handle_destroy(struct kref *kref)
375 {
376         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
377         struct ion_client *client = handle->client;
378         struct ion_buffer *buffer = handle->buffer;
379
380         mutex_lock(&buffer->lock);
381         while (handle->kmap_cnt)
382                 ion_handle_kmap_put(handle);
383         mutex_unlock(&buffer->lock);
384
385         idr_remove(&client->idr, handle->id);
386         if (!RB_EMPTY_NODE(&handle->node))
387                 rb_erase(&handle->node, &client->handles);
388
389         ion_buffer_remove_from_handle(buffer);
390         ion_buffer_put(buffer);
391
392         kfree(handle);
393 }
394
395 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
396 {
397         return handle->buffer;
398 }
399
400 static void ion_handle_get(struct ion_handle *handle)
401 {
402         kref_get(&handle->ref);
403 }
404
405 int ion_handle_put(struct ion_handle *handle)
406 {
407         struct ion_client *client = handle->client;
408         int ret;
409
410         mutex_lock(&client->lock);
411         ret = kref_put(&handle->ref, ion_handle_destroy);
412         mutex_unlock(&client->lock);
413
414         return ret;
415 }
416
417 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
418                                             struct ion_buffer *buffer)
419 {
420         struct rb_node *n = client->handles.rb_node;
421
422         while (n) {
423                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
424                 if (buffer < entry->buffer)
425                         n = n->rb_left;
426                 else if (buffer > entry->buffer)
427                         n = n->rb_right;
428                 else
429                         return entry;
430         }
431         return ERR_PTR(-EINVAL);
432 }
433
434 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
435                                                 int id)
436 {
437         struct ion_handle *handle;
438
439         mutex_lock(&client->lock);
440         handle = idr_find(&client->idr, id);
441         if (handle)
442                 ion_handle_get(handle);
443         mutex_unlock(&client->lock);
444
445         return handle ? handle : ERR_PTR(-EINVAL);
446 }
447
448 static bool ion_handle_validate(struct ion_client *client,
449                                 struct ion_handle *handle)
450 {
451         WARN_ON(!mutex_is_locked(&client->lock));
452         return (idr_find(&client->idr, handle->id) == handle);
453 }
454
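/*
 * Give the handle a client-unique id from the client's idr and insert it
 * into the client's rbtree, which is keyed by buffer address so that
 * ion_handle_lookup() can find an existing handle when the same buffer
 * is imported again.
 */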
455 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
456 {
457         int id;
458         struct rb_node **p = &client->handles.rb_node;
459         struct rb_node *parent = NULL;
460         struct ion_handle *entry;
461
462         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
463         if (id < 0)
464                 return id;
465
466         handle->id = id;
467
468         while (*p) {
469                 parent = *p;
470                 entry = rb_entry(parent, struct ion_handle, node);
471
472                 if (handle->buffer < entry->buffer)
473                         p = &(*p)->rb_left;
474                 else if (handle->buffer > entry->buffer)
475                         p = &(*p)->rb_right;
476                 else
477                         WARN(1, "%s: buffer already found.", __func__);
478         }
479
480         rb_link_node(&handle->node, parent, p);
481         rb_insert_color(&handle->node, &client->handles);
482
483         return 0;
484 }
485
486 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
487                              size_t align, unsigned int heap_id_mask,
488                              unsigned int flags)
489 {
490         struct ion_handle *handle;
491         struct ion_device *dev = client->dev;
492         struct ion_buffer *buffer = NULL;
493         struct ion_heap *heap;
494         int ret;
495
496         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
497                  len, align, heap_id_mask, flags);
498         /*
499          * traverse the list of heaps available in this system in priority
500          * order.  If the heap type is supported by the client, and matches the
501          * request of the caller, allocate from it.  Repeat until allocate has
502          * succeeded or all heaps have been tried
503          */
504         len = PAGE_ALIGN(len);
505
506         if (!len)
507                 return ERR_PTR(-EINVAL);
508
509         down_read(&dev->lock);
510         plist_for_each_entry(heap, &dev->heaps, node) {
511                 /* if the caller didn't specify this heap id */
512                 if (!((1 << heap->id) & heap_id_mask))
513                         continue;
514                 buffer = ion_buffer_create(heap, dev, len, align, flags);
515                 if (!IS_ERR(buffer))
516                         break;
517         }
518         up_read(&dev->lock);
519
520         if (buffer == NULL)
521                 return ERR_PTR(-ENODEV);
522
523         if (IS_ERR(buffer)) {
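                /*
                 * Every eligible heap failed the request; when
                 * CONFIG_ION_ROCKCHIP_SNAPSHOT is enabled, dump a snapshot
                 * of ION state to help debug the allocation failure.
                 */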
524 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
525                 ion_snapshot_save(client->dev, len);
526 #endif
527                 return ERR_CAST(buffer);
528         }
529
530         handle = ion_handle_create(client, buffer);
531
532         /*
533          * ion_buffer_create will create a buffer with a ref_cnt of 1,
534          * and ion_handle_create will take a second reference, drop one here
535          */
536         ion_buffer_put(buffer);
537
538         if (IS_ERR(handle))
539                 return handle;
540
541         mutex_lock(&client->lock);
542         ret = ion_handle_add(client, handle);
543         mutex_unlock(&client->lock);
544         if (ret) {
545                 ion_handle_put(handle);
546                 handle = ERR_PTR(ret);
547         }
548
549         return handle;
550 }
551 EXPORT_SYMBOL(ion_alloc);
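
/*
 * Minimal in-kernel usage sketch (illustrative only; "my-driver", the heap
 * mask and the flags below are made up, and idev is assumed to be the
 * ion_device set up at probe time):
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...use the buffer...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */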
552
553 void ion_free(struct ion_client *client, struct ion_handle *handle)
554 {
555         bool valid_handle;
556
557         BUG_ON(client != handle->client);
558
559         mutex_lock(&client->lock);
560         valid_handle = ion_handle_validate(client, handle);
561
562         if (!valid_handle) {
563                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
564                 mutex_unlock(&client->lock);
565                 return;
566         }
567         mutex_unlock(&client->lock);
568         ion_handle_put(handle);
569 }
570 EXPORT_SYMBOL(ion_free);
571
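/*
 * Only heaps that hand out physically contiguous memory (such as carveout
 * or CMA based heaps) implement ->phys; for other heaps this returns
 * -ENODEV and callers must use the sg_table / dma-buf interfaces instead.
 */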
572 int ion_phys(struct ion_client *client, struct ion_handle *handle,
573              ion_phys_addr_t *addr, size_t *len)
574 {
575         struct ion_buffer *buffer;
576         int ret;
577
578         mutex_lock(&client->lock);
579         if (!ion_handle_validate(client, handle)) {
580                 mutex_unlock(&client->lock);
581                 return -EINVAL;
582         }
583
584         buffer = handle->buffer;
585
586         if (!buffer->heap->ops->phys) {
587                 pr_err("%s: ion_phys is not implemented by this heap.\n",
588                        __func__);
589                 mutex_unlock(&client->lock);
590                 return -ENODEV;
591         }
592         mutex_unlock(&client->lock);
593         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
594         return ret;
595 }
596 EXPORT_SYMBOL(ion_phys);
597
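/*
 * Kernel mappings are reference counted at two levels: each handle tracks
 * how many times its owner mapped it (handle->kmap_cnt), while the buffer
 * counts mappings across all handles plus dma-buf begin_cpu_access users
 * (buffer->kmap_cnt).  The heap's map_kernel/unmap_kernel ops only run on
 * the 0<->1 transitions of the buffer count.
 */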
598 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
599 {
600         void *vaddr;
601
602         if (buffer->kmap_cnt) {
603                 buffer->kmap_cnt++;
604                 return buffer->vaddr;
605         }
606         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
607         if (WARN_ONCE(vaddr == NULL,
608                         "heap->ops->map_kernel should return ERR_PTR on error"))
609                 return ERR_PTR(-EINVAL);
610         if (IS_ERR(vaddr))
611                 return vaddr;
612         buffer->vaddr = vaddr;
613         buffer->kmap_cnt++;
614         return vaddr;
615 }
616
617 static void *ion_handle_kmap_get(struct ion_handle *handle)
618 {
619         struct ion_buffer *buffer = handle->buffer;
620         void *vaddr;
621
622         if (handle->kmap_cnt) {
623                 handle->kmap_cnt++;
624                 return buffer->vaddr;
625         }
626         vaddr = ion_buffer_kmap_get(buffer);
627         if (IS_ERR(vaddr))
628                 return vaddr;
629         handle->kmap_cnt++;
630         return vaddr;
631 }
632
633 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
634 {
635         buffer->kmap_cnt--;
636         if (!buffer->kmap_cnt) {
637                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
638                 buffer->vaddr = NULL;
639         }
640 }
641
642 static void ion_handle_kmap_put(struct ion_handle *handle)
643 {
644         struct ion_buffer *buffer = handle->buffer;
645
646         handle->kmap_cnt--;
647         if (!handle->kmap_cnt)
648                 ion_buffer_kmap_put(buffer);
649 }
650
651 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
652 {
653         struct ion_buffer *buffer;
654         void *vaddr;
655
656         mutex_lock(&client->lock);
657         if (!ion_handle_validate(client, handle)) {
658                 pr_err("%s: invalid handle passed to map_kernel.\n",
659                        __func__);
660                 mutex_unlock(&client->lock);
661                 return ERR_PTR(-EINVAL);
662         }
663
664         buffer = handle->buffer;
665
666         if (!handle->buffer->heap->ops->map_kernel) {
667                 pr_err("%s: map_kernel is not implemented by this heap.\n",
668                        __func__);
669                 mutex_unlock(&client->lock);
670                 return ERR_PTR(-ENODEV);
671         }
672
673         mutex_lock(&buffer->lock);
674         vaddr = ion_handle_kmap_get(handle);
675         mutex_unlock(&buffer->lock);
676         mutex_unlock(&client->lock);
677         return vaddr;
678 }
679 EXPORT_SYMBOL(ion_map_kernel);
680
681 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
682 {
683         struct ion_buffer *buffer;
684
685         mutex_lock(&client->lock);
686         buffer = handle->buffer;
687         mutex_lock(&buffer->lock);
688         ion_handle_kmap_put(handle);
689         mutex_unlock(&buffer->lock);
690         mutex_unlock(&client->lock);
691 }
692 EXPORT_SYMBOL(ion_unmap_kernel);
693
694 #ifdef CONFIG_ROCKCHIP_IOMMU
695 static void ion_iommu_add(struct ion_buffer *buffer,
696                           struct ion_iommu_map *iommu)
697 {
698         struct rb_node **p = &buffer->iommu_maps.rb_node;
699         struct rb_node *parent = NULL;
700         struct ion_iommu_map *entry;
701
702         while (*p) {
703                 parent = *p;
704                 entry = rb_entry(parent, struct ion_iommu_map, node);
705
706                 if (iommu->key < entry->key) {
707                         p = &(*p)->rb_left;
708                 } else if (iommu->key > entry->key) {
709                         p = &(*p)->rb_right;
710                 } else {
711                         pr_err("%s: buffer %p already has mapping for domainid %x\n",
712                                 __func__,
713                                 buffer,
714                                 iommu->key);
715                         BUG();
716                 }
717         }
718
719         rb_link_node(&iommu->node, parent, p);
720         rb_insert_color(&iommu->node, &buffer->iommu_maps);
721 }
722
723 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
724                                                 uint32_t key)
725 {
726         struct rb_node **p = &buffer->iommu_maps.rb_node;
727         struct rb_node *parent = NULL;
728         struct ion_iommu_map *entry;
729
730         while (*p) {
731                 parent = *p;
732                 entry = rb_entry(parent, struct ion_iommu_map, node);
733
734                 if (key < entry->key)
735                         p = &(*p)->rb_left;
736                 else if (key > entry->key)
737                         p = &(*p)->rb_right;
738                 else
739                         return entry;
740         }
741
742         return NULL;
743 }
744
745 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
746                 struct device *iommu_dev, unsigned long *iova)
747 {
748         struct ion_iommu_map *data;
749         int ret;
750
751         data = kmalloc(sizeof(*data), GFP_ATOMIC);
752
753         if (!data)
754                 return ERR_PTR(-ENOMEM);
755
756         data->buffer = buffer;
757         data->key = (uint32_t)iommu_dev;
758
759         ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
760                                                 buffer->size, buffer->flags);
761         if (ret)
762                 goto out;
763
764         kref_init(&data->ref);
765         *iova = data->iova_addr;
766
767         ion_iommu_add(buffer, data);
768
769         return data;
770
771 out:
772         kfree(data);
773         return ERR_PTR(ret);
774 }
775
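/*
 * Rockchip-specific helper: map the handle's buffer into the IOMMU domain
 * owned by iommu_dev and return the device address in *iova and the mapped
 * length in *size.  Illustrative call sequence (vop_dev is a made-up IOMMU
 * master device here):
 *
 *	unsigned long iova, size;
 *	int ret;
 *
 *	ret = ion_map_iommu(vop_dev, client, handle, &iova, &size);
 *	if (!ret) {
 *		...program the hardware with iova...
 *		ion_unmap_iommu(vop_dev, client, handle);
 *	}
 */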
776 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
777                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
778 {
779         struct ion_buffer *buffer;
780         struct ion_iommu_map *iommu_map;
781         int ret = 0;
782
783         mutex_lock(&client->lock);
784         if (!ion_handle_validate(client, handle)) {
785                 pr_err("%s: invalid handle passed to map_kernel.\n",
786                        __func__);
787                 mutex_unlock(&client->lock);
788                 return -EINVAL;
789         }
790
791         buffer = handle->buffer;
792         pr_debug("%s: map buffer(%p)\n", __func__, buffer);
793
794         mutex_lock(&buffer->lock);
795
796         if (ION_IS_CACHED(buffer->flags)) {
797                 pr_err("%s: Cannot map iommu as cached.\n", __func__);
798                 ret = -EINVAL;
799                 goto out;
800         }
801
802         if (!handle->buffer->heap->ops->map_iommu) {
803                 pr_err("%s: map_iommu is not implemented by this heap.\n",
804                        __func__);
805                 ret = -ENODEV;
806                 goto out;
807         }
808
809         if (buffer->size & ~PAGE_MASK) {
810                 pr_debug("%s: buffer size %zx is not aligned to %lx\n", __func__,
811                         buffer->size, PAGE_SIZE);
812                 ret = -EINVAL;
813                 goto out;
814         }
815
816         iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
817         if (!iommu_map) {
818                 pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
819                 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
820         } else {
821                 pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
822                 if (iommu_map->mapped_size != buffer->size) {
823                         pr_err("%s: handle %p is already mapped with length"
824                                         " %x, trying to map with length %x\n",
825                                 __func__, handle, iommu_map->mapped_size, buffer->size);
826                         ret = -EINVAL;
827                 } else {
828                         kref_get(&iommu_map->ref);
829                         *iova = iommu_map->iova_addr;
830                 }
831         }
832         if (!ret)
833                 buffer->iommu_map_cnt++;
834         *size = buffer->size;
835 out:
836         mutex_unlock(&buffer->lock);
837         mutex_unlock(&client->lock);
838         return ret;
839 }
840 EXPORT_SYMBOL(ion_map_iommu);
841
842 static void ion_iommu_release(struct kref *kref)
843 {
844         struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
845                                                 ref);
846         struct ion_buffer *buffer = map->buffer;
847
848         rb_erase(&map->node, &buffer->iommu_maps);
849         buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
850         kfree(map);
851 }
852
853 /**
854  * Unmap any outstanding mappings which would otherwise have been leaked.
855  */
856 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
857 {
858         struct ion_iommu_map *iommu_map;
859         struct rb_node *node;
860         const struct rb_root *rb = &(buffer->iommu_maps);
861
862         pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);
863
864         mutex_lock(&buffer->lock);
865
866         while ((node = rb_first(rb)) != 0) {
867                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
868                 /* set ref count to 1 to force release */
869                 kref_init(&iommu_map->ref);
870                 kref_put(&iommu_map->ref, ion_iommu_release);
871         }
872
873         mutex_unlock(&buffer->lock);
874 }
875
876 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
877                         struct ion_handle *handle)
878 {
879         struct ion_iommu_map *iommu_map;
880         struct ion_buffer *buffer;
881
882         mutex_lock(&client->lock);
883         buffer = handle->buffer;
884         pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
885
886         mutex_lock(&buffer->lock);
887
888         iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
889
890         if (!iommu_map) {
891                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
892                                 iommu_dev, buffer);
893                 goto out;
894         }
895
896         kref_put(&iommu_map->ref, ion_iommu_release);
897
898         buffer->iommu_map_cnt--;
899
900 out:
901         mutex_unlock(&buffer->lock);
902         mutex_unlock(&client->lock);
903 }
904 EXPORT_SYMBOL(ion_unmap_iommu);
905
906 static int ion_debug_client_show_buffer_map(struct seq_file *s, struct ion_buffer *buffer)
907 {
908         struct ion_iommu_map *iommu_map;
909         const struct rb_root *rb;
910         struct rb_node *node;
911
912         pr_debug("%s: buffer(%p)\n", __func__, buffer);
913
914         mutex_lock(&buffer->lock);
915         rb = &(buffer->iommu_maps);
916         node = rb_first(rb);
917
918         while (node != NULL) {
919                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
920                 seq_printf(s, "%16.16s:   0x%08lx   0x%08x %8zuKB %4d\n",
921                         "<iommu>", iommu_map->iova_addr, 0, iommu_map->mapped_size>>10,
922                         atomic_read(&iommu_map->ref.refcount));
923
924                 node = rb_next(node);
925         }
926
927         mutex_unlock(&buffer->lock);
928
929         return 0;
930 }
931 #endif
932
933 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
934 {
935         struct ion_client *client = s->private;
936         struct rb_node *n;
937
938         seq_printf(s, "----------------------------------------------------\n");
939         seq_printf(s, "%16.16s: %12.12s %12.12s %10.10s %4.4s %4.4s %4.4s\n",
940                 "heap_name", "VA", "PA", "size", "HC", "IBR", "IHR");
941         mutex_lock(&client->lock);
942         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
943                 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
944                 struct ion_buffer *buffer = handle->buffer;
945                 ion_phys_addr_t pa = 0;
946                 size_t len = buffer->size;
947
948                 mutex_lock(&buffer->lock);
949
950                 if (buffer->heap->ops->phys)
951                         buffer->heap->ops->phys(buffer->heap, buffer, &pa, &len);
952
953                 seq_printf(s, "%16.16s:   0x%08lx   0x%08lx %8zuKB %4d %4d %4d\n",
954                         buffer->heap->name, (unsigned long)buffer->vaddr, pa, len>>10, buffer->handle_count,
955                         atomic_read(&buffer->ref.refcount), atomic_read(&handle->ref.refcount));
956
957                 mutex_unlock(&buffer->lock);
958
959 #ifdef CONFIG_ROCKCHIP_IOMMU
960                 ion_debug_client_show_buffer_map(s, buffer);
961 #endif
962         }
963         mutex_unlock(&client->lock);
964
965         return 0;
966 }
967
968 static int ion_debug_client_show(struct seq_file *s, void *unused)
969 {
970         struct ion_client *client = s->private;
971         struct rb_node *n;
972         size_t sizes[ION_NUM_HEAP_IDS] = {0};
973         const char *names[ION_NUM_HEAP_IDS] = {NULL};
974         int i;
975
976         mutex_lock(&client->lock);
977         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
978                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
979                                                      node);
980                 unsigned int id = handle->buffer->heap->id;
981
982                 if (!names[id])
983                         names[id] = handle->buffer->heap->name;
984                 sizes[id] += handle->buffer->size;
985         }
986         mutex_unlock(&client->lock);
987
988         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
989         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
990                 if (!names[i])
991                         continue;
992                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
993         }
994         ion_debug_client_show_buffer(s, unused);
995         return 0;
996 }
997
998 static int ion_debug_client_open(struct inode *inode, struct file *file)
999 {
1000         return single_open(file, ion_debug_client_show, inode->i_private);
1001 }
1002
1003 static const struct file_operations debug_client_fops = {
1004         .open = ion_debug_client_open,
1005         .read = seq_read,
1006         .llseek = seq_lseek,
1007         .release = single_release,
1008 };
1009
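/*
 * Return the next free "-N" suffix for a client name, so that several
 * clients created with the same name get distinct debugfs entries
 * (name-0, name-1, ...).
 */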
1010 static int ion_get_client_serial(const struct rb_root *root,
1011                                         const unsigned char *name)
1012 {
1013         int serial = -1;
1014         struct rb_node *node;
1015         for (node = rb_first(root); node; node = rb_next(node)) {
1016                 struct ion_client *client = rb_entry(node, struct ion_client,
1017                                                 node);
1018                 if (strcmp(client->name, name))
1019                         continue;
1020                 serial = max(serial, client->display_serial);
1021         }
1022         return serial + 1;
1023 }
1024
1025 struct ion_client *ion_client_create(struct ion_device *dev,
1026                                      const char *name)
1027 {
1028         struct ion_client *client;
1029         struct task_struct *task;
1030         struct rb_node **p;
1031         struct rb_node *parent = NULL;
1032         struct ion_client *entry;
1033         pid_t pid;
1034
1035         if (!name) {
1036                 pr_err("%s: Name cannot be null\n", __func__);
1037                 return ERR_PTR(-EINVAL);
1038         }
1039
1040         get_task_struct(current->group_leader);
1041         task_lock(current->group_leader);
1042         pid = task_pid_nr(current->group_leader);
1043         /* don't bother to store task struct for kernel threads,
1044            they can't be killed anyway */
1045         if (current->group_leader->flags & PF_KTHREAD) {
1046                 put_task_struct(current->group_leader);
1047                 task = NULL;
1048         } else {
1049                 task = current->group_leader;
1050         }
1051         task_unlock(current->group_leader);
1052
1053         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1054         if (!client)
1055                 goto err_put_task_struct;
1056
1057         client->dev = dev;
1058         client->handles = RB_ROOT;
1059         idr_init(&client->idr);
1060         mutex_init(&client->lock);
1061         client->task = task;
1062         client->pid = pid;
1063         client->name = kstrdup(name, GFP_KERNEL);
1064         if (!client->name)
1065                 goto err_free_client;
1066
1067         down_write(&dev->lock);
1068         client->display_serial = ion_get_client_serial(&dev->clients, name);
1069         client->display_name = kasprintf(
1070                 GFP_KERNEL, "%s-%d", name, client->display_serial);
1071         if (!client->display_name) {
1072                 up_write(&dev->lock);
1073                 goto err_free_client_name;
1074         }
1075         p = &dev->clients.rb_node;
1076         while (*p) {
1077                 parent = *p;
1078                 entry = rb_entry(parent, struct ion_client, node);
1079
1080                 if (client < entry)
1081                         p = &(*p)->rb_left;
1082                 else if (client > entry)
1083                         p = &(*p)->rb_right;
1084         }
1085         rb_link_node(&client->node, parent, p);
1086         rb_insert_color(&client->node, &dev->clients);
1087
1088         client->debug_root = debugfs_create_file(client->display_name, 0664,
1089                                                 dev->clients_debug_root,
1090                                                 client, &debug_client_fops);
1091         if (!client->debug_root) {
1092                 char buf[256], *path;
1093                 path = dentry_path(dev->clients_debug_root, buf, 256);
1094                 pr_err("Failed to create client debugfs at %s/%s\n",
1095                         path, client->display_name);
1096         }
1097
1098         up_write(&dev->lock);
1099
1100         return client;
1101
1102 err_free_client_name:
1103         kfree(client->name);
1104 err_free_client:
1105         kfree(client);
1106 err_put_task_struct:
1107         if (task)
1108                 put_task_struct(current->group_leader);
1109         return ERR_PTR(-ENOMEM);
1110 }
1111 EXPORT_SYMBOL(ion_client_create);
1112
1113 void ion_client_destroy(struct ion_client *client)
1114 {
1115         struct ion_device *dev = client->dev;
1116         struct rb_node *n;
1117
1118         pr_debug("%s: %d\n", __func__, __LINE__);
1119         while ((n = rb_first(&client->handles))) {
1120                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1121                                                      node);
1122                 ion_handle_destroy(&handle->ref);
1123         }
1124
1125         idr_destroy(&client->idr);
1126
1127         down_write(&dev->lock);
1128         if (client->task)
1129                 put_task_struct(client->task);
1130         rb_erase(&client->node, &dev->clients);
1131         debugfs_remove_recursive(client->debug_root);
1132         up_write(&dev->lock);
1133
1134         kfree(client->display_name);
1135         kfree(client->name);
1136         kfree(client);
1137 }
1138 EXPORT_SYMBOL(ion_client_destroy);
1139
1140 struct sg_table *ion_sg_table(struct ion_client *client,
1141                               struct ion_handle *handle)
1142 {
1143         struct ion_buffer *buffer;
1144         struct sg_table *table;
1145
1146         mutex_lock(&client->lock);
1147         if (!ion_handle_validate(client, handle)) {
1148                 pr_err("%s: invalid handle passed to map_dma.\n",
1149                        __func__);
1150                 mutex_unlock(&client->lock);
1151                 return ERR_PTR(-EINVAL);
1152         }
1153         buffer = handle->buffer;
1154         table = buffer->sg_table;
1155         mutex_unlock(&client->lock);
1156         return table;
1157 }
1158 EXPORT_SYMBOL(ion_sg_table);
1159
1160 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1161                                        struct device *dev,
1162                                        enum dma_data_direction direction);
1163
1164 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1165                                         enum dma_data_direction direction)
1166 {
1167         struct dma_buf *dmabuf = attachment->dmabuf;
1168         struct ion_buffer *buffer = dmabuf->priv;
1169
1170         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1171         return buffer->sg_table;
1172 }
1173
1174 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1175                               struct sg_table *table,
1176                               enum dma_data_direction direction)
1177 {
1178 }
1179
1180 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1181                 size_t size, enum dma_data_direction dir)
1182 {
1183         struct scatterlist sg;
1184
1185         sg_init_table(&sg, 1);
1186         sg_set_page(&sg, page, size, 0);
1187         /*
1188          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1189          * for the targeted device, but this works on the currently targeted
1190          * hardware.
1191          */
1192         sg_dma_address(&sg) = page_to_phys(page);
1193         dma_sync_sg_for_device(dev, &sg, 1, dir);
1194 }
1195
1196 struct ion_vma_list {
1197         struct list_head list;
1198         struct vm_area_struct *vma;
1199 };
1200
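/*
 * For buffers that fault in their user mappings, CPU writes are tracked
 * per page: the fault handler marks a page dirty, and before the buffer is
 * handed to a device we sync exactly those dirty pages and zap the user
 * mappings so the next CPU access faults in (and re-dirties) the page.
 */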
1201 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1202                                        struct device *dev,
1203                                        enum dma_data_direction dir)
1204 {
1205         struct ion_vma_list *vma_list;
1206         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1207         int i;
1208
1209         pr_debug("%s: syncing for device %s\n", __func__,
1210                  dev ? dev_name(dev) : "null");
1211
1212         if (!ion_buffer_fault_user_mappings(buffer))
1213                 return;
1214
1215         mutex_lock(&buffer->lock);
1216         for (i = 0; i < pages; i++) {
1217                 struct page *page = buffer->pages[i];
1218
1219                 if (ion_buffer_page_is_dirty(page))
1220                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1221                                                         PAGE_SIZE, dir);
1222
1223                 ion_buffer_page_clean(buffer->pages + i);
1224         }
1225         list_for_each_entry(vma_list, &buffer->vmas, list) {
1226                 struct vm_area_struct *vma = vma_list->vma;
1227
1228                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1229                                NULL);
1230         }
1231         mutex_unlock(&buffer->lock);
1232 }
1233
1234 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1235 {
1236         struct ion_buffer *buffer = vma->vm_private_data;
1237         unsigned long pfn;
1238         int ret;
1239
1240         mutex_lock(&buffer->lock);
1241         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1242         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1243
1244         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1245         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1246         mutex_unlock(&buffer->lock);
1247         if (ret)
1248                 return VM_FAULT_ERROR;
1249
1250         return VM_FAULT_NOPAGE;
1251 }
1252
1253 static void ion_vm_open(struct vm_area_struct *vma)
1254 {
1255         struct ion_buffer *buffer = vma->vm_private_data;
1256         struct ion_vma_list *vma_list;
1257
1258         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1259         if (!vma_list)
1260                 return;
1261         vma_list->vma = vma;
1262         mutex_lock(&buffer->lock);
1263         list_add(&vma_list->list, &buffer->vmas);
1264         mutex_unlock(&buffer->lock);
1265         pr_debug("%s: adding %p\n", __func__, vma);
1266 }
1267
1268 static void ion_vm_close(struct vm_area_struct *vma)
1269 {
1270         struct ion_buffer *buffer = vma->vm_private_data;
1271         struct ion_vma_list *vma_list, *tmp;
1272
1273         pr_debug("%s\n", __func__);
1274         mutex_lock(&buffer->lock);
1275         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1276                 if (vma_list->vma != vma)
1277                         continue;
1278                 list_del(&vma_list->list);
1279                 kfree(vma_list);
1280                 pr_debug("%s: deleting %p\n", __func__, vma);
1281                 break;
1282         }
1283         mutex_unlock(&buffer->lock);
1284 }
1285
1286 static struct vm_operations_struct ion_vma_ops = {
1287         .open = ion_vm_open,
1288         .close = ion_vm_close,
1289         .fault = ion_vm_fault,
1290 };
1291
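/*
 * mmap of an ION dma-buf: buffers that need per-page fault tracking are
 * mapped lazily through ion_vma_ops, everything else is mapped up front by
 * the heap's map_user op (write-combined when the buffer is uncached).
 */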
1292 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1293 {
1294         struct ion_buffer *buffer = dmabuf->priv;
1295         int ret = 0;
1296
1297         if (!buffer->heap->ops->map_user) {
1298                 pr_err("%s: this heap does not define a method for mapping "
1299                        "to userspace\n", __func__);
1300                 return -EINVAL;
1301         }
1302
1303         if (ion_buffer_fault_user_mappings(buffer)) {
1304                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1305                                                         VM_DONTDUMP;
1306                 vma->vm_private_data = buffer;
1307                 vma->vm_ops = &ion_vma_ops;
1308                 ion_vm_open(vma);
1309                 return 0;
1310         }
1311
1312         if (!(buffer->flags & ION_FLAG_CACHED))
1313                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1314
1315         mutex_lock(&buffer->lock);
1316         /* now map it to userspace */
1317         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1318         mutex_unlock(&buffer->lock);
1319
1320         if (ret)
1321                 pr_err("%s: failure mapping buffer to userspace\n",
1322                        __func__);
1323
1324         return ret;
1325 }
1326
1327 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1328 {
1329         struct ion_buffer *buffer = dmabuf->priv;
1330         ion_buffer_put(buffer);
1331 }
1332
1333 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1334 {
1335         struct ion_buffer *buffer = dmabuf->priv;
1336         return buffer->vaddr + offset * PAGE_SIZE;
1337 }
1338
1339 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1340                                void *ptr)
1341 {
1342         return;
1343 }
1344
1345 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1346                                         size_t len,
1347                                         enum dma_data_direction direction)
1348 {
1349         struct ion_buffer *buffer = dmabuf->priv;
1350         void *vaddr;
1351
1352         if (!buffer->heap->ops->map_kernel) {
1353                 pr_err("%s: map kernel is not implemented by this heap.\n",
1354                        __func__);
1355                 return -ENODEV;
1356         }
1357
1358         mutex_lock(&buffer->lock);
1359         vaddr = ion_buffer_kmap_get(buffer);
1360         mutex_unlock(&buffer->lock);
1361         if (IS_ERR(vaddr))
1362                 return PTR_ERR(vaddr);
1363         return 0;
1364 }
1365
1366 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1367                                        size_t len,
1368                                        enum dma_data_direction direction)
1369 {
1370         struct ion_buffer *buffer = dmabuf->priv;
1371
1372         mutex_lock(&buffer->lock);
1373         ion_buffer_kmap_put(buffer);
1374         mutex_unlock(&buffer->lock);
1375 }
1376
1377 static struct dma_buf_ops dma_buf_ops = {
1378         .map_dma_buf = ion_map_dma_buf,
1379         .unmap_dma_buf = ion_unmap_dma_buf,
1380         .mmap = ion_mmap,
1381         .release = ion_dma_buf_release,
1382         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1383         .end_cpu_access = ion_dma_buf_end_cpu_access,
1384         .kmap_atomic = ion_dma_buf_kmap,
1385         .kunmap_atomic = ion_dma_buf_kunmap,
1386         .kmap = ion_dma_buf_kmap,
1387         .kunmap = ion_dma_buf_kunmap,
1388 };
1389
1390 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1391                                                 struct ion_handle *handle)
1392 {
1393         struct ion_buffer *buffer;
1394         struct dma_buf *dmabuf;
1395         bool valid_handle;
1396
1397         mutex_lock(&client->lock);
1398         valid_handle = ion_handle_validate(client, handle);
1399         if (!valid_handle) {
1400                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1401                 mutex_unlock(&client->lock);
1402                 return ERR_PTR(-EINVAL);
1403         }
1404         buffer = handle->buffer;
1405         ion_buffer_get(buffer);
1406         mutex_unlock(&client->lock);
1407
1408         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1409         if (IS_ERR(dmabuf)) {
1410                 ion_buffer_put(buffer);
1411                 return dmabuf;
1412         }
1413
1414         return dmabuf;
1415 }
1416 EXPORT_SYMBOL(ion_share_dma_buf);
1417
1418 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1419 {
1420         struct dma_buf *dmabuf;
1421         int fd;
1422
1423         dmabuf = ion_share_dma_buf(client, handle);
1424         if (IS_ERR(dmabuf))
1425                 return PTR_ERR(dmabuf);
1426
1427         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1428         if (fd < 0)
1429                 dma_buf_put(dmabuf);
1430
1431         return fd;
1432 }
1433 EXPORT_SYMBOL(ion_share_dma_buf_fd);
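
/*
 * Sketch of the sharing flow between two clients, possibly in different
 * processes (how the fd travels, e.g. over binder or a socket, is outside
 * ION itself):
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	...hand fd to the other side...
 *	struct ion_handle *handle_b = ion_import_dma_buf(client_b, fd);
 */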
1434
1435 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1436 {
1437         struct dma_buf *dmabuf;
1438         struct ion_buffer *buffer;
1439         struct ion_handle *handle;
1440         int ret;
1441
1442         dmabuf = dma_buf_get(fd);
1443         if (IS_ERR(dmabuf))
1444                 return ERR_CAST(dmabuf);
1445         /* if this memory came from ion */
1446
1447         if (dmabuf->ops != &dma_buf_ops) {
1448                 pr_err("%s: can not import dmabuf from another exporter\n",
1449                        __func__);
1450                 dma_buf_put(dmabuf);
1451                 return ERR_PTR(-EINVAL);
1452         }
1453         buffer = dmabuf->priv;
1454
1455         mutex_lock(&client->lock);
1456         /* if a handle exists for this buffer just take a reference to it */
1457         handle = ion_handle_lookup(client, buffer);
1458         if (!IS_ERR(handle)) {
1459                 ion_handle_get(handle);
1460                 mutex_unlock(&client->lock);
1461                 goto end;
1462         }
1463         mutex_unlock(&client->lock);
1464
1465         handle = ion_handle_create(client, buffer);
1466         if (IS_ERR(handle))
1467                 goto end;
1468
1469         mutex_lock(&client->lock);
1470         ret = ion_handle_add(client, handle);
1471         mutex_unlock(&client->lock);
1472         if (ret) {
1473                 ion_handle_put(handle);
1474                 handle = ERR_PTR(ret);
1475         }
1476
1477 end:
1478         dma_buf_put(dmabuf);
1479         return handle;
1480 }
1481 EXPORT_SYMBOL(ion_import_dma_buf);
1482
1483 static int ion_sync_for_device(struct ion_client *client, int fd)
1484 {
1485         struct dma_buf *dmabuf;
1486         struct ion_buffer *buffer;
1487
1488         dmabuf = dma_buf_get(fd);
1489         if (IS_ERR(dmabuf))
1490                 return PTR_ERR(dmabuf);
1491
1492         /* if this memory came from ion */
1493         if (dmabuf->ops != &dma_buf_ops) {
1494                 pr_err("%s: can not sync dmabuf from another exporter\n",
1495                        __func__);
1496                 dma_buf_put(dmabuf);
1497                 return -EINVAL;
1498         }
1499         buffer = dmabuf->priv;
1500
1501         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1502                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1503         dma_buf_put(dmabuf);
1504         return 0;
1505 }
1506
1507 /* fix up the cases where the ioctl direction bits are incorrect */
1508 static unsigned int ion_ioctl_dir(unsigned int cmd)
1509 {
1510         switch (cmd) {
1511         case ION_IOC_SYNC:
1512         case ION_IOC_FREE:
1513         case ION_IOC_CUSTOM:
1514                 return _IOC_WRITE;
1515         default:
1516                 return _IOC_DIR(cmd);
1517         }
1518 }
1519
1520 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1521 {
1522         struct ion_client *client = filp->private_data;
1523         struct ion_device *dev = client->dev;
1524         struct ion_handle *cleanup_handle = NULL;
1525         int ret = 0;
1526         unsigned int dir;
1527
1528         union {
1529                 struct ion_fd_data fd;
1530                 struct ion_allocation_data allocation;
1531                 struct ion_handle_data handle;
1532                 struct ion_custom_data custom;
1533         } data;
1534
1535         dir = ion_ioctl_dir(cmd);
1536
1537         if (_IOC_SIZE(cmd) > sizeof(data))
1538                 return -EINVAL;
1539
1540         if (dir & _IOC_WRITE)
1541                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1542                         return -EFAULT;
1543
1544         switch (cmd) {
1545         case ION_IOC_ALLOC:
1546         {
1547                 struct ion_handle *handle;
1548
1549                 handle = ion_alloc(client, data.allocation.len,
1550                                                 data.allocation.align,
1551                                                 data.allocation.heap_id_mask,
1552                                                 data.allocation.flags);
1553                 if (IS_ERR(handle))
1554                         return PTR_ERR(handle);
1555
1556                 data.allocation.handle = handle->id;
1557
1558                 cleanup_handle = handle;
1559                 break;
1560         }
1561         case ION_IOC_FREE:
1562         {
1563                 struct ion_handle *handle;
1564
1565                 handle = ion_handle_get_by_id(client, data.handle.handle);
1566                 if (IS_ERR(handle))
1567                         return PTR_ERR(handle);
1568                 ion_free(client, handle);
1569                 ion_handle_put(handle);
1570                 break;
1571         }
1572         case ION_IOC_SHARE:
1573         case ION_IOC_MAP:
1574         {
1575                 struct ion_handle *handle;
1576
1577                 handle = ion_handle_get_by_id(client, data.handle.handle);
1578                 if (IS_ERR(handle))
1579                         return PTR_ERR(handle);
1580                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1581                 ion_handle_put(handle);
1582                 if (data.fd.fd < 0)
1583                         ret = data.fd.fd;
1584                 break;
1585         }
1586         case ION_IOC_IMPORT:
1587         {
1588                 struct ion_handle *handle;
1589                 handle = ion_import_dma_buf(client, data.fd.fd);
1590                 if (IS_ERR(handle))
1591                         ret = PTR_ERR(handle);
1592                 else
1593                         data.handle.handle = handle->id;
1594                 break;
1595         }
1596         case ION_IOC_SYNC:
1597         {
1598                 ret = ion_sync_for_device(client, data.fd.fd);
1599                 break;
1600         }
1601         case ION_IOC_CUSTOM:
1602         {
1603                 if (!dev->custom_ioctl)
1604                         return -ENOTTY;
1605                 ret = dev->custom_ioctl(client, data.custom.cmd,
1606                                                 data.custom.arg);
1607                 break;
1608         }
1609         default:
1610                 return -ENOTTY;
1611         }
1612
1613         if (dir & _IOC_READ) {
1614                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1615                         if (cleanup_handle)
1616                                 ion_free(client, cleanup_handle);
1617                         return -EFAULT;
1618                 }
1619         }
1620         return ret;
1621 }
1622
1623 static int ion_release(struct inode *inode, struct file *file)
1624 {
1625         struct ion_client *client = file->private_data;
1626
1627         pr_debug("%s: %d\n", __func__, __LINE__);
1628         ion_client_destroy(client);
1629         return 0;
1630 }
1631
1632 static int ion_open(struct inode *inode, struct file *file)
1633 {
1634         struct miscdevice *miscdev = file->private_data;
1635         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1636         struct ion_client *client;
1637         char debug_name[64];
1638
1639         pr_debug("%s: %d\n", __func__, __LINE__);
1640         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1641         client = ion_client_create(dev, debug_name);
1642         if (IS_ERR(client))
1643                 return PTR_ERR(client);
1644         file->private_data = client;
1645
1646         return 0;
1647 }
1648
1649 static const struct file_operations ion_fops = {
1650         .owner          = THIS_MODULE,
1651         .open           = ion_open,
1652         .release        = ion_release,
1653         .unlocked_ioctl = ion_ioctl,
1654         .compat_ioctl   = compat_ion_ioctl,
1655 };
1656
1657 static size_t ion_debug_heap_total(struct ion_client *client,
1658                                    unsigned int id)
1659 {
1660         size_t size = 0;
1661         struct rb_node *n;
1662
1663         mutex_lock(&client->lock);
1664         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1665                 struct ion_handle *handle = rb_entry(n,
1666                                                      struct ion_handle,
1667                                                      node);
1668                 if (handle->buffer->heap->id == id)
1669                         size += handle->buffer->size;
1670         }
1671         mutex_unlock(&client->lock);
1672         return size;
1673 }
1674
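/*
 * Per-heap debugfs show routine.  The output looks roughly like the sketch
 * below (based on the seq_printf calls in this function, column widths
 * elided):
 *
 *	client           pid              size
 *	----------------------------------------------------
 *	<comm or name>   <pid>            <bytes held by that client>
 *	----------------------------------------------------
 *	orphaned allocations (info is from last known client):
 *	<comm>           <pid>            <size> <kmap_cnt> <refcount>
 *	----------------------------------------------------
 *	total orphaned   <bytes>
 *	total            <bytes>
 *	deferred free    <bytes>          (only with ION_HEAP_FLAG_DEFER_FREE)
 *	----------------------------------------------------
 */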
1675 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1676 {
1677         struct ion_heap *heap = s->private;
1678         struct ion_device *dev = heap->dev;
1679         struct rb_node *n;
1680         size_t total_size = 0;
1681         size_t total_orphaned_size = 0;
1682
1683         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1684         seq_printf(s, "----------------------------------------------------\n");
1685
1686         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1687                 struct ion_client *client = rb_entry(n, struct ion_client,
1688                                                      node);
1689                 size_t size = ion_debug_heap_total(client, heap->id);
1690                 if (!size)
1691                         continue;
1692                 if (client->task) {
1693                         char task_comm[TASK_COMM_LEN];
1694
1695                         get_task_comm(task_comm, client->task);
1696                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1697                                    client->pid, size);
1698                 } else {
1699                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1700                                    client->pid, size);
1701                 }
1702         }
1703         seq_printf(s, "----------------------------------------------------\n");
1704         seq_printf(s, "orphaned allocations (info is from last known client):"
1705                    "\n");
1706         mutex_lock(&dev->buffer_lock);
1707         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1708                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1709                                                      node);
1710                 if (buffer->heap->id != heap->id)
1711                         continue;
1712                 total_size += buffer->size;
1713                 if (!buffer->handle_count) {
1714                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1715                                    buffer->task_comm, buffer->pid,
1716                                    buffer->size, buffer->kmap_cnt,
1717                                    atomic_read(&buffer->ref.refcount));
1718                         total_orphaned_size += buffer->size;
1719                 }
1720         }
1721         mutex_unlock(&dev->buffer_lock);
1722         seq_printf(s, "----------------------------------------------------\n");
1723         seq_printf(s, "%16s %16zu\n", "total orphaned",
1724                    total_orphaned_size);
1725         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1726         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1727                 seq_printf(s, "%16s %16zu\n", "deferred free",
1728                                 heap->free_list_size);
1729         seq_printf(s, "----------------------------------------------------\n");
1730
1731         if (heap->debug_show)
1732                 heap->debug_show(heap, s, unused);
1733
1734         return 0;
1735 }
1736
1737 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1738 {
1739         return single_open(file, ion_debug_heap_show, inode->i_private);
1740 }
1741
1742 static const struct file_operations debug_heap_fops = {
1743         .open = ion_debug_heap_open,
1744         .read = seq_read,
1745         .llseek = seq_lseek,
1746         .release = single_release,
1747 };
1748
1749 #ifdef DEBUG_HEAP_SHRINKER
1750 static int debug_shrink_set(void *data, u64 val)
1751 {
1752         struct ion_heap *heap = data;
1753         struct shrink_control sc;
1754         int objs;
1755
1756         sc.gfp_mask = -1;
1757         sc.nr_to_scan = 0;
1758
1759         if (!val)
1760                 return 0;
1761
1762         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1763         sc.nr_to_scan = objs;
1764
1765         heap->shrinker.shrink(&heap->shrinker, &sc);
1766         return 0;
1767 }
1768
1769 static int debug_shrink_get(void *data, u64 *val)
1770 {
1771         struct ion_heap *heap = data;
1772         struct shrink_control sc;
1773         int objs;
1774
1775         sc.gfp_mask = -1;
1776         sc.nr_to_scan = 0;
1777
1778         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1779         *val = objs;
1780         return 0;
1781 }
1782
1783 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1784                         debug_shrink_set, "%llu\n");
1785 #endif
1786
1787 #ifdef CONFIG_CMA
1788 /* struct cma is copied from drivers/base/dma-contiguous.c, where it is private */
1789 struct cma {
1790         unsigned long   base_pfn;
1791         unsigned long   count;
1792         unsigned long   *bitmap;
1793 };
1794
1795 /* struct ion_cma_heap is copied from drivers/staging/android/ion/ion_cma_heap.c */
1796 struct ion_cma_heap {
1797         struct ion_heap heap;
1798         struct device *dev;
1799 };
1800
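/*
 * ion_cma_heap_debug_show() dumps the CMA allocation bitmap one row per
 * megabyte.  The arithmetic below assumes the common 32-bit configuration:
 * with 4 KiB pages, SZ_1M >> PAGE_SHIFT == 256 pages per row, and the eight
 * 32-bit bitmap words printed per line cover exactly those 256 bits (one bit
 * per page, set == allocated).  With other page sizes or 64-bit longs the
 * row grouping would no longer line up.
 */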
1801 static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
1802 {
1803         struct ion_heap *heap = s->private;
1804         struct ion_cma_heap *cma_heap = container_of(heap,
1805                                                         struct ion_cma_heap,
1806                                                         heap);
1807         struct device *dev = cma_heap->dev;
1808         struct cma *cma = dev_get_cma_area(dev);
1809         int i;
1810         int rows = cma->count / (SZ_1M >> PAGE_SHIFT);
1811         phys_addr_t base = __pfn_to_phys(cma->base_pfn);
1812
1813         seq_printf(s, "%s Heap bitmap:\n", heap->name);
1814
1815         for (i = rows - 1; i >= 0; i--) {
1816                 seq_printf(s, "%.4uM@0x%08x: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
1817                                 i + 1, base + i * SZ_1M,
1818                                 cma->bitmap[i*8 + 7],
1819                                 cma->bitmap[i*8 + 6],
1820                                 cma->bitmap[i*8 + 5],
1821                                 cma->bitmap[i*8 + 4],
1822                                 cma->bitmap[i*8 + 3],
1823                                 cma->bitmap[i*8 + 2],
1824                                 cma->bitmap[i*8 + 1],
1825                                 cma->bitmap[i*8]);
1826         }
1827         seq_printf(s, "Heap size: %luM, Heap base: 0x%08x\n",
1828                 cma->count >> 8, base);
1829
1830         return 0;
1831 }
1832
1833 static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
1834 {
1835         return single_open(file, ion_cma_heap_debug_show, inode->i_private);
1836 }
1837
1838 static const struct file_operations debug_heap_bitmap_fops = {
1839         .open = ion_debug_heap_bitmap_open,
1840         .read = seq_read,
1841         .llseek = seq_lseek,
1842         .release = single_release,
1843 };
1844 #endif
1845
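/*
 * Register a heap with an ion device: validate that the mandatory ops
 * (allocate/free/map_dma/unmap_dma) are present, set up deferred freeing and
 * the shrinker when the heap flags or ops ask for them, insert the heap into
 * the device's priority list (negated id, so higher ids are tried first) and
 * create its debugfs entries.
 */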
1846 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1847 {
1848         struct dentry *debug_file;
1849
1850         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1851             !heap->ops->unmap_dma)
1852                 pr_err("%s: can not add heap with invalid ops struct.\n",
1853                        __func__);
1854
1855         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1856                 ion_heap_init_deferred_free(heap);
1857
1858         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1859                 ion_heap_init_shrinker(heap);
1860
1861         heap->dev = dev;
1862         down_write(&dev->lock);
1863         /* use negative heap->id to reverse the priority -- when traversing
1864            the list later attempt higher id numbers first */
1865         plist_node_init(&heap->node, -heap->id);
1866         plist_add(&heap->node, &dev->heaps);
1867         debug_file = debugfs_create_file(heap->name, 0664,
1868                                         dev->heaps_debug_root, heap,
1869                                         &debug_heap_fops);
1870
1871         if (!debug_file) {
1872                 char buf[256], *path;
1873                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1874                 pr_err("Failed to create heap debugfs at %s/%s\n",
1875                         path, heap->name);
1876         }
1877
1878 #ifdef DEBUG_HEAP_SHRINKER
1879         if (heap->shrinker.shrink) {
1880                 char debug_name[64];
1881
1882                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1883                 debug_file = debugfs_create_file(
1884                         debug_name, 0644, dev->heaps_debug_root, heap,
1885                         &debug_shrink_fops);
1886                 if (!debug_file) {
1887                         char buf[256], *path;
1888                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1889                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1890                                 path, debug_name);
1891                 }
1892         }
1893 #endif
1894 #ifdef CONFIG_CMA
1895         if (heap->type == ION_HEAP_TYPE_DMA) {
1896                 char *heap_bitmap_name = kasprintf(
1897                         GFP_KERNEL, "%s-bitmap", heap->name);
1898                 debug_file = debugfs_create_file(heap_bitmap_name, 0664,
1899                                                 dev->heaps_debug_root, heap,
1900                                                 &debug_heap_bitmap_fops);
1901                 if (!debug_file) {
1902                         char buf[256], *path;
1903                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1904                         pr_err("Failed to create heap debugfs at %s/%s\n",
1905                                 path, heap_bitmap_name);
1906                 }
1907                 kfree(heap_bitmap_name);
1908         }
1909 #endif
1910         up_write(&dev->lock);
1911 }
1912
1913 struct ion_device *ion_device_create(long (*custom_ioctl)
1914                                      (struct ion_client *client,
1915                                       unsigned int cmd,
1916                                       unsigned long arg))
1917 {
1918         struct ion_device *idev;
1919         int ret;
1920
1921         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1922         if (!idev)
1923                 return ERR_PTR(-ENOMEM);
1924
1925         idev->dev.minor = MISC_DYNAMIC_MINOR;
1926         idev->dev.name = "ion";
1927         idev->dev.fops = &ion_fops;
1928         idev->dev.parent = NULL;
1929         ret = misc_register(&idev->dev);
1930         if (ret) {
1931                 pr_err("ion: failed to register misc device.\n");
                     kfree(idev);
1932                 return ERR_PTR(ret);
1933         }
1934
1935         idev->debug_root = debugfs_create_dir("ion", NULL);
1936         if (!idev->debug_root) {
1937                 pr_err("ion: failed to create debugfs root directory.\n");
1938                 goto debugfs_done;
1939         }
1940         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1941         if (!idev->heaps_debug_root) {
1942                 pr_err("ion: failed to create debugfs heaps directory.\n");
1943                 goto debugfs_done;
1944         }
1945         idev->clients_debug_root = debugfs_create_dir("clients",
1946                                                 idev->debug_root);
1947         if (!idev->clients_debug_root)
1948                 pr_err("ion: failed to create debugfs clients directory.\n");
1949
1950 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
1951         rockchip_ion_snapshot_debugfs(idev->debug_root);
1952 #endif
1953
1954 debugfs_done:
1955
1956         idev->custom_ioctl = custom_ioctl;
1957         idev->buffers = RB_ROOT;
1958         mutex_init(&idev->buffer_lock);
1959         init_rwsem(&idev->lock);
1960         plist_head_init(&idev->heaps);
1961         idev->clients = RB_ROOT;
1962         return idev;
1963 }
1964
1965 void ion_device_destroy(struct ion_device *dev)
1966 {
1967         misc_deregister(&dev->dev);
1968         debugfs_remove_recursive(dev->debug_root);
1969         /* XXX need to free the heaps and clients ? */
1970         kfree(dev);
1971 }
1972
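/*
 * Early boot helper: for each heap described in the platform data, either
 * reserve its fixed physical range or, when no base is given, carve out a
 * suitably aligned region from memblock and record the chosen base.
 */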
1973 void __init ion_reserve(struct ion_platform_data *data)
1974 {
1975         int i;
1976
1977         for (i = 0; i < data->nr; i++) {
1978                 if (data->heaps[i].size == 0)
1979                         continue;
1980
1981                 if (data->heaps[i].base == 0) {
1982                         phys_addr_t paddr;
1983                         paddr = memblock_alloc_base(data->heaps[i].size,
1984                                                     data->heaps[i].align,
1985                                                     MEMBLOCK_ALLOC_ANYWHERE);
1986                         if (!paddr) {
1987                                 pr_err("%s: error allocating memblock for "
1988                                        "heap %d\n",
1989                                         __func__, i);
1990                                 continue;
1991                         }
1992                         data->heaps[i].base = paddr;
1993                 } else {
1994                         int ret = memblock_reserve(data->heaps[i].base,
1995                                                data->heaps[i].size);
1996                         if (ret)
1997                                 pr_err("memblock reserve of %zx@%lx failed\n",
1998                                        data->heaps[i].size,
1999                                        data->heaps[i].base);
2000                 }
2001                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
2002                         data->heaps[i].name,
2003                         data->heaps[i].base,
2004                         data->heaps[i].size);
2005         }
2006 }
2007
2008 #ifdef CONFIG_ION_ROCKCHIP_SNAPSHOT
2009
2010 /* Find the longest run of clear bits, i.e. the largest allocation that could still succeed (worked example below). */
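/*
 * Worked example (hypothetical values): with size == 16 and bits 0-1, 6-7
 * and 11-15 set (allocated), the clear runs are bits 2-5 (length 4) and
 * bits 8-10 (length 3), so the function returns 4 -- at most four
 * contiguous free pages remain.
 */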
2011 static unsigned long ion_find_max_zero_area(unsigned long *map, unsigned long size)
2012 {
2013         unsigned long index, i, zero_sz, max_zero_sz, start;
2014         start = 0;
2015         max_zero_sz = 0;
2016
2017         do {
2018                 index = find_next_zero_bit(map, size, start);
2019                 if (index >= size) break;
2020
2021                 i = find_next_bit(map, size, index);
2022                 zero_sz = i - index;
2023                 pr_debug("zero[%lx, %lx]\n", index, zero_sz);
2024                 max_zero_sz = max(max_zero_sz, zero_sz);
2025                 start = i + 1;
2026         } while (start <= size);
2027
2028         pr_debug("max_zero_sz=%lx\n", max_zero_sz);
2029         return max_zero_sz;
2030 }
2031
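/*
 * Render the current state of every heap into the pre-reserved snapshot
 * buffer, so a failed allocation leaves a record of who asked for how much
 * and what each heap (and, for CMA heaps, its bitmap) looked like at that
 * moment.  Reuses ion_debug_heap_show() through a static seq_file backed by
 * the snapshot memory.
 */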
2032 static int ion_snapshot_save(struct ion_device *idev, size_t len)
2033 {
2034         static struct seq_file seqf;
2035         struct ion_heap *heap;
2036
2037         if (!seqf.buf) {
2038                 seqf.buf = rockchip_ion_snapshot_get(&seqf.size);
2039                 if (!seqf.buf)
2040                         return -ENOMEM;
2041         }
2042         memset(seqf.buf, 0, seqf.size);
2043         seqf.count = 0;
2044         pr_debug("%s: save snapshot 0x%zx@0x%lx\n", __func__, seqf.size,
2045                 (unsigned long)__pa(seqf.buf));
2046
2047         seq_printf(&seqf, "called by comm: %s pid: %d, alloc: %zuKB\n",
2048                 current->comm, current->pid, len >> 10);
2049
2050         down_read(&idev->lock);
2051
2052         plist_for_each_entry(heap, &idev->heaps, node) {
2053                 seqf.private = (void *)heap;
2054                 seq_printf(&seqf, "++++++++++++++++ HEAP: %s ++++++++++++++++\n",
2055                         heap->name);
2056                 ion_debug_heap_show(&seqf, NULL);
2057                 if (heap->type == ION_HEAP_TYPE_DMA) {
2058                         struct ion_cma_heap *cma_heap = container_of(heap,
2059                                                                         struct ion_cma_heap,
2060                                                                         heap);
2061                         struct cma *cma = dev_get_cma_area(cma_heap->dev);
2062                         seq_printf(&seqf, "\n");
2063                         seq_printf(&seqf, "Maximum allocation of pages: %ld\n",
2064                                         ion_find_max_zero_area(cma->bitmap, cma->count));
2065                         seq_printf(&seqf, "\n");
2066                 }
2067         }
2068
2069         up_read(&idev->lock);
2070
2071         return 0;
2072 }
2073 #endif