1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/file.h>
20 #include <linux/freezer.h>
21 #include <linux/fs.h>
22 #include <linux/anon_inodes.h>
23 #include <linux/kthread.h>
24 #include <linux/list.h>
25 #include <linux/memblock.h>
26 #include <linux/miscdevice.h>
27 #include <linux/export.h>
28 #include <linux/mm.h>
29 #include <linux/mm_types.h>
30 #include <linux/rbtree.h>
31 #include <linux/slab.h>
32 #include <linux/seq_file.h>
33 #include <linux/uaccess.h>
34 #include <linux/vmalloc.h>
35 #include <linux/debugfs.h>
36 #include <linux/dma-buf.h>
37 #include <linux/idr.h>
38 #include <linux/rockchip_ion.h>
39 #include <asm-generic/dma-contiguous.h>
40
41 #include "ion.h"
42 #include "ion_priv.h"
43 #include "compat_ion.h"
44
45 /**
46  * struct ion_device - the metadata of the ion device node
47  * @dev:                the actual misc device
48  * @buffers:            an rb tree of all the existing buffers
49  * @buffer_lock:        lock protecting the tree of buffers
50  * @lock:               rwsem protecting the tree of heaps and clients
51  * @heaps:              list of all the heaps in the system
52  * @clients:            an rb tree of all the clients attached to this device
53  */
54 struct ion_device {
55         struct miscdevice dev;
56         struct rb_root buffers;
57         struct mutex buffer_lock;
58         struct rw_semaphore lock;
59         struct plist_head heaps;
60         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
61                               unsigned long arg);
62         struct rb_root clients;
63         struct dentry *debug_root;
64         struct dentry *heaps_debug_root;
65         struct dentry *clients_debug_root;
66 };
67
68 /**
69  * struct ion_client - a process/hw block local address space
70  * @node:               node in the tree of all clients
71  * @dev:                backpointer to ion device
72  * @handles:            an rb tree of all the handles in this client
73  * @idr:                an idr space for allocating handle ids
74  * @lock:               lock protecting the tree of handles
75  * @name:               used for debugging
76  * @display_name:       used for debugging (unique version of @name)
77  * @display_serial:     used for debugging (to make display_name unique)
78  * @task:               used for debugging
79  *
80  * A client represents a list of buffers this client may access.
81  * The mutex stored here is used to protect both handles tree
82  * as well as the handles themselves, and should be held while modifying either.
83  */
84 struct ion_client {
85         struct rb_node node;
86         struct ion_device *dev;
87         struct rb_root handles;
88         struct idr idr;
89         struct mutex lock;
90         const char *name;
91         char *display_name;
92         int display_serial;
93         struct task_struct *task;
94         pid_t pid;
95         struct dentry *debug_root;
96 };
97
98 /**
99  * ion_handle - a client local reference to a buffer
100  * @ref:                reference count
101  * @client:             back pointer to the client the buffer resides in
102  * @buffer:             pointer to the buffer
103  * @node:               node in the client's handle rbtree
104  * @kmap_cnt:           count of times this client has mapped to kernel
105  * @id:                 client-unique id allocated by client->idr
106  *
107  * Modifications to node, kmap_cnt or buffer should be protected by the
108  * lock in the client.  Other fields are never changed after initialization.
109  */
110 struct ion_handle {
111         struct kref ref;
112         struct ion_client *client;
113         struct ion_buffer *buffer;
114         struct rb_node node;
115         unsigned int kmap_cnt;
116         int id;
117 };
118
119 static void ion_iommu_force_unmap(struct ion_buffer *buffer);
120
121 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
122 {
123         return (buffer->flags & ION_FLAG_CACHED) &&
124                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
125 }
126
127 bool ion_buffer_cached(struct ion_buffer *buffer)
128 {
129         return !!(buffer->flags & ION_FLAG_CACHED);
130 }
131
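
/*
 * For buffers whose user mappings are populated lazily via faults (see
 * ion_buffer_fault_user_mappings() above), buffer->pages[] stores one
 * struct page pointer per page and borrows bit 0 of each pointer as a
 * per-page "dirty" flag.  The helpers below pack and unpack that flag;
 * ion_buffer_sync_for_device() later syncs only the pages marked dirty.
 */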
132 static inline struct page *ion_buffer_page(struct page *page)
133 {
134         return (struct page *)((unsigned long)page & ~(1UL));
135 }
136
137 static inline bool ion_buffer_page_is_dirty(struct page *page)
138 {
139         return !!((unsigned long)page & 1UL);
140 }
141
142 static inline void ion_buffer_page_dirty(struct page **page)
143 {
144         *page = (struct page *)((unsigned long)(*page) | 1UL);
145 }
146
147 static inline void ion_buffer_page_clean(struct page **page)
148 {
149         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
150 }
151
152 /* this function should only be called while dev->lock is held */
153 static void ion_buffer_add(struct ion_device *dev,
154                            struct ion_buffer *buffer)
155 {
156         struct rb_node **p = &dev->buffers.rb_node;
157         struct rb_node *parent = NULL;
158         struct ion_buffer *entry;
159
160         while (*p) {
161                 parent = *p;
162                 entry = rb_entry(parent, struct ion_buffer, node);
163
164                 if (buffer < entry) {
165                         p = &(*p)->rb_left;
166                 } else if (buffer > entry) {
167                         p = &(*p)->rb_right;
168                 } else {
169                         pr_err("%s: buffer already found.\n", __func__);
170                         BUG();
171                 }
172         }
173
174         rb_link_node(&buffer->node, parent, p);
175         rb_insert_color(&buffer->node, &dev->buffers);
176 }
177
178 /* this function should only be called while dev->lock is held */
179 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
180                                      struct ion_device *dev,
181                                      unsigned long len,
182                                      unsigned long align,
183                                      unsigned long flags)
184 {
185         struct ion_buffer *buffer;
186         struct sg_table *table;
187         struct scatterlist *sg;
188         int i, ret;
189
190         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
191         if (!buffer)
192                 return ERR_PTR(-ENOMEM);
193
194         buffer->heap = heap;
195         buffer->flags = flags;
196         kref_init(&buffer->ref);
197
198         ret = heap->ops->allocate(heap, buffer, len, align, flags);
199
200         if (ret) {
201                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
202                         goto err2;
203
204                 ion_heap_freelist_drain(heap, 0);
205                 ret = heap->ops->allocate(heap, buffer, len, align,
206                                           flags);
207                 if (ret)
208                         goto err2;
209         }
210
211         buffer->dev = dev;
212         buffer->size = len;
213
214         table = heap->ops->map_dma(heap, buffer);
215         if (WARN_ONCE(table == NULL,
216                         "heap->ops->map_dma should return ERR_PTR on error"))
217                 table = ERR_PTR(-EINVAL);
218         if (IS_ERR(table)) {
219                 heap->ops->free(buffer);
220                 kfree(buffer);
221                 return ERR_PTR(PTR_ERR(table));
222         }
223         buffer->sg_table = table;
224         if (ion_buffer_fault_user_mappings(buffer)) {
225                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
226                 struct scatterlist *sg;
227                 int i, j, k = 0;
228
229                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
230                 if (!buffer->pages) {
231                         ret = -ENOMEM;
232                         goto err1;
233                 }
234
235                 for_each_sg(table->sgl, sg, table->nents, i) {
236                         struct page *page = sg_page(sg);
237
238                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
239                                 buffer->pages[k++] = page++;
240                 }
241
242                 if (ret)
243                         goto err;
244         }
245
246         buffer->dev = dev;
247         buffer->size = len;
248         INIT_LIST_HEAD(&buffer->vmas);
249         mutex_init(&buffer->lock);
250         /* this will set up dma addresses for the sglist -- it is not
251            technically correct as per the dma api -- a specific
252            device isn't really taking ownership here.  However, in practice on
253            our systems the only dma_address space is physical addresses.
254            Additionally, we can't afford the overhead of invalidating every
255            allocation via dma_map_sg. The implicit contract here is that
256            memory coming from the heaps is ready for dma, i.e. if it has a
257            cached mapping that mapping has been invalidated */
258         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
259                 sg_dma_address(sg) = sg_phys(sg);
260         mutex_lock(&dev->buffer_lock);
261         ion_buffer_add(dev, buffer);
262         mutex_unlock(&dev->buffer_lock);
263         return buffer;
264
265 err:
266         heap->ops->unmap_dma(heap, buffer);
267         heap->ops->free(buffer);
268 err1:
269         if (buffer->pages)
270                 vfree(buffer->pages);
271 err2:
272         kfree(buffer);
273         return ERR_PTR(ret);
274 }
275
276 void ion_buffer_destroy(struct ion_buffer *buffer)
277 {
278         if (WARN_ON(buffer->kmap_cnt > 0))
279                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
280         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
281 #ifdef CONFIG_ROCKCHIP_IOMMU
282         ion_iommu_force_unmap(buffer);
283 #endif
284         buffer->heap->ops->free(buffer);
285         if (buffer->pages)
286                 vfree(buffer->pages);
287         kfree(buffer);
288 }
289
290 static void _ion_buffer_destroy(struct kref *kref)
291 {
292         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
293         struct ion_heap *heap = buffer->heap;
294         struct ion_device *dev = buffer->dev;
295
296         mutex_lock(&dev->buffer_lock);
297         rb_erase(&buffer->node, &dev->buffers);
298         mutex_unlock(&dev->buffer_lock);
299
300         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
301                 ion_heap_freelist_add(heap, buffer);
302         else
303                 ion_buffer_destroy(buffer);
304 }
305
306 static void ion_buffer_get(struct ion_buffer *buffer)
307 {
308         kref_get(&buffer->ref);
309 }
310
311 static int ion_buffer_put(struct ion_buffer *buffer)
312 {
313         return kref_put(&buffer->ref, _ion_buffer_destroy);
314 }
315
316 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
317 {
318         mutex_lock(&buffer->lock);
319         buffer->handle_count++;
320         mutex_unlock(&buffer->lock);
321 }
322
323 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
324 {
325         /*
326          * when a buffer is removed from a handle, if it is not in
327          * any other handles, copy the taskcomm and the pid of the
328          * process it's being removed from into the buffer.  At this
329          * point there will be no way to track what processes this buffer is
330          * being used by, it only exists as a dma_buf file descriptor.
331          * The taskcomm and pid can provide a debug hint as to where this fd
332          * is in the system
333          */
334         mutex_lock(&buffer->lock);
335         buffer->handle_count--;
336         BUG_ON(buffer->handle_count < 0);
337         if (!buffer->handle_count) {
338                 struct task_struct *task;
339
340                 task = current->group_leader;
341                 get_task_comm(buffer->task_comm, task);
342                 buffer->pid = task_pid_nr(task);
343         }
344         mutex_unlock(&buffer->lock);
345 }
346
347 static struct ion_handle *ion_handle_create(struct ion_client *client,
348                                      struct ion_buffer *buffer)
349 {
350         struct ion_handle *handle;
351
352         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
353         if (!handle)
354                 return ERR_PTR(-ENOMEM);
355         kref_init(&handle->ref);
356         RB_CLEAR_NODE(&handle->node);
357         handle->client = client;
358         ion_buffer_get(buffer);
359         ion_buffer_add_to_handle(buffer);
360         handle->buffer = buffer;
361
362         return handle;
363 }
364
365 static void ion_handle_kmap_put(struct ion_handle *);
366
367 static void ion_handle_destroy(struct kref *kref)
368 {
369         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
370         struct ion_client *client = handle->client;
371         struct ion_buffer *buffer = handle->buffer;
372
373         mutex_lock(&buffer->lock);
374         while (handle->kmap_cnt)
375                 ion_handle_kmap_put(handle);
376         mutex_unlock(&buffer->lock);
377
378         idr_remove(&client->idr, handle->id);
379         if (!RB_EMPTY_NODE(&handle->node))
380                 rb_erase(&handle->node, &client->handles);
381
382         ion_buffer_remove_from_handle(buffer);
383         ion_buffer_put(buffer);
384
385         kfree(handle);
386 }
387
388 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
389 {
390         return handle->buffer;
391 }
392
393 static void ion_handle_get(struct ion_handle *handle)
394 {
395         kref_get(&handle->ref);
396 }
397
398 int ion_handle_put(struct ion_handle *handle)
399 {
400         struct ion_client *client = handle->client;
401         int ret;
402
403         mutex_lock(&client->lock);
404         ret = kref_put(&handle->ref, ion_handle_destroy);
405         mutex_unlock(&client->lock);
406
407         return ret;
408 }
409
410 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
411                                             struct ion_buffer *buffer)
412 {
413         struct rb_node *n = client->handles.rb_node;
414
415         while (n) {
416                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
417                 if (buffer < entry->buffer)
418                         n = n->rb_left;
419                 else if (buffer > entry->buffer)
420                         n = n->rb_right;
421                 else
422                         return entry;
423         }
424         return ERR_PTR(-EINVAL);
425 }
426
427 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
428                                                 int id)
429 {
430         struct ion_handle *handle;
431
432         mutex_lock(&client->lock);
433         handle = idr_find(&client->idr, id);
434         if (handle)
435                 ion_handle_get(handle);
436         mutex_unlock(&client->lock);
437
438         return handle ? handle : ERR_PTR(-EINVAL);
439 }
440
441 static bool ion_handle_validate(struct ion_client *client,
442                                 struct ion_handle *handle)
443 {
444         WARN_ON(!mutex_is_locked(&client->lock));
445         return (idr_find(&client->idr, handle->id) == handle);
446 }
447
448 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
449 {
450         int id;
451         struct rb_node **p = &client->handles.rb_node;
452         struct rb_node *parent = NULL;
453         struct ion_handle *entry;
454
455         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
456         if (id < 0)
457                 return id;
458
459         handle->id = id;
460
461         while (*p) {
462                 parent = *p;
463                 entry = rb_entry(parent, struct ion_handle, node);
464
465                 if (handle->buffer < entry->buffer)
466                         p = &(*p)->rb_left;
467                 else if (handle->buffer > entry->buffer)
468                         p = &(*p)->rb_right;
469                 else
470                         WARN(1, "%s: buffer already found.", __func__);
471         }
472
473         rb_link_node(&handle->node, parent, p);
474         rb_insert_color(&handle->node, &client->handles);
475
476         return 0;
477 }
478
479 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
480                              size_t align, unsigned int heap_id_mask,
481                              unsigned int flags)
482 {
483         struct ion_handle *handle;
484         struct ion_device *dev = client->dev;
485         struct ion_buffer *buffer = NULL;
486         struct ion_heap *heap;
487         int ret;
488
489         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
490                  len, align, heap_id_mask, flags);
491         /*
492          * traverse the list of heaps available in this system in priority
493          * order.  If the heap type is supported by the client, and matches the
494          * request of the caller allocate from it.  Repeat until allocate has
495          * succeeded or all heaps have been tried
496          */
497         len = PAGE_ALIGN(len);
498
499         if (!len)
500                 return ERR_PTR(-EINVAL);
501
502         down_read(&dev->lock);
503         plist_for_each_entry(heap, &dev->heaps, node) {
504                 /* if the caller didn't specify this heap id */
505                 if (!((1 << heap->id) & heap_id_mask))
506                         continue;
507                 buffer = ion_buffer_create(heap, dev, len, align, flags);
508                 if (!IS_ERR(buffer))
509                         break;
510         }
511         up_read(&dev->lock);
512
513         if (buffer == NULL)
514                 return ERR_PTR(-ENODEV);
515
516         if (IS_ERR(buffer))
517                 return ERR_PTR(PTR_ERR(buffer));
518
519         handle = ion_handle_create(client, buffer);
520
521         /*
522          * ion_buffer_create will create a buffer with a ref_cnt of 1,
523          * and ion_handle_create will take a second reference, drop one here
524          */
525         ion_buffer_put(buffer);
526
527         if (IS_ERR(handle))
528                 return handle;
529
530         mutex_lock(&client->lock);
531         ret = ion_handle_add(client, handle);
532         mutex_unlock(&client->lock);
533         if (ret) {
534                 ion_handle_put(handle);
535                 handle = ERR_PTR(ret);
536         }
537
538         return handle;
539 }
540 EXPORT_SYMBOL(ion_alloc);
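
/*
 * Example usage (a minimal sketch; the heap mask and flags below are
 * illustrative and assume a board where the system heap id matches
 * ION_HEAP_TYPE_SYSTEM):
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   1 << ION_HEAP_TYPE_SYSTEM, ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */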
541
542 void ion_free(struct ion_client *client, struct ion_handle *handle)
543 {
544         bool valid_handle;
545
546         BUG_ON(client != handle->client);
547
548         mutex_lock(&client->lock);
549         valid_handle = ion_handle_validate(client, handle);
550
551         if (!valid_handle) {
552                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
553                 mutex_unlock(&client->lock);
554                 return;
555         }
556         mutex_unlock(&client->lock);
557         ion_handle_put(handle);
558 }
559 EXPORT_SYMBOL(ion_free);
560
561 int ion_phys(struct ion_client *client, struct ion_handle *handle,
562              ion_phys_addr_t *addr, size_t *len)
563 {
564         struct ion_buffer *buffer;
565         int ret;
566
567         mutex_lock(&client->lock);
568         if (!ion_handle_validate(client, handle)) {
569                 mutex_unlock(&client->lock);
570                 return -EINVAL;
571         }
572
573         buffer = handle->buffer;
574
575         if (!buffer->heap->ops->phys) {
576                 pr_err("%s: ion_phys is not implemented by this heap.\n",
577                        __func__);
578                 mutex_unlock(&client->lock);
579                 return -ENODEV;
580         }
581         mutex_unlock(&client->lock);
582         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
583         return ret;
584 }
585 EXPORT_SYMBOL(ion_phys);
586
587 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
588 {
589         void *vaddr;
590
591         if (buffer->kmap_cnt) {
592                 buffer->kmap_cnt++;
593                 return buffer->vaddr;
594         }
595         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
596         if (WARN_ONCE(vaddr == NULL,
597                         "heap->ops->map_kernel should return ERR_PTR on error"))
598                 return ERR_PTR(-EINVAL);
599         if (IS_ERR(vaddr))
600                 return vaddr;
601         buffer->vaddr = vaddr;
602         buffer->kmap_cnt++;
603         return vaddr;
604 }
605
606 static void *ion_handle_kmap_get(struct ion_handle *handle)
607 {
608         struct ion_buffer *buffer = handle->buffer;
609         void *vaddr;
610
611         if (handle->kmap_cnt) {
612                 handle->kmap_cnt++;
613                 return buffer->vaddr;
614         }
615         vaddr = ion_buffer_kmap_get(buffer);
616         if (IS_ERR(vaddr))
617                 return vaddr;
618         handle->kmap_cnt++;
619         return vaddr;
620 }
621
622 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
623 {
624         buffer->kmap_cnt--;
625         if (!buffer->kmap_cnt) {
626                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
627                 buffer->vaddr = NULL;
628         }
629 }
630
631 static void ion_handle_kmap_put(struct ion_handle *handle)
632 {
633         struct ion_buffer *buffer = handle->buffer;
634
635         handle->kmap_cnt--;
636         if (!handle->kmap_cnt)
637                 ion_buffer_kmap_put(buffer);
638 }
639
640 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
641 {
642         struct ion_buffer *buffer;
643         void *vaddr;
644
645         mutex_lock(&client->lock);
646         if (!ion_handle_validate(client, handle)) {
647                 pr_err("%s: invalid handle passed to map_kernel.\n",
648                        __func__);
649                 mutex_unlock(&client->lock);
650                 return ERR_PTR(-EINVAL);
651         }
652
653         buffer = handle->buffer;
654
655         if (!handle->buffer->heap->ops->map_kernel) {
656                 pr_err("%s: map_kernel is not implemented by this heap.\n",
657                        __func__);
658                 mutex_unlock(&client->lock);
659                 return ERR_PTR(-ENODEV);
660         }
661
662         mutex_lock(&buffer->lock);
663         vaddr = ion_handle_kmap_get(handle);
664         mutex_unlock(&buffer->lock);
665         mutex_unlock(&client->lock);
666         return vaddr;
667 }
668 EXPORT_SYMBOL(ion_map_kernel);
669
670 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
671 {
672         struct ion_buffer *buffer;
673
674         mutex_lock(&client->lock);
675         buffer = handle->buffer;
676         mutex_lock(&buffer->lock);
677         ion_handle_kmap_put(handle);
678         mutex_unlock(&buffer->lock);
679         mutex_unlock(&client->lock);
680 }
681 EXPORT_SYMBOL(ion_unmap_kernel);
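
/*
 * Example pairing of the kernel mapping helpers (sketch only; "len" is
 * the caller-known buffer length and error paths are shortened):
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 */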
682
683 #ifdef CONFIG_ROCKCHIP_IOMMU
684 static void ion_iommu_add(struct ion_buffer *buffer,
685                           struct ion_iommu_map *iommu)
686 {
687         struct rb_node **p = &buffer->iommu_maps.rb_node;
688         struct rb_node *parent = NULL;
689         struct ion_iommu_map *entry;
690
691         while (*p) {
692                 parent = *p;
693                 entry = rb_entry(parent, struct ion_iommu_map, node);
694
695                 if (iommu->key < entry->key) {
696                         p = &(*p)->rb_left;
697                 } else if (iommu->key > entry->key) {
698                         p = &(*p)->rb_right;
699                 } else {
700                         pr_err("%s: buffer %p already has mapping for domainid %x\n",
701                                 __func__,
702                                 buffer,
703                                 iommu->key);
704                         BUG();
705                 }
706         }
707
708         rb_link_node(&iommu->node, parent, p);
709         rb_insert_color(&iommu->node, &buffer->iommu_maps);
710 }
711
712 static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
713                                                 uint32_t key)
714 {
715         struct rb_node **p = &buffer->iommu_maps.rb_node;
716         struct rb_node *parent = NULL;
717         struct ion_iommu_map *entry;
718
719         while (*p) {
720                 parent = *p;
721                 entry = rb_entry(parent, struct ion_iommu_map, node);
722
723                 if (key < entry->key)
724                         p = &(*p)->rb_left;
725                 else if (key > entry->key)
726                         p = &(*p)->rb_right;
727                 else
728                         return entry;
729         }
730
731         return NULL;
732 }
733
734 static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
735                 struct device *iommu_dev, unsigned long *iova)
736 {
737         struct ion_iommu_map *data;
738         int ret;
739
740         data = kmalloc(sizeof(*data), GFP_ATOMIC);
741
742         if (!data)
743                 return ERR_PTR(-ENOMEM);
744
745         data->buffer = buffer;
746         data->key = (uint32_t)iommu_dev;
747
748         ret = buffer->heap->ops->map_iommu(buffer, iommu_dev, data,
749                                                 buffer->size, buffer->flags);
750         if (ret)
751                 goto out;
752
753         kref_init(&data->ref);
754         *iova = data->iova_addr;
755
756         ion_iommu_add(buffer, data);
757
758         return data;
759
760 out:
761         kfree(data);
762         return ERR_PTR(ret);
763 }
764
765 int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
766                 struct ion_handle *handle, unsigned long *iova, unsigned long *size)
767 {
768         struct ion_buffer *buffer;
769         struct ion_iommu_map *iommu_map;
770         int ret = 0;
771
772         mutex_lock(&client->lock);
773         if (!ion_handle_validate(client, handle)) {
774                 pr_err("%s: invalid handle passed to map_iommu.\n",
775                        __func__);
776                 mutex_unlock(&client->lock);
777                 return -EINVAL;
778         }
779
780         buffer = handle->buffer;
781         pr_debug("%s: map buffer(%p)\n", __func__, buffer);
782
783         mutex_lock(&buffer->lock);
784
785         if (ION_IS_CACHED(buffer->flags)) {
786                 pr_err("%s: Cannot map iommu as cached.\n", __func__);
787                 ret = -EINVAL;
788                 goto out;
789         }
790
791         if (!handle->buffer->heap->ops->map_iommu) {
792                 pr_err("%s: map_iommu is not implemented by this heap.\n",
793                        __func__);
794                 ret = -ENODEV;
795                 goto out;
796         }
797
798         if (buffer->size & ~PAGE_MASK) {
799                 pr_debug("%s: buffer size %zx is not aligned to %lx\n", __func__,
800                         buffer->size, PAGE_SIZE);
801                 ret = -EINVAL;
802                 goto out;
803         }
804
805         iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
806         if (!iommu_map) {
807                 pr_debug("%s: create new map for buffer(%p)\n", __func__, buffer);
808                 iommu_map = __ion_iommu_map(buffer, iommu_dev, iova);
809         } else {
810                 pr_debug("%s: buffer(%p) already mapped\n", __func__, buffer);
811                 if (iommu_map->mapped_size != buffer->size) {
812                         pr_err("%s: handle %p is already mapped with length"
813                                         " %x, trying to map with length %x\n",
814                                 __func__, handle, iommu_map->mapped_size, buffer->size);
815                         ret = -EINVAL;
816                 } else {
817                         kref_get(&iommu_map->ref);
818                         *iova = iommu_map->iova_addr;
819                 }
820         }
821         if (!ret)
822                 buffer->iommu_map_cnt++;
823         *size = buffer->size;
824 out:
825         mutex_unlock(&buffer->lock);
826         mutex_unlock(&client->lock);
827         return ret;
828 }
829 EXPORT_SYMBOL(ion_map_iommu);
830
831 static void ion_iommu_release(struct kref *kref)
832 {
833         struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
834                                                 ref);
835         struct ion_buffer *buffer = map->buffer;
836
837         rb_erase(&map->node, &buffer->iommu_maps);
838         buffer->heap->ops->unmap_iommu((struct device*)map->key, map);
839         kfree(map);
840 }
841
842 /**
843  * Unmap any outstanding mappings which would otherwise have been leaked.
844  */
845 static void ion_iommu_force_unmap(struct ion_buffer *buffer)
846 {
847         struct ion_iommu_map *iommu_map;
848         struct rb_node *node;
849         const struct rb_root *rb = &(buffer->iommu_maps);
850
851         pr_debug("%s: force unmap buffer(%p)\n", __func__, buffer);
852
853         mutex_lock(&buffer->lock);
854
855         while ((node = rb_first(rb)) != 0) {
856                 iommu_map = rb_entry(node, struct ion_iommu_map, node);
857                 /* set ref count to 1 to force release */
858                 kref_init(&iommu_map->ref);
859                 kref_put(&iommu_map->ref, ion_iommu_release);
860         }
861
862         mutex_unlock(&buffer->lock);
863 }
864
865 void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
866                         struct ion_handle *handle)
867 {
868         struct ion_iommu_map *iommu_map;
869         struct ion_buffer *buffer;
870
871         mutex_lock(&client->lock);
872         buffer = handle->buffer;
873         pr_debug("%s: unmap buffer(%p)\n", __func__, buffer);
874
875         mutex_lock(&buffer->lock);
876
877         iommu_map = ion_iommu_lookup(buffer, (uint32_t)iommu_dev);
878
879         if (!iommu_map) {
880                 WARN(1, "%s: (%p) was never mapped for %p\n", __func__,
881                                 iommu_dev, buffer);
882                 goto out;
883         }
884
885         kref_put(&iommu_map->ref, ion_iommu_release);
886
887         buffer->iommu_map_cnt--;
888
889 out:
890         mutex_unlock(&buffer->lock);
891         mutex_unlock(&client->lock);
892 }
893 EXPORT_SYMBOL(ion_unmap_iommu);
894 #endif
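
/*
 * Sketch of the iommu path above (CONFIG_ROCKCHIP_IOMMU only; "iommu_dev"
 * stands for whichever device owns the iommu domain):
 *
 *	unsigned long iova, size;
 *
 *	if (!ion_map_iommu(iommu_dev, client, handle, &iova, &size))
 *		... program the hardware with iova ...
 *	ion_unmap_iommu(iommu_dev, client, handle);
 */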
895
896 static int ion_debug_client_show_buffer(struct seq_file *s, void *unused)
897 {
898         struct ion_client *client = s->private;
899         struct rb_node *n;
900         ion_phys_addr_t addr;
901         size_t len;
902
903         seq_printf(s, "----------------------------------------------------\n");
904         seq_printf(s, "%16.16s: %12.12s %8.8s %4.4s %4.4s %4.4s\n",
905                 "heap_name", "addr", "size", "HC", "IBR", "IHR");
906         mutex_lock(&client->lock);
907         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
908                 struct ion_handle *handle = rb_entry(n, struct ion_handle, node);
909                 struct ion_buffer *buffer = handle->buffer;
910                 if (buffer->heap->ops->phys) {
911                         buffer->heap->ops->phys(buffer->heap, buffer, &addr, &len);
912                         seq_printf(s, "%16.16s: 0x%08lx %8zuKB %4d %4d %4d\n",
913                                 buffer->heap->name, addr, len>>10, buffer->handle_count,
914                                 atomic_read(&buffer->ref.refcount), 
915                                 atomic_read(&handle->ref.refcount));
916                 }
917         }
918         mutex_unlock(&client->lock);
919
920         return 0;
921 }
922
923 static int ion_debug_client_show(struct seq_file *s, void *unused)
924 {
925         struct ion_client *client = s->private;
926         struct rb_node *n;
927         size_t sizes[ION_NUM_HEAP_IDS] = {0};
928         const char *names[ION_NUM_HEAP_IDS] = {NULL};
929         int i;
930
931         mutex_lock(&client->lock);
932         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
933                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
934                                                      node);
935                 unsigned int id = handle->buffer->heap->id;
936
937                 if (!names[id])
938                         names[id] = handle->buffer->heap->name;
939                 sizes[id] += handle->buffer->size;
940         }
941         mutex_unlock(&client->lock);
942
943         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
944         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
945                 if (!names[i])
946                         continue;
947                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
948         }
949         ion_debug_client_show_buffer(s, unused);
950         return 0;
951 }
952
953 static int ion_debug_client_open(struct inode *inode, struct file *file)
954 {
955         return single_open(file, ion_debug_client_show, inode->i_private);
956 }
957
958 static const struct file_operations debug_client_fops = {
959         .open = ion_debug_client_open,
960         .read = seq_read,
961         .llseek = seq_lseek,
962         .release = single_release,
963 };
964
965 static int ion_get_client_serial(const struct rb_root *root,
966                                         const unsigned char *name)
967 {
968         int serial = -1;
969         struct rb_node *node;
970         for (node = rb_first(root); node; node = rb_next(node)) {
971                 struct ion_client *client = rb_entry(node, struct ion_client,
972                                                 node);
973                 if (strcmp(client->name, name))
974                         continue;
975                 serial = max(serial, client->display_serial);
976         }
977         return serial + 1;
978 }
979
980 struct ion_client *ion_client_create(struct ion_device *dev,
981                                      const char *name)
982 {
983         struct ion_client *client;
984         struct task_struct *task;
985         struct rb_node **p;
986         struct rb_node *parent = NULL;
987         struct ion_client *entry;
988         pid_t pid;
989
990         if (!name) {
991                 pr_err("%s: Name cannot be null\n", __func__);
992                 return ERR_PTR(-EINVAL);
993         }
994
995         get_task_struct(current->group_leader);
996         task_lock(current->group_leader);
997         pid = task_pid_nr(current->group_leader);
998         /* don't bother to store task struct for kernel threads,
999            they can't be killed anyway */
1000         if (current->group_leader->flags & PF_KTHREAD) {
1001                 put_task_struct(current->group_leader);
1002                 task = NULL;
1003         } else {
1004                 task = current->group_leader;
1005         }
1006         task_unlock(current->group_leader);
1007
1008         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
1009         if (!client)
1010                 goto err_put_task_struct;
1011
1012         client->dev = dev;
1013         client->handles = RB_ROOT;
1014         idr_init(&client->idr);
1015         mutex_init(&client->lock);
1016         client->task = task;
1017         client->pid = pid;
1018         client->name = kstrdup(name, GFP_KERNEL);
1019         if (!client->name)
1020                 goto err_free_client;
1021
1022         down_write(&dev->lock);
1023         client->display_serial = ion_get_client_serial(&dev->clients, name);
1024         client->display_name = kasprintf(
1025                 GFP_KERNEL, "%s-%d", name, client->display_serial);
1026         if (!client->display_name) {
1027                 up_write(&dev->lock);
1028                 goto err_free_client_name;
1029         }
1030         p = &dev->clients.rb_node;
1031         while (*p) {
1032                 parent = *p;
1033                 entry = rb_entry(parent, struct ion_client, node);
1034
1035                 if (client < entry)
1036                         p = &(*p)->rb_left;
1037                 else if (client > entry)
1038                         p = &(*p)->rb_right;
1039         }
1040         rb_link_node(&client->node, parent, p);
1041         rb_insert_color(&client->node, &dev->clients);
1042
1043         client->debug_root = debugfs_create_file(client->display_name, 0664,
1044                                                 dev->clients_debug_root,
1045                                                 client, &debug_client_fops);
1046         if (!client->debug_root) {
1047                 char buf[256], *path;
1048                 path = dentry_path(dev->clients_debug_root, buf, 256);
1049                 pr_err("Failed to create client debugfs at %s/%s\n",
1050                         path, client->display_name);
1051         }
1052
1053         up_write(&dev->lock);
1054
1055         return client;
1056
1057 err_free_client_name:
1058         kfree(client->name);
1059 err_free_client:
1060         kfree(client);
1061 err_put_task_struct:
1062         if (task)
1063                 put_task_struct(current->group_leader);
1064         return ERR_PTR(-ENOMEM);
1065 }
1066 EXPORT_SYMBOL(ion_client_create);
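
/*
 * A kernel driver typically creates one client per user of ion and keeps
 * it for that user's lifetime (sketch; "idev" is the ion device handed
 * out by the platform code that called ion_device_create()):
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 */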
1067
1068 void ion_client_destroy(struct ion_client *client)
1069 {
1070         struct ion_device *dev = client->dev;
1071         struct rb_node *n;
1072
1073         pr_debug("%s: %d\n", __func__, __LINE__);
1074         while ((n = rb_first(&client->handles))) {
1075                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1076                                                      node);
1077                 ion_handle_destroy(&handle->ref);
1078         }
1079
1080         idr_destroy(&client->idr);
1081
1082         down_write(&dev->lock);
1083         if (client->task)
1084                 put_task_struct(client->task);
1085         rb_erase(&client->node, &dev->clients);
1086         debugfs_remove_recursive(client->debug_root);
1087         up_write(&dev->lock);
1088
1089         kfree(client->display_name);
1090         kfree(client->name);
1091         kfree(client);
1092 }
1093 EXPORT_SYMBOL(ion_client_destroy);
1094
1095 struct sg_table *ion_sg_table(struct ion_client *client,
1096                               struct ion_handle *handle)
1097 {
1098         struct ion_buffer *buffer;
1099         struct sg_table *table;
1100
1101         mutex_lock(&client->lock);
1102         if (!ion_handle_validate(client, handle)) {
1103                 pr_err("%s: invalid handle passed to map_dma.\n",
1104                        __func__);
1105                 mutex_unlock(&client->lock);
1106                 return ERR_PTR(-EINVAL);
1107         }
1108         buffer = handle->buffer;
1109         table = buffer->sg_table;
1110         mutex_unlock(&client->lock);
1111         return table;
1112 }
1113 EXPORT_SYMBOL(ion_sg_table);
1114
1115 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1116                                        struct device *dev,
1117                                        enum dma_data_direction direction);
1118
1119 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1120                                         enum dma_data_direction direction)
1121 {
1122         struct dma_buf *dmabuf = attachment->dmabuf;
1123         struct ion_buffer *buffer = dmabuf->priv;
1124
1125         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
1126         return buffer->sg_table;
1127 }
1128
1129 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
1130                               struct sg_table *table,
1131                               enum dma_data_direction direction)
1132 {
1133 }
1134
1135 void ion_pages_sync_for_device(struct device *dev, struct page *page,
1136                 size_t size, enum dma_data_direction dir)
1137 {
1138         struct scatterlist sg;
1139
1140         sg_init_table(&sg, 1);
1141         sg_set_page(&sg, page, size, 0);
1142         /*
1143          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
1144          * for the targeted device, but this works on the currently targeted
1145          * hardware.
1146          */
1147         sg_dma_address(&sg) = page_to_phys(page);
1148         dma_sync_sg_for_device(dev, &sg, 1, dir);
1149 }
1150
1151 struct ion_vma_list {
1152         struct list_head list;
1153         struct vm_area_struct *vma;
1154 };
1155
1156 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
1157                                        struct device *dev,
1158                                        enum dma_data_direction dir)
1159 {
1160         struct ion_vma_list *vma_list;
1161         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
1162         int i;
1163
1164         pr_debug("%s: syncing for device %s\n", __func__,
1165                  dev ? dev_name(dev) : "null");
1166
1167         if (!ion_buffer_fault_user_mappings(buffer))
1168                 return;
1169
1170         mutex_lock(&buffer->lock);
1171         for (i = 0; i < pages; i++) {
1172                 struct page *page = buffer->pages[i];
1173
1174                 if (ion_buffer_page_is_dirty(page))
1175                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
1176                                                         PAGE_SIZE, dir);
1177
1178                 ion_buffer_page_clean(buffer->pages + i);
1179         }
1180         list_for_each_entry(vma_list, &buffer->vmas, list) {
1181                 struct vm_area_struct *vma = vma_list->vma;
1182
1183                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
1184                                NULL);
1185         }
1186         mutex_unlock(&buffer->lock);
1187 }
1188
1189 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1190 {
1191         struct ion_buffer *buffer = vma->vm_private_data;
1192         unsigned long pfn;
1193         int ret;
1194
1195         mutex_lock(&buffer->lock);
1196         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1197         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1198
1199         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1200         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1201         mutex_unlock(&buffer->lock);
1202         if (ret)
1203                 return VM_FAULT_ERROR;
1204
1205         return VM_FAULT_NOPAGE;
1206 }
1207
1208 static void ion_vm_open(struct vm_area_struct *vma)
1209 {
1210         struct ion_buffer *buffer = vma->vm_private_data;
1211         struct ion_vma_list *vma_list;
1212
1213         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
1214         if (!vma_list)
1215                 return;
1216         vma_list->vma = vma;
1217         mutex_lock(&buffer->lock);
1218         list_add(&vma_list->list, &buffer->vmas);
1219         mutex_unlock(&buffer->lock);
1220         pr_debug("%s: adding %p\n", __func__, vma);
1221 }
1222
1223 static void ion_vm_close(struct vm_area_struct *vma)
1224 {
1225         struct ion_buffer *buffer = vma->vm_private_data;
1226         struct ion_vma_list *vma_list, *tmp;
1227
1228         pr_debug("%s\n", __func__);
1229         mutex_lock(&buffer->lock);
1230         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1231                 if (vma_list->vma != vma)
1232                         continue;
1233                 list_del(&vma_list->list);
1234                 kfree(vma_list);
1235                 pr_debug("%s: deleting %p\n", __func__, vma);
1236                 break;
1237         }
1238         mutex_unlock(&buffer->lock);
1239 }
1240
1241 static struct vm_operations_struct ion_vma_ops = {
1242         .open = ion_vm_open,
1243         .close = ion_vm_close,
1244         .fault = ion_vm_fault,
1245 };
1246
1247 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1248 {
1249         struct ion_buffer *buffer = dmabuf->priv;
1250         int ret = 0;
1251
1252         if (!buffer->heap->ops->map_user) {
1253                 pr_err("%s: this heap does not define a method for mapping "
1254                        "to userspace\n", __func__);
1255                 return -EINVAL;
1256         }
1257
1258         if (ion_buffer_fault_user_mappings(buffer)) {
1259                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1260                                                         VM_DONTDUMP;
1261                 vma->vm_private_data = buffer;
1262                 vma->vm_ops = &ion_vma_ops;
1263                 ion_vm_open(vma);
1264                 return 0;
1265         }
1266
1267         if (!(buffer->flags & ION_FLAG_CACHED))
1268                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1269
1270         mutex_lock(&buffer->lock);
1271         /* now map it to userspace */
1272         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1273         mutex_unlock(&buffer->lock);
1274
1275         if (ret)
1276                 pr_err("%s: failure mapping buffer to userspace\n",
1277                        __func__);
1278
1279         return ret;
1280 }
1281
1282 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1283 {
1284         struct ion_buffer *buffer = dmabuf->priv;
1285         ion_buffer_put(buffer);
1286 }
1287
1288 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1289 {
1290         struct ion_buffer *buffer = dmabuf->priv;
1291         return buffer->vaddr + offset * PAGE_SIZE;
1292 }
1293
1294 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1295                                void *ptr)
1296 {
1297         return;
1298 }
1299
1300 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
1301                                         size_t len,
1302                                         enum dma_data_direction direction)
1303 {
1304         struct ion_buffer *buffer = dmabuf->priv;
1305         void *vaddr;
1306
1307         if (!buffer->heap->ops->map_kernel) {
1308                 pr_err("%s: map kernel is not implemented by this heap.\n",
1309                        __func__);
1310                 return -ENODEV;
1311         }
1312
1313         mutex_lock(&buffer->lock);
1314         vaddr = ion_buffer_kmap_get(buffer);
1315         mutex_unlock(&buffer->lock);
1316         if (IS_ERR(vaddr))
1317                 return PTR_ERR(vaddr);
1318         return 0;
1319 }
1320
1321 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
1322                                        size_t len,
1323                                        enum dma_data_direction direction)
1324 {
1325         struct ion_buffer *buffer = dmabuf->priv;
1326
1327         mutex_lock(&buffer->lock);
1328         ion_buffer_kmap_put(buffer);
1329         mutex_unlock(&buffer->lock);
1330 }
1331
1332 static struct dma_buf_ops dma_buf_ops = {
1333         .map_dma_buf = ion_map_dma_buf,
1334         .unmap_dma_buf = ion_unmap_dma_buf,
1335         .mmap = ion_mmap,
1336         .release = ion_dma_buf_release,
1337         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1338         .end_cpu_access = ion_dma_buf_end_cpu_access,
1339         .kmap_atomic = ion_dma_buf_kmap,
1340         .kunmap_atomic = ion_dma_buf_kunmap,
1341         .kmap = ion_dma_buf_kmap,
1342         .kunmap = ion_dma_buf_kunmap,
1343 };
1344
1345 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1346                                                 struct ion_handle *handle)
1347 {
1348         struct ion_buffer *buffer;
1349         struct dma_buf *dmabuf;
1350         bool valid_handle;
1351
1352         mutex_lock(&client->lock);
1353         valid_handle = ion_handle_validate(client, handle);
1354         if (!valid_handle) {
1355                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1356                 mutex_unlock(&client->lock);
1357                 return ERR_PTR(-EINVAL);
1358         }
1359         buffer = handle->buffer;
1360         ion_buffer_get(buffer);
1361         mutex_unlock(&client->lock);
1362
1363         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1364         if (IS_ERR(dmabuf)) {
1365                 ion_buffer_put(buffer);
1366                 return dmabuf;
1367         }
1368
1369         return dmabuf;
1370 }
1371 EXPORT_SYMBOL(ion_share_dma_buf);
1372
1373 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1374 {
1375         struct dma_buf *dmabuf;
1376         int fd;
1377
1378         dmabuf = ion_share_dma_buf(client, handle);
1379         if (IS_ERR(dmabuf))
1380                 return PTR_ERR(dmabuf);
1381
1382         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1383         if (fd < 0)
1384                 dma_buf_put(dmabuf);
1385
1386         return fd;
1387 }
1388 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1389
1390 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1391 {
1392         struct dma_buf *dmabuf;
1393         struct ion_buffer *buffer;
1394         struct ion_handle *handle;
1395         int ret;
1396
1397         dmabuf = dma_buf_get(fd);
1398         if (IS_ERR(dmabuf))
1399                 return ERR_PTR(PTR_ERR(dmabuf));
1400         /* if this memory came from ion */
1401
1402         if (dmabuf->ops != &dma_buf_ops) {
1403                 pr_err("%s: can not import dmabuf from another exporter\n",
1404                        __func__);
1405                 dma_buf_put(dmabuf);
1406                 return ERR_PTR(-EINVAL);
1407         }
1408         buffer = dmabuf->priv;
1409
1410         mutex_lock(&client->lock);
1411         /* if a handle exists for this buffer just take a reference to it */
1412         handle = ion_handle_lookup(client, buffer);
1413         if (!IS_ERR(handle)) {
1414                 ion_handle_get(handle);
1415                 mutex_unlock(&client->lock);
1416                 goto end;
1417         }
1418         mutex_unlock(&client->lock);
1419
1420         handle = ion_handle_create(client, buffer);
1421         if (IS_ERR(handle))
1422                 goto end;
1423
1424         mutex_lock(&client->lock);
1425         ret = ion_handle_add(client, handle);
1426         mutex_unlock(&client->lock);
1427         if (ret) {
1428                 ion_handle_put(handle);
1429                 handle = ERR_PTR(ret);
1430         }
1431
1432 end:
1433         dma_buf_put(dmabuf);
1434         return handle;
1435 }
1436 EXPORT_SYMBOL(ion_import_dma_buf);
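
/*
 * Sharing flow sketch: one client exports a buffer as a dma-buf fd and
 * another client (possibly in a different process) imports it back into
 * a handle.  Names here are illustrative:
 *
 *	struct ion_handle *handle_b;
 *	int fd;
 *
 *	fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	if (fd < 0)
 *		return fd;
 *	handle_b = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR(handle_b))
 *		return PTR_ERR(handle_b);
 */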
1437
1438 static int ion_sync_for_device(struct ion_client *client, int fd)
1439 {
1440         struct dma_buf *dmabuf;
1441         struct ion_buffer *buffer;
1442
1443         dmabuf = dma_buf_get(fd);
1444         if (IS_ERR(dmabuf))
1445                 return PTR_ERR(dmabuf);
1446
1447         /* if this memory came from ion */
1448         if (dmabuf->ops != &dma_buf_ops) {
1449                 pr_err("%s: can not sync dmabuf from another exporter\n",
1450                        __func__);
1451                 dma_buf_put(dmabuf);
1452                 return -EINVAL;
1453         }
1454         buffer = dmabuf->priv;
1455
1456         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1457                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1458         dma_buf_put(dmabuf);
1459         return 0;
1460 }
1461
1462 /* fix up the cases where the ioctl direction bits are incorrect */
1463 static unsigned int ion_ioctl_dir(unsigned int cmd)
1464 {
1465         switch (cmd) {
1466         case ION_IOC_SYNC:
1467         case ION_IOC_FREE:
1468         case ION_IOC_CUSTOM:
1469                 return _IOC_WRITE;
1470         default:
1471                 return _IOC_DIR(cmd);
1472         }
1473 }
1474
1475 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1476 {
1477         struct ion_client *client = filp->private_data;
1478         struct ion_device *dev = client->dev;
1479         struct ion_handle *cleanup_handle = NULL;
1480         int ret = 0;
1481         unsigned int dir;
1482
1483         union {
1484                 struct ion_fd_data fd;
1485                 struct ion_allocation_data allocation;
1486                 struct ion_handle_data handle;
1487                 struct ion_custom_data custom;
1488         } data;
1489
1490         dir = ion_ioctl_dir(cmd);
1491
1492         if (_IOC_SIZE(cmd) > sizeof(data))
1493                 return -EINVAL;
1494
1495         if (dir & _IOC_WRITE)
1496                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1497                         return -EFAULT;
1498
1499         switch (cmd) {
1500         case ION_IOC_ALLOC:
1501         {
1502                 struct ion_handle *handle;
1503
1504                 handle = ion_alloc(client, data.allocation.len,
1505                                                 data.allocation.align,
1506                                                 data.allocation.heap_id_mask,
1507                                                 data.allocation.flags);
1508                 if (IS_ERR(handle))
1509                         return PTR_ERR(handle);
1510
1511                 data.allocation.handle = handle->id;
1512
1513                 cleanup_handle = handle;
1514                 break;
1515         }
1516         case ION_IOC_FREE:
1517         {
1518                 struct ion_handle *handle;
1519
1520                 handle = ion_handle_get_by_id(client, data.handle.handle);
1521                 if (IS_ERR(handle))
1522                         return PTR_ERR(handle);
1523                 ion_free(client, handle);
1524                 ion_handle_put(handle);
1525                 break;
1526         }
1527         case ION_IOC_SHARE:
1528         case ION_IOC_MAP:
1529         {
1530                 struct ion_handle *handle;
1531
1532                 handle = ion_handle_get_by_id(client, data.handle.handle);
1533                 if (IS_ERR(handle))
1534                         return PTR_ERR(handle);
1535                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1536                 ion_handle_put(handle);
1537                 if (data.fd.fd < 0)
1538                         ret = data.fd.fd;
1539                 break;
1540         }
1541         case ION_IOC_IMPORT:
1542         {
1543                 struct ion_handle *handle;
1544                 handle = ion_import_dma_buf(client, data.fd.fd);
1545                 if (IS_ERR(handle))
1546                         ret = PTR_ERR(handle);
1547                 else
1548                         data.handle.handle = handle->id;
1549                 break;
1550         }
1551         case ION_IOC_SYNC:
1552         {
1553                 ret = ion_sync_for_device(client, data.fd.fd);
1554                 break;
1555         }
1556         case ION_IOC_CUSTOM:
1557         {
1558                 if (!dev->custom_ioctl)
1559                         return -ENOTTY;
1560                 ret = dev->custom_ioctl(client, data.custom.cmd,
1561                                                 data.custom.arg);
1562                 break;
1563         }
1564         default:
1565                 return -ENOTTY;
1566         }
1567
1568         if (dir & _IOC_READ) {
1569                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1570                         if (cleanup_handle)
1571                                 ion_free(client, cleanup_handle);
1572                         return -EFAULT;
1573                 }
1574         }
1575         return ret;
1576 }
1577
1578 static int ion_release(struct inode *inode, struct file *file)
1579 {
1580         struct ion_client *client = file->private_data;
1581
1582         pr_debug("%s: %d\n", __func__, __LINE__);
1583         ion_client_destroy(client);
1584         return 0;
1585 }
1586
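/*
 * Each open of /dev/ion creates its own ion_client, named after the opening
 * process's thread-group pid for debugfs output.
 */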
1587 static int ion_open(struct inode *inode, struct file *file)
1588 {
1589         struct miscdevice *miscdev = file->private_data;
1590         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1591         struct ion_client *client;
1592         char debug_name[64];
1593
1594         pr_debug("%s: %d\n", __func__, __LINE__);
1595         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1596         client = ion_client_create(dev, debug_name);
1597         if (IS_ERR(client))
1598                 return PTR_ERR(client);
1599         file->private_data = client;
1600
1601         return 0;
1602 }
1603
1604 static const struct file_operations ion_fops = {
1605         .owner          = THIS_MODULE,
1606         .open           = ion_open,
1607         .release        = ion_release,
1608         .unlocked_ioctl = ion_ioctl,
1609         .compat_ioctl   = compat_ion_ioctl,
1610 };
1611
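/*
 * ion_do_cache_op() - forward a cache maintenance request to the heap.
 *
 * Validates @handle under the client lock, returns 0 for buffers that were
 * not allocated as cached, and otherwise calls the heap's ->cache_op() hook
 * (an ion_heap_ops extension in this tree) with the user address, offset,
 * length and command. Returns -ENODEV if the heap does not implement
 * ->cache_op().
 */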
1612 int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
1613                         void *uaddr, unsigned long offset, unsigned long len,
1614                         unsigned int cmd)
1615 {
1616         struct ion_buffer *buffer;
1617         int ret = -EINVAL;
1618
1619         mutex_lock(&client->lock);
1620         if (!ion_handle_validate(client, handle)) {
1621                 pr_err("%s: invalid handle passed to do_cache_op.\n",
1622                        __func__);
1623                 mutex_unlock(&client->lock);
1624                 return -EINVAL;
1625         }
1626         buffer = handle->buffer;
1627         mutex_lock(&buffer->lock);
1628
1629         if (!ION_IS_CACHED(buffer->flags)) {
1630                 ret = 0;
1631                 goto out;
1632         }
1633
1634         if (!handle->buffer->heap->ops->cache_op) {
1635                 pr_err("%s: cache_op is not implemented by this heap.\n",
1636                        __func__);
1637                 ret = -ENODEV;
1638                 goto out;
1639         }
1640
1641
1642         ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
1643                                                 offset, len, cmd);
1644
1645 out:
1646         mutex_unlock(&buffer->lock);
1647         mutex_unlock(&client->lock);
1648         return ret;
1650 }
1651 EXPORT_SYMBOL(ion_do_cache_op);
1652
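/*
 * Sum the sizes of all buffers held by @client whose heap id matches @id;
 * used by the per-heap debugfs show routine below.
 */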
1653 static size_t ion_debug_heap_total(struct ion_client *client,
1654                                    unsigned int id)
1655 {
1656         size_t size = 0;
1657         struct rb_node *n;
1658
1659         mutex_lock(&client->lock);
1660         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1661                 struct ion_handle *handle = rb_entry(n,
1662                                                      struct ion_handle,
1663                                                      node);
1664                 if (handle->buffer->heap->id == id)
1665                         size += handle->buffer->size;
1666         }
1667         mutex_unlock(&client->lock);
1668         return size;
1669 }
1670
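/*
 * debugfs show routine for the per-heap file under ion/heaps/: prints
 * per-client usage for this heap, orphaned buffers (buffers with no
 * remaining handles), and size totals.
 */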
1671 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1672 {
1673         struct ion_heap *heap = s->private;
1674         struct ion_device *dev = heap->dev;
1675         struct rb_node *n;
1676         size_t total_size = 0;
1677         size_t total_orphaned_size = 0;
1678
1679         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1680         seq_puts(s, "----------------------------------------------------\n");
1681
1682         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1683                 struct ion_client *client = rb_entry(n, struct ion_client,
1684                                                      node);
1685                 size_t size = ion_debug_heap_total(client, heap->id);
1686                 if (!size)
1687                         continue;
1688                 if (client->task) {
1689                         char task_comm[TASK_COMM_LEN];
1690
1691                         get_task_comm(task_comm, client->task);
1692                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1693                                    client->pid, size);
1694                 } else {
1695                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1696                                    client->pid, size);
1697                 }
1698         }
1699         seq_puts(s, "----------------------------------------------------\n");
1700         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1702         mutex_lock(&dev->buffer_lock);
1703         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1704                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1705                                                      node);
1706                 if (buffer->heap->id != heap->id)
1707                         continue;
1708                 total_size += buffer->size;
1709                 if (!buffer->handle_count) {
1710                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1711                                    buffer->task_comm, buffer->pid,
1712                                    buffer->size, buffer->kmap_cnt,
1713                                    atomic_read(&buffer->ref.refcount));
1714                         total_orphaned_size += buffer->size;
1715                 }
1716         }
1717         mutex_unlock(&dev->buffer_lock);
1718         seq_puts(s, "----------------------------------------------------\n");
1719         seq_printf(s, "%16s %16zu\n", "total orphaned",
1720                    total_orphaned_size);
1721         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1722         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1723                 seq_printf(s, "%16s %16zu\n", "deferred free",
1724                            heap->free_list_size);
1725         seq_puts(s, "----------------------------------------------------\n");
1726
1727         if (heap->debug_show)
1728                 heap->debug_show(heap, s, unused);
1729
1730         return 0;
1731 }
1732
1733 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1734 {
1735         return single_open(file, ion_debug_heap_show, inode->i_private);
1736 }
1737
1738 static const struct file_operations debug_heap_fops = {
1739         .open = ion_debug_heap_open,
1740         .read = seq_read,
1741         .llseek = seq_lseek,
1742         .release = single_release,
1743 };
1744
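/*
 * With DEBUG_HEAP_SHRINKER defined, each heap gets a "<name>_shrink" debugfs
 * file: reading it reports how many objects the heap's shrinker could free,
 * and writing a non-zero value runs the shrinker over that many objects.
 */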
1745 #ifdef DEBUG_HEAP_SHRINKER
1746 static int debug_shrink_set(void *data, u64 val)
1747 {
1748         struct ion_heap *heap = data;
1749         struct shrink_control sc;
1750         int objs;
1751
1752         sc.gfp_mask = -1;
1753         sc.nr_to_scan = 0;
1754
1755         if (!val)
1756                 return 0;
1757
1758         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1759         sc.nr_to_scan = objs;
1760
1761         heap->shrinker.shrink(&heap->shrinker, &sc);
1762         return 0;
1763 }
1764
1765 static int debug_shrink_get(void *data, u64 *val)
1766 {
1767         struct ion_heap *heap = data;
1768         struct shrink_control sc;
1769         int objs;
1770
1771         sc.gfp_mask = -1;
1772         sc.nr_to_scan = 0;
1773
1774         objs = heap->shrinker.shrink(&heap->shrinker, &sc);
1775         *val = objs;
1776         return 0;
1777 }
1778
1779 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1780                         debug_shrink_set, "%llu\n");
1781 #endif
1782
1783 #ifdef CONFIG_CMA
1784 /* Local copy of struct cma from drivers/base/dma-contiguous.c; must stay in sync. */
1785 struct cma {
1786         unsigned long   base_pfn;
1787         unsigned long   count;
1788         unsigned long   *bitmap;
1789 };
1790
1791 /* Local copy of struct ion_cma_heap from drivers/staging/android/ion/ion_cma_heap.c; must stay in sync. */
1792 struct ion_cma_heap {
1793         struct ion_heap heap;
1794         struct device *dev;
1795 };
1796
1797 static int ion_cma_heap_debug_show(struct seq_file *s, void *unused)
1798 {
1799         struct ion_heap *heap = s->private;
1800         struct ion_cma_heap *cma_heap = container_of(heap,
1801                                                         struct ion_cma_heap,
1802                                                         heap);
1803         struct device *dev = cma_heap->dev;
1804         struct cma *cma = dev_get_cma_area(dev);
1805         int i;
1806         int rows = cma->count/(SZ_1M >> PAGE_SHIFT);
1807         phys_addr_t base = __pfn_to_phys(cma->base_pfn);
1808
1809         seq_printf(s, "%s Heap bitmap:\n", heap->name);
1810
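             /*
              * Dump the CMA allocation bitmap, one row per MiB of the area.
              * Eight bitmap words per row covers 1 MiB only for 4 KiB pages
              * and 32-bit unsigned long (256 pages == 8 * 32 bits), which is
              * what this layout assumes.
              */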
1811         for (i = rows - 1; i >= 0; i--) {
1812                 seq_printf(s, "%.4uM@0x%08x: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
1813                                 i + 1, base + i * SZ_1M,
1814                                 cma->bitmap[i*8 + 7],
1815                                 cma->bitmap[i*8 + 6],
1816                                 cma->bitmap[i*8 + 5],
1817                                 cma->bitmap[i*8 + 4],
1818                                 cma->bitmap[i*8 + 3],
1819                                 cma->bitmap[i*8 + 2],
1820                                 cma->bitmap[i*8 + 1],
1821                                 cma->bitmap[i*8]);
1822         }
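             /* cma->count is in pages; ">> 8" converts to MiB for 4 KiB pages. */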
1823         seq_printf(s, "Heap size: %luM, Heap base: 0x%08x\n",
1824                 cma->count >> 8, base);
1825
1826         return 0;
1827 }
1828
1829 static int ion_debug_heap_bitmap_open(struct inode *inode, struct file *file)
1830 {
1831         return single_open(file, ion_cma_heap_debug_show, inode->i_private);
1832 }
1833
1834 static const struct file_operations debug_heap_bitmap_fops = {
1835         .open = ion_debug_heap_bitmap_open,
1836         .read = seq_read,
1837         .llseek = seq_lseek,
1838         .release = single_release,
1839 };
1840 #endif
1841
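/*
 * Register @heap with @dev: warn if the mandatory ops are missing, set up
 * deferred freeing and the shrinker when requested, add the heap to the
 * device's priority list, and create its debugfs entries (including the CMA
 * bitmap file for DMA heaps when CONFIG_CMA is enabled).
 */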
1842 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1843 {
1844         struct dentry *debug_file;
1845
1846         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1847             !heap->ops->unmap_dma)
1848                 pr_err("%s: can not add heap with invalid ops struct.\n",
1849                        __func__);
1850
1851         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1852                 ion_heap_init_deferred_free(heap);
1853
1854         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1855                 ion_heap_init_shrinker(heap);
1856
1857         heap->dev = dev;
1858         down_write(&dev->lock);
1859         /* Use negative heap->id to reverse the priority: when traversing
1860          * the list later, heaps with higher ids are tried first. */
1861         plist_node_init(&heap->node, -heap->id);
1862         plist_add(&heap->node, &dev->heaps);
1863         debug_file = debugfs_create_file(heap->name, 0664,
1864                                         dev->heaps_debug_root, heap,
1865                                         &debug_heap_fops);
1866
1867         if (!debug_file) {
1868                 char buf[256], *path;
1869                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1870                 pr_err("Failed to create heap debugfs at %s/%s\n",
1871                         path, heap->name);
1872         }
1873
1874 #ifdef DEBUG_HEAP_SHRINKER
1875         if (heap->shrinker.shrink) {
1876                 char debug_name[64];
1877
1878                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1879                 debug_file = debugfs_create_file(
1880                         debug_name, 0644, dev->heaps_debug_root, heap,
1881                         &debug_shrink_fops);
1882                 if (!debug_file) {
1883                         char buf[256], *path;
1884                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1885                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1886                                 path, debug_name);
1887                 }
1888         }
1889 #endif
1890 #ifdef CONFIG_CMA
1891         if (heap->type == ION_HEAP_TYPE_DMA) {
1892                 char *heap_bitmap_name = kasprintf(
1893                         GFP_KERNEL, "%s-bitmap", heap->name);
1894                 debug_file = debugfs_create_file(heap_bitmap_name, 0664,
1895                                                 dev->heaps_debug_root, heap,
1896                                                 &debug_heap_bitmap_fops);
1897                 if (!debug_file) {
1898                         char buf[256], *path;
1899                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1900                         pr_err("Failed to create heap debugfs at %s/%s\n",
1901                                 path, heap_bitmap_name);
1902                 }
1903                 kfree(heap_bitmap_name);
1904         }
1905 #endif
1906         up_write(&dev->lock);
1907 }
1908
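/*
 * Allocate the ion device, register it as the "ion" misc device (/dev/ion)
 * and create its debugfs hierarchy; debugfs failures are logged but not
 * treated as fatal.
 */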
1909 struct ion_device *ion_device_create(long (*custom_ioctl)
1910                                      (struct ion_client *client,
1911                                       unsigned int cmd,
1912                                       unsigned long arg))
1913 {
1914         struct ion_device *idev;
1915         int ret;
1916
1917         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1918         if (!idev)
1919                 return ERR_PTR(-ENOMEM);
1920
1921         idev->dev.minor = MISC_DYNAMIC_MINOR;
1922         idev->dev.name = "ion";
1923         idev->dev.fops = &ion_fops;
1924         idev->dev.parent = NULL;
1925         ret = misc_register(&idev->dev);
1926         if (ret) {
1927                 pr_err("ion: failed to register misc device.\n");
                     kfree(idev);
1928                 return ERR_PTR(ret);
1929         }
1930
1931         idev->debug_root = debugfs_create_dir("ion", NULL);
1932         if (!idev->debug_root) {
1933                 pr_err("ion: failed to create debugfs root directory.\n");
1934                 goto debugfs_done;
1935         }
1936         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1937         if (!idev->heaps_debug_root) {
1938                 pr_err("ion: failed to create debugfs heaps directory.\n");
1939                 goto debugfs_done;
1940         }
1941         idev->clients_debug_root = debugfs_create_dir("clients",
1942                                                 idev->debug_root);
1943         if (!idev->clients_debug_root)
1944                 pr_err("ion: failed to create debugfs clients directory.\n");
1945
1946 debugfs_done:
1947
1948         idev->custom_ioctl = custom_ioctl;
1949         idev->buffers = RB_ROOT;
1950         mutex_init(&idev->buffer_lock);
1951         init_rwsem(&idev->lock);
1952         plist_head_init(&idev->heaps);
1953         idev->clients = RB_ROOT;
1954         return idev;
1955 }
1956
1957 void ion_device_destroy(struct ion_device *dev)
1958 {
1959         misc_deregister(&dev->dev);
1960         debugfs_remove_recursive(dev->debug_root);
1961         /* XXX need to free the heaps and clients ? */
1962         kfree(dev);
1963 }
1964
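/*
 * Reserve physical memory for the platform's heaps at boot: heaps with a
 * fixed base are reserved in place via memblock_reserve(), while heaps with
 * base == 0 have memory allocated for them from memblock.
 */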
1965 void __init ion_reserve(struct ion_platform_data *data)
1966 {
1967         int i;
1968
1969         for (i = 0; i < data->nr; i++) {
1970                 if (data->heaps[i].size == 0)
1971                         continue;
1972
1973                 if (data->heaps[i].base == 0) {
1974                         phys_addr_t paddr;
1975                         paddr = memblock_alloc_base(data->heaps[i].size,
1976                                                     data->heaps[i].align,
1977                                                     MEMBLOCK_ALLOC_ANYWHERE);
1978                         if (!paddr) {
1979                                 pr_err("%s: error allocating memblock for heap %d\n",
1980                                        __func__, i);
1982                                 continue;
1983                         }
1984                         data->heaps[i].base = paddr;
1985                 } else {
1986                         int ret = memblock_reserve(data->heaps[i].base,
1987                                                data->heaps[i].size);
1988                         if (ret)
1989                                 pr_err("memblock reserve of %zx@%lx failed\n",
1990                                        data->heaps[i].size,
1991                                        data->heaps[i].base);
1992                 }
1993                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1994                         data->heaps[i].name,
1995                         data->heaps[i].base,
1996                         data->heaps[i].size);
1997         }
1998 }