gpu: ion: Add cache maintenance to ion.
[firefly-linux-kernel-4.4.55.git] drivers/gpu/ion/ion.c
1 /*
2  * drivers/gpu/ion/ion.c
3  *
4  * Copyright (C) 2011 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #include <linux/device.h>
18 #include <linux/file.h>
19 #include <linux/fs.h>
20 #include <linux/anon_inodes.h>
21 #include <linux/ion.h>
22 #include <linux/list.h>
23 #include <linux/memblock.h>
24 #include <linux/miscdevice.h>
25 #include <linux/export.h>
26 #include <linux/mm.h>
27 #include <linux/mm_types.h>
28 #include <linux/rbtree.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/seq_file.h>
32 #include <linux/uaccess.h>
33 #include <linux/debugfs.h>
34 #include <linux/dma-buf.h>
35
36 #include "ion_priv.h"
37
38 /**
39  * struct ion_device - the metadata of the ion device node
40  * @dev:                the actual misc device
41  * @buffers:            an rb tree of all the existing buffers
42  * @lock:               lock protecting the buffers & heaps trees
43  * @heaps:              an rb tree of all the heaps in the system
44  * @clients:            an rb tree of all the clients attached to this device
45  */
46 struct ion_device {
47         struct miscdevice dev;
48         struct rb_root buffers;
49         struct mutex lock;
50         struct rb_root heaps;
51         long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
52                               unsigned long arg);
53         struct rb_root clients;
54         struct dentry *debug_root;
55 };
56
57 /**
58  * struct ion_client - a process/hw block local address space
59  * @node:               node in the tree of all clients
60  * @dev:                backpointer to ion device
61  * @handles:            an rb tree of all the handles in this client
62  * @lock:               lock protecting the tree of handles
63  * @heap_mask:          mask of all supported heaps
64  * @name:               used for debugging
65  * @task:               used for debugging
66  *
67  * A client represents a list of buffers this client may access.
68  * The mutex stored here is used to protect both the tree of handles
69  * and the handles themselves, and should be held while modifying either.
70  */
71 struct ion_client {
72         struct rb_node node;
73         struct ion_device *dev;
74         struct rb_root handles;
75         struct mutex lock;
76         unsigned int heap_mask;
77         const char *name;
78         struct task_struct *task;
79         pid_t pid;
80         struct dentry *debug_root;
81 };
82
83 /**
84  * ion_handle - a client local reference to a buffer
85  * @ref:                reference count
86  * @client:             back pointer to the client the buffer resides in
87  * @buffer:             pointer to the buffer
88  * @node:               node in the client's handle rbtree
89  * @kmap_cnt:           count of times this client has mapped to kernel
90  *
91  * Modifications to node and kmap_cnt should be protected by the
92  * lock in the client.  Other fields are never changed after
93  * initialization.
94  */
95 struct ion_handle {
96         struct kref ref;
97         struct ion_client *client;
98         struct ion_buffer *buffer;
99         struct rb_node node;
100         unsigned int kmap_cnt;
101 };
102
103 /* this function should only be called while dev->lock is held */
104 static void ion_buffer_add(struct ion_device *dev,
105                            struct ion_buffer *buffer)
106 {
107         struct rb_node **p = &dev->buffers.rb_node;
108         struct rb_node *parent = NULL;
109         struct ion_buffer *entry;
110
111         while (*p) {
112                 parent = *p;
113                 entry = rb_entry(parent, struct ion_buffer, node);
114
115                 if (buffer < entry) {
116                         p = &(*p)->rb_left;
117                 } else if (buffer > entry) {
118                         p = &(*p)->rb_right;
119                 } else {
120                         pr_err("%s: buffer already found.\n", __func__);
121                         BUG();
122                 }
123         }
124
125         rb_link_node(&buffer->node, parent, p);
126         rb_insert_color(&buffer->node, &dev->buffers);
127 }
128
129 static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
130
131 /* this function should only be called while dev->lock is held */
132 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
133                                      struct ion_device *dev,
134                                      unsigned long len,
135                                      unsigned long align,
136                                      unsigned long flags)
137 {
138         struct ion_buffer *buffer;
139         struct sg_table *table;
140         struct scatterlist *sg;
141         int i, ret;
142
143         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
144         if (!buffer)
145                 return ERR_PTR(-ENOMEM);
146
147         buffer->heap = heap;
148         kref_init(&buffer->ref);
149
150         ret = heap->ops->allocate(heap, buffer, len, align, flags);
151         if (ret) {
152                 kfree(buffer);
153                 return ERR_PTR(ret);
154         }
155
156         buffer->dev = dev;
157         buffer->size = len;
158         buffer->flags = flags;
159
160         table = heap->ops->map_dma(heap, buffer);
161         if (IS_ERR_OR_NULL(table)) {
162                 heap->ops->free(buffer);
163                 kfree(buffer);
164                 return ERR_PTR(PTR_ERR(table));
165         }
166         buffer->sg_table = table;
167         if (buffer->flags & ION_FLAG_CACHED)
168                 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
169                             i) {
170                         if (sg_dma_len(sg) == PAGE_SIZE)
171                                 continue;
172                         pr_err("%s: cached mappings must have pagewise "
173                                "sg_lists\n", __func__);
174                         heap->ops->unmap_dma(heap, buffer);
175                         kfree(buffer);
176                         return ERR_PTR(-EINVAL);
177                 }
178
179         ret = ion_buffer_alloc_dirty(buffer);
180         if (ret) {
181                 heap->ops->unmap_dma(heap, buffer);
182                 heap->ops->free(buffer);
183                 kfree(buffer);
184                 return ERR_PTR(ret);
185         }
186
187         buffer->dev = dev;
188         buffer->size = len;
189         INIT_LIST_HEAD(&buffer->vmas);
190         mutex_init(&buffer->lock);
191         /* this will set up dma addresses for the sglist -- it is not
192            technically correct as per the dma api -- a specific
193            device isn't really taking ownership here.  However, in practice on
194            our systems the only dma_address space is physical addresses.
195            Additionally, we can't afford the overhead of invalidating every
196            allocation via dma_map_sg. The implicit contract here is that
197            memory coming from the heaps is ready for dma, i.e. if it has a
198            cached mapping that mapping has been invalidated */
199         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
200                 sg_dma_address(sg) = sg_phys(sg);
201         ion_buffer_add(dev, buffer);
202         return buffer;
203 }
204
205 static void ion_buffer_destroy(struct kref *kref)
206 {
207         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
208         struct ion_device *dev = buffer->dev;
209
210         if (WARN_ON(buffer->kmap_cnt > 0))
211                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
212
213         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
214         buffer->heap->ops->free(buffer);
215         mutex_lock(&dev->lock);
216         rb_erase(&buffer->node, &dev->buffers);
217         mutex_unlock(&dev->lock);
218         kfree(buffer);
219 }
220
221 static void ion_buffer_get(struct ion_buffer *buffer)
222 {
223         kref_get(&buffer->ref);
224 }
225
226 static int ion_buffer_put(struct ion_buffer *buffer)
227 {
228         return kref_put(&buffer->ref, ion_buffer_destroy);
229 }
230
231 static struct ion_handle *ion_handle_create(struct ion_client *client,
232                                      struct ion_buffer *buffer)
233 {
234         struct ion_handle *handle;
235
236         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
237         if (!handle)
238                 return ERR_PTR(-ENOMEM);
239         kref_init(&handle->ref);
240         RB_CLEAR_NODE(&handle->node);
241         handle->client = client;
242         ion_buffer_get(buffer);
243         handle->buffer = buffer;
244
245         return handle;
246 }
247
248 static void ion_handle_kmap_put(struct ion_handle *);
249
250 static void ion_handle_destroy(struct kref *kref)
251 {
252         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
253         struct ion_client *client = handle->client;
254         struct ion_buffer *buffer = handle->buffer;
255
256         mutex_lock(&client->lock);
257
258         mutex_lock(&buffer->lock);
259         while (handle->kmap_cnt)
260                 ion_handle_kmap_put(handle);
261         mutex_unlock(&buffer->lock);
262
263         if (!RB_EMPTY_NODE(&handle->node))
264                 rb_erase(&handle->node, &client->handles);
265         mutex_unlock(&client->lock);
266
267         ion_buffer_put(buffer);
268         kfree(handle);
269 }
270
271 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
272 {
273         return handle->buffer;
274 }
275
276 static void ion_handle_get(struct ion_handle *handle)
277 {
278         kref_get(&handle->ref);
279 }
280
281 static int ion_handle_put(struct ion_handle *handle)
282 {
283         return kref_put(&handle->ref, ion_handle_destroy);
284 }
285
286 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
287                                             struct ion_buffer *buffer)
288 {
289         struct rb_node *n;
290
291         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
292                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
293                                                      node);
294                 if (handle->buffer == buffer)
295                         return handle;
296         }
297         return NULL;
298 }
299
300 static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
301 {
302         struct rb_node *n = client->handles.rb_node;
303
304         while (n) {
305                 struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
306                                                           node);
307                 if (handle < handle_node)
308                         n = n->rb_left;
309                 else if (handle > handle_node)
310                         n = n->rb_right;
311                 else
312                         return true;
313         }
314         return false;
315 }
316
317 static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
318 {
319         struct rb_node **p = &client->handles.rb_node;
320         struct rb_node *parent = NULL;
321         struct ion_handle *entry;
322
323         while (*p) {
324                 parent = *p;
325                 entry = rb_entry(parent, struct ion_handle, node);
326
327                 if (handle < entry)
328                         p = &(*p)->rb_left;
329                 else if (handle > entry)
330                         p = &(*p)->rb_right;
331                 else
332                         WARN(1, "%s: handle already found.", __func__);
333         }
334
335         rb_link_node(&handle->node, parent, p);
336         rb_insert_color(&handle->node, &client->handles);
337 }
338
339 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
340                              size_t align, unsigned int heap_mask,
341                              unsigned int flags)
342 {
343         struct rb_node *n;
344         struct ion_handle *handle;
345         struct ion_device *dev = client->dev;
346         struct ion_buffer *buffer = NULL;
347
348         pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
349                  len, align, heap_mask, flags);
350         /*
351          * traverse the list of heaps available in this system in priority
352          * order.  If the heap type is supported by the client and matches the
353          * request of the caller, allocate from it.  Repeat until allocation has
354          * succeeded or all heaps have been tried.
355          */
356         if (WARN_ON(!len))
357                 return ERR_PTR(-EINVAL);
358
359         len = PAGE_ALIGN(len);
360
361         mutex_lock(&dev->lock);
362         for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
363                 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
364                 /* if the client doesn't support this heap type */
365                 if (!((1 << heap->type) & client->heap_mask))
366                         continue;
367                 /* if the caller didn't specify this heap type */
368                 if (!((1 << heap->id) & heap_mask))
369                         continue;
370                 buffer = ion_buffer_create(heap, dev, len, align, flags);
371                 if (!IS_ERR_OR_NULL(buffer))
372                         break;
373         }
374         mutex_unlock(&dev->lock);
375
376         if (buffer == NULL)
377                 return ERR_PTR(-ENODEV);
378
379         if (IS_ERR(buffer))
380                 return ERR_PTR(PTR_ERR(buffer));
381
382         handle = ion_handle_create(client, buffer);
383
384         /*
385          * ion_buffer_create will create a buffer with a ref_cnt of 1,
386          * and ion_handle_create will take a second reference, drop one here
387          */
388         ion_buffer_put(buffer);
389
390         if (!IS_ERR(handle)) {
391                 mutex_lock(&client->lock);
392                 ion_handle_add(client, handle);
393                 mutex_unlock(&client->lock);
394         }
395
396
397         return handle;
398 }
399
400 void ion_free(struct ion_client *client, struct ion_handle *handle)
401 {
402         bool valid_handle;
403
404         BUG_ON(client != handle->client);
405
406         mutex_lock(&client->lock);
407         valid_handle = ion_handle_validate(client, handle);
408         mutex_unlock(&client->lock);
409
410         if (!valid_handle) {
411                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
412                 return;
413         }
414         ion_handle_put(handle);
415 }
416
417 int ion_phys(struct ion_client *client, struct ion_handle *handle,
418              ion_phys_addr_t *addr, size_t *len)
419 {
420         struct ion_buffer *buffer;
421         int ret;
422
423         mutex_lock(&client->lock);
424         if (!ion_handle_validate(client, handle)) {
425                 mutex_unlock(&client->lock);
426                 return -EINVAL;
427         }
428
429         buffer = handle->buffer;
430
431         if (!buffer->heap->ops->phys) {
432                 pr_err("%s: ion_phys is not implemented by this heap.\n",
433                        __func__);
434                 mutex_unlock(&client->lock);
435                 return -ENODEV;
436         }
437         mutex_unlock(&client->lock);
438         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
439         return ret;
440 }
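A minimal sketch (not part of this file) of an in-kernel caller of ion_phys(); the function name example_get_phys is hypothetical. Only heaps that implement ->phys(), such as carveout-style heaps, succeed here; others return -ENODEV as shown above.

#include <linux/err.h>
#include <linux/ion.h>
#include <linux/kernel.h>

static int example_get_phys(struct ion_client *client,
			    struct ion_handle *handle)
{
	ion_phys_addr_t addr;
	size_t len;
	int ret;

	/* resolves to buffer->heap->ops->phys() for heaps that provide it */
	ret = ion_phys(client, handle, &addr, &len);
	if (ret)
		return ret;

	pr_info("ion buffer at %lx, length %zu\n", (unsigned long)addr, len);
	return 0;
}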
441
442 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
443 {
444         void *vaddr;
445
446         if (buffer->kmap_cnt) {
447                 buffer->kmap_cnt++;
448                 return buffer->vaddr;
449         }
450         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
451         if (IS_ERR_OR_NULL(vaddr))
452                 return vaddr;
453         buffer->vaddr = vaddr;
454         buffer->kmap_cnt++;
455         return vaddr;
456 }
457
458 static void *ion_handle_kmap_get(struct ion_handle *handle)
459 {
460         struct ion_buffer *buffer = handle->buffer;
461         void *vaddr;
462
463         if (handle->kmap_cnt) {
464                 handle->kmap_cnt++;
465                 return buffer->vaddr;
466         }
467         vaddr = ion_buffer_kmap_get(buffer);
468         if (IS_ERR_OR_NULL(vaddr))
469                 return vaddr;
470         handle->kmap_cnt++;
471         return vaddr;
472 }
473
474 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
475 {
476         buffer->kmap_cnt--;
477         if (!buffer->kmap_cnt) {
478                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
479                 buffer->vaddr = NULL;
480         }
481 }
482
483 static void ion_handle_kmap_put(struct ion_handle *handle)
484 {
485         struct ion_buffer *buffer = handle->buffer;
486
487         handle->kmap_cnt--;
488         if (!handle->kmap_cnt)
489                 ion_buffer_kmap_put(buffer);
490 }
491
492 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
493 {
494         struct ion_buffer *buffer;
495         void *vaddr;
496
497         mutex_lock(&client->lock);
498         if (!ion_handle_validate(client, handle)) {
499                 pr_err("%s: invalid handle passed to map_kernel.\n",
500                        __func__);
501                 mutex_unlock(&client->lock);
502                 return ERR_PTR(-EINVAL);
503         }
504
505         buffer = handle->buffer;
506
507         if (!handle->buffer->heap->ops->map_kernel) {
508                 pr_err("%s: map_kernel is not implemented by this heap.\n",
509                        __func__);
510                 mutex_unlock(&client->lock);
511                 return ERR_PTR(-ENODEV);
512         }
513
514         mutex_lock(&buffer->lock);
515         vaddr = ion_handle_kmap_get(handle);
516         mutex_unlock(&buffer->lock);
517         mutex_unlock(&client->lock);
518         return vaddr;
519 }
520
521 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
522 {
523         struct ion_buffer *buffer;
524
525         mutex_lock(&client->lock);
526         buffer = handle->buffer;
527         mutex_lock(&buffer->lock);
528         ion_handle_kmap_put(handle);
529         mutex_unlock(&buffer->lock);
530         mutex_unlock(&client->lock);
531 }
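As a usage sketch for the client API above (ion_client_create(), ion_alloc(), ion_map_kernel(), ion_free()): the external my_ion_device pointer is an assumed platform export, and the all-ones heap masks mirror what ion_open() does; real callers pass platform-specific heap ids.

#include <linux/err.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Assumed to be provided by platform code; not defined in this file. */
extern struct ion_device *my_ion_device;

static int example_use_ion(void)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;
	int ret = 0;

	/* -1 mirrors ion_open(): the client may use any heap type */
	client = ion_client_create(my_ion_device, -1, "example");
	if (IS_ERR_OR_NULL(client))
		return -ENODEV;

	/* one page, page aligned, any heap id, cached CPU mappings requested */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, ~0u, ION_FLAG_CACHED);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -ENOMEM;
		goto out;
	}

	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR_OR_NULL(vaddr)) {
		memset(vaddr, 0, PAGE_SIZE);
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
out:
	ion_client_destroy(client);
	return ret;
}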
532
533 static int ion_debug_client_show(struct seq_file *s, void *unused)
534 {
535         struct ion_client *client = s->private;
536         struct rb_node *n;
537         size_t sizes[ION_NUM_HEAPS] = {0};
538         const char *names[ION_NUM_HEAPS] = {0};
539         int i;
540
541         mutex_lock(&client->lock);
542         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
543                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
544                                                      node);
545                 enum ion_heap_type type = handle->buffer->heap->type;
546
547                 if (!names[type])
548                         names[type] = handle->buffer->heap->name;
549                 sizes[type] += handle->buffer->size;
550         }
551         mutex_unlock(&client->lock);
552
553         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
554         for (i = 0; i < ION_NUM_HEAPS; i++) {
555                 if (!names[i])
556                         continue;
557                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
558         }
559         return 0;
560 }
561
562 static int ion_debug_client_open(struct inode *inode, struct file *file)
563 {
564         return single_open(file, ion_debug_client_show, inode->i_private);
565 }
566
567 static const struct file_operations debug_client_fops = {
568         .open = ion_debug_client_open,
569         .read = seq_read,
570         .llseek = seq_lseek,
571         .release = single_release,
572 };
573
574 struct ion_client *ion_client_create(struct ion_device *dev,
575                                      unsigned int heap_mask,
576                                      const char *name)
577 {
578         struct ion_client *client;
579         struct task_struct *task;
580         struct rb_node **p;
581         struct rb_node *parent = NULL;
582         struct ion_client *entry;
583         char debug_name[64];
584         pid_t pid;
585
586         get_task_struct(current->group_leader);
587         task_lock(current->group_leader);
588         pid = task_pid_nr(current->group_leader);
589         /* don't bother to store task struct for kernel threads,
590            they can't be killed anyway */
591         if (current->group_leader->flags & PF_KTHREAD) {
592                 put_task_struct(current->group_leader);
593                 task = NULL;
594         } else {
595                 task = current->group_leader;
596         }
597         task_unlock(current->group_leader);
598
599         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
600         if (!client) {
601                 if (task)
602                         put_task_struct(current->group_leader);
603                 return ERR_PTR(-ENOMEM);
604         }
605
606         client->dev = dev;
607         client->handles = RB_ROOT;
608         mutex_init(&client->lock);
609         client->name = name;
610         client->heap_mask = heap_mask;
611         client->task = task;
612         client->pid = pid;
613
614         mutex_lock(&dev->lock);
615         p = &dev->clients.rb_node;
616         while (*p) {
617                 parent = *p;
618                 entry = rb_entry(parent, struct ion_client, node);
619
620                 if (client < entry)
621                         p = &(*p)->rb_left;
622                 else if (client > entry)
623                         p = &(*p)->rb_right;
624         }
625         rb_link_node(&client->node, parent, p);
626         rb_insert_color(&client->node, &dev->clients);
627
628         snprintf(debug_name, 64, "%u", client->pid);
629         client->debug_root = debugfs_create_file(debug_name, 0664,
630                                                  dev->debug_root, client,
631                                                  &debug_client_fops);
632         mutex_unlock(&dev->lock);
633
634         return client;
635 }
636
637 void ion_client_destroy(struct ion_client *client)
638 {
639         struct ion_device *dev = client->dev;
640         struct rb_node *n;
641
642         pr_debug("%s: %d\n", __func__, __LINE__);
643         while ((n = rb_first(&client->handles))) {
644                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
645                                                      node);
646                 ion_handle_destroy(&handle->ref);
647         }
648         mutex_lock(&dev->lock);
649         if (client->task)
650                 put_task_struct(client->task);
651         rb_erase(&client->node, &dev->clients);
652         debugfs_remove_recursive(client->debug_root);
653         mutex_unlock(&dev->lock);
654
655         kfree(client);
656 }
657
658 struct sg_table *ion_sg_table(struct ion_client *client,
659                               struct ion_handle *handle)
660 {
661         struct ion_buffer *buffer;
662         struct sg_table *table;
663
664         mutex_lock(&client->lock);
665         if (!ion_handle_validate(client, handle)) {
666                 pr_err("%s: invalid handle passed to map_dma.\n",
667                        __func__);
668                 mutex_unlock(&client->lock);
669                 return ERR_PTR(-EINVAL);
670         }
671         buffer = handle->buffer;
672         table = buffer->sg_table;
673         mutex_unlock(&client->lock);
674         return table;
675 }
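A sketch of a driver consuming the table returned by ion_sg_table(); program_hw_sg() is a hypothetical device-specific helper standing in for whatever actually programs the hardware's scatter-gather list. Per the comment in ion_buffer_create(), sg_dma_address() already carries the physical address here, so no dma_map_sg() call is made.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical device-specific helper, declared only for the example. */
extern void program_hw_sg(int idx, dma_addr_t addr, unsigned int len);

static int example_program_dma(struct ion_client *client,
			       struct ion_handle *handle)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int i;

	table = ion_sg_table(client, handle);
	if (IS_ERR_OR_NULL(table))
		return -EINVAL;

	for_each_sg(table->sgl, sg, table->nents, i)
		program_hw_sg(i, sg_dma_address(sg), sg_dma_len(sg));

	return 0;
}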
676
677 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
678                                        struct device *dev,
679                                        enum dma_data_direction direction);
680
681 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
682                                         enum dma_data_direction direction)
683 {
684         struct dma_buf *dmabuf = attachment->dmabuf;
685         struct ion_buffer *buffer = dmabuf->priv;
686
687         if (buffer->flags & ION_FLAG_CACHED)
688                 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
689         return buffer->sg_table;
690 }
691
692 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
693                               struct sg_table *table,
694                               enum dma_data_direction direction)
695 {
696 }
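These callbacks are normally reached through the standard dma-buf API rather than called directly. A hedged sketch of an importing driver follows: dma_buf_map_attachment() lands in ion_map_dma_buf() above, which syncs any pages the fault handler has marked dirty before the device touches them. my_dev is an assumed importer-owned struct device, and teardown (unmap/detach/put) is left to the caller, which also needs to keep the dmabuf pointer.

#include <linux/dma-buf.h>
#include <linux/err.h>

static struct sg_table *example_import(struct device *my_dev, int fd,
				       struct dma_buf_attachment **out)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dmabuf, my_dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return ERR_CAST(attach);
	}

	/* triggers ion_buffer_sync_for_device() for cached buffers */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return ERR_PTR(-ENOMEM);
	}

	*out = attach;
	return sgt;
}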
697
698 static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
699 {
700         unsigned long pages = buffer->sg_table->nents;
701         unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
702
703         buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
704         if (!buffer->dirty)
705                 return -ENOMEM;
706         return 0;
707 }
708
709 struct ion_vma_list {
710         struct list_head list;
711         struct vm_area_struct *vma;
712 };
713
714 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
715                                        struct device *dev,
716                                        enum dma_data_direction dir)
717 {
718         struct scatterlist *sg;
719         int i;
720         struct ion_vma_list *vma_list;
721
722         pr_debug("%s: syncing for device %s\n", __func__,
723                  dev ? dev_name(dev) : "null");
724         mutex_lock(&buffer->lock);
725         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
726                 if (!test_bit(i, buffer->dirty))
727                         continue;
728                 dma_sync_sg_for_device(dev, sg, 1, dir);
729                 clear_bit(i, buffer->dirty);
730         }
731         list_for_each_entry(vma_list, &buffer->vmas, list) {
732                 struct vm_area_struct *vma = vma_list->vma;
733
734                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
735                                NULL);
736         }
737         mutex_unlock(&buffer->lock);
738 }
739
740 int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
741 {
742         struct ion_buffer *buffer = vma->vm_private_data;
743         struct scatterlist *sg;
744         int i;
745
746         mutex_lock(&buffer->lock);
747         set_bit(vmf->pgoff, buffer->dirty);
748
749         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
750                 if (i != vmf->pgoff)
751                         continue;
752                 dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
753                 vm_insert_page(vma, (unsigned long)vmf->virtual_address,
754                                sg_page(sg));
755                 break;
756         }
757         mutex_unlock(&buffer->lock);
758         return VM_FAULT_NOPAGE;
759 }
760
761 static void ion_vm_open(struct vm_area_struct *vma)
762 {
763         struct ion_buffer *buffer = vma->vm_private_data;
764         struct ion_vma_list *vma_list;
765
766         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
767         if (!vma_list)
768                 return;
769         vma_list->vma = vma;
770         mutex_lock(&buffer->lock);
771         list_add(&vma_list->list, &buffer->vmas);
772         mutex_unlock(&buffer->lock);
773         pr_debug("%s: adding %p\n", __func__, vma);
774 }
775
776 static void ion_vm_close(struct vm_area_struct *vma)
777 {
778         struct ion_buffer *buffer = vma->vm_private_data;
779         struct ion_vma_list *vma_list, *tmp;
780
781         pr_debug("%s\n", __func__);
782         mutex_lock(&buffer->lock);
783         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
784                 if (vma_list->vma != vma)
785                         continue;
786                 list_del(&vma_list->list);
787                 kfree(vma_list);
788                 pr_debug("%s: deleting %p\n", __func__, vma);
789                 break;
790         }
791         mutex_unlock(&buffer->lock);
792 }
793
794 struct vm_operations_struct ion_vma_ops = {
795         .open = ion_vm_open,
796         .close = ion_vm_close,
797         .fault = ion_vm_fault,
798 };
799
800 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
801 {
802         struct ion_buffer *buffer = dmabuf->priv;
803         int ret = 0;
804
805         if (!buffer->heap->ops->map_user) {
806                 pr_err("%s: this heap does not define a method for mapping "
807                        "to userspace\n", __func__);
808                 return -EINVAL;
809         }
810
811         if (buffer->flags & ION_FLAG_CACHED) {
812                 vma->vm_private_data = buffer;
813                 vma->vm_ops = &ion_vma_ops;
814                 ion_vm_open(vma);
815         } else {
816                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
817                 mutex_lock(&buffer->lock);
818                 /* now map it to userspace */
819                 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
820                 mutex_unlock(&buffer->lock);
821         }
822
823         if (ret)
824                 pr_err("%s: failure mapping buffer to userspace\n",
825                        __func__);
826
827         return ret;
828 }
829
830 static void ion_dma_buf_release(struct dma_buf *dmabuf)
831 {
832         struct ion_buffer *buffer = dmabuf->priv;
833         ion_buffer_put(buffer);
834 }
835
836 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
837 {
838         struct ion_buffer *buffer = dmabuf->priv;
839         return buffer->vaddr + offset;
840 }
841
842 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
843                                void *ptr)
844 {
845         return;
846 }
847
848 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
849                                         size_t len,
850                                         enum dma_data_direction direction)
851 {
852         struct ion_buffer *buffer = dmabuf->priv;
853         void *vaddr;
854
855         if (!buffer->heap->ops->map_kernel) {
856                 pr_err("%s: map kernel is not implemented by this heap.\n",
857                        __func__);
858                 return -ENODEV;
859         }
860
861         mutex_lock(&buffer->lock);
862         vaddr = ion_buffer_kmap_get(buffer);
863         mutex_unlock(&buffer->lock);
864         if (IS_ERR(vaddr))
865                 return PTR_ERR(vaddr);
866         if (!vaddr)
867                 return -ENOMEM;
868         return 0;
869 }
870
871 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
872                                        size_t len,
873                                        enum dma_data_direction direction)
874 {
875         struct ion_buffer *buffer = dmabuf->priv;
876
877         mutex_lock(&buffer->lock);
878         ion_buffer_kmap_put(buffer);
879         mutex_unlock(&buffer->lock);
880 }
881
882 struct dma_buf_ops dma_buf_ops = {
883         .map_dma_buf = ion_map_dma_buf,
884         .unmap_dma_buf = ion_unmap_dma_buf,
885         .mmap = ion_mmap,
886         .release = ion_dma_buf_release,
887         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
888         .end_cpu_access = ion_dma_buf_end_cpu_access,
889         .kmap_atomic = ion_dma_buf_kmap,
890         .kunmap_atomic = ion_dma_buf_kunmap,
891         .kmap = ion_dma_buf_kmap,
892         .kunmap = ion_dma_buf_kunmap,
893 };
894
895 int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
896 {
897         struct ion_buffer *buffer;
898         struct dma_buf *dmabuf;
899         bool valid_handle;
900         int fd;
901
902         mutex_lock(&client->lock);
903         valid_handle = ion_handle_validate(client, handle);
904         mutex_unlock(&client->lock);
905         if (!valid_handle) {
906                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
907                 return -EINVAL;
908         }
909
910         buffer = handle->buffer;
911         ion_buffer_get(buffer);
912         dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
913         if (IS_ERR(dmabuf)) {
914                 ion_buffer_put(buffer);
915                 return PTR_ERR(dmabuf);
916         }
917         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
918         if (fd < 0) {
919                 dma_buf_put(dmabuf);
920                 ion_buffer_put(buffer);
921         }
922         return fd;
923 }
924
925 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
926 {
927         struct dma_buf *dmabuf;
928         struct ion_buffer *buffer;
929         struct ion_handle *handle;
930
931         dmabuf = dma_buf_get(fd);
932         if (IS_ERR_OR_NULL(dmabuf))
933                 return ERR_PTR(PTR_ERR(dmabuf));
934         /* if this memory came from ion */
935
936         if (dmabuf->ops != &dma_buf_ops) {
937                 pr_err("%s: can not import dmabuf from another exporter\n",
938                        __func__);
939                 dma_buf_put(dmabuf);
940                 return ERR_PTR(-EINVAL);
941         }
942         buffer = dmabuf->priv;
943
944         mutex_lock(&client->lock);
945         /* if a handle exists for this buffer just take a reference to it */
946         handle = ion_handle_lookup(client, buffer);
947         if (!IS_ERR_OR_NULL(handle)) {
948                 ion_handle_get(handle);
949                 goto end;
950         }
951         handle = ion_handle_create(client, buffer);
952         if (IS_ERR_OR_NULL(handle))
953                 goto end;
954         ion_handle_add(client, handle);
955 end:
956         mutex_unlock(&client->lock);
957         dma_buf_put(dmabuf);
958         return handle;
959 }
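Taken together, ion_share_dma_buf() and ion_import_dma_buf() let two clients reference one buffer through a dma-buf fd. The sketch below shows both halves back to back purely to illustrate the API; in practice the fd usually travels to another process (e.g. over binder), and closing the installed fd is left to the caller.

#include <linux/err.h>

static struct ion_handle *example_share(struct ion_client *producer,
					struct ion_handle *handle,
					struct ion_client *consumer)
{
	int fd;

	fd = ion_share_dma_buf(producer, handle);
	if (fd < 0)
		return ERR_PTR(fd);

	/* the consumer ends up with its own handle to the same buffer */
	return ion_import_dma_buf(consumer, fd);
}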
960
961 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
962 {
963         struct ion_client *client = filp->private_data;
964
965         switch (cmd) {
966         case ION_IOC_ALLOC:
967         {
968                 struct ion_allocation_data data;
969
970                 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
971                         return -EFAULT;
972                 data.handle = ion_alloc(client, data.len, data.align,
973                                              data.heap_mask, data.flags);
974
975                 if (IS_ERR(data.handle))
976                         return PTR_ERR(data.handle);
977
978                 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
979                         ion_free(client, data.handle);
980                         return -EFAULT;
981                 }
982                 break;
983         }
984         case ION_IOC_FREE:
985         {
986                 struct ion_handle_data data;
987                 bool valid;
988
989                 if (copy_from_user(&data, (void __user *)arg,
990                                    sizeof(struct ion_handle_data)))
991                         return -EFAULT;
992                 mutex_lock(&client->lock);
993                 valid = ion_handle_validate(client, data.handle);
994                 mutex_unlock(&client->lock);
995                 if (!valid)
996                         return -EINVAL;
997                 ion_free(client, data.handle);
998                 break;
999         }
1000         case ION_IOC_SHARE:
1001         {
1002                 struct ion_fd_data data;
1003
1004                 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1005                         return -EFAULT;
1006                 data.fd = ion_share_dma_buf(client, data.handle);
1007                 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1008                         return -EFAULT;
1009                 break;
1010         }
1011         case ION_IOC_IMPORT:
1012         {
1013                 struct ion_fd_data data;
1014                 if (copy_from_user(&data, (void __user *)arg,
1015                                    sizeof(struct ion_fd_data)))
1016                         return -EFAULT;
1017                 data.handle = ion_import_dma_buf(client, data.fd);
1018                 if (IS_ERR(data.handle))
1019                         data.handle = NULL;
1020                 if (copy_to_user((void __user *)arg, &data,
1021                                  sizeof(struct ion_fd_data)))
1022                         return -EFAULT;
1023                 break;
1024         }
1025         case ION_IOC_CUSTOM:
1026         {
1027                 struct ion_device *dev = client->dev;
1028                 struct ion_custom_data data;
1029
1030                 if (!dev->custom_ioctl)
1031                         return -ENOTTY;
1032                 if (copy_from_user(&data, (void __user *)arg,
1033                                 sizeof(struct ion_custom_data)))
1034                         return -EFAULT;
1035                 return dev->custom_ioctl(client, data.cmd, data.arg);
1036         }
1037         default:
1038                 return -ENOTTY;
1039         }
1040         return 0;
1041 }
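From userspace the same paths are reached through /dev/ion. A hedged sketch, assuming the userspace copy of linux/ion.h that defines the ION_IOC_* numbers and the ion_*_data structs referenced above: allocate a buffer, turn the handle into a dma-buf fd with ION_IOC_SHARE, and mmap() that fd (which ends up in ion_mmap()).

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

int main(void)
{
	struct ion_allocation_data alloc;
	struct ion_fd_data share;
	struct ion_handle_data free_data;
	void *p;
	int ion_fd = open("/dev/ion", O_RDWR);

	if (ion_fd < 0)
		return 1;

	memset(&alloc, 0, sizeof(alloc));
	alloc.len = 4096;
	alloc.align = 4096;
	alloc.heap_mask = ~0u;	/* any heap; platform specific in practice */
	alloc.flags = 0;	/* uncached; ION_FLAG_CACHED enables the fault path */
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		goto out;

	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
		goto free_handle;

	/* the dma-buf fd returned by ION_IOC_SHARE is what gets mmap()ed */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (p != MAP_FAILED) {
		memset(p, 0, 4096);
		munmap(p, 4096);
	}
	close(share.fd);

free_handle:
	free_data.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);
out:
	close(ion_fd);
	return 0;
}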
1042
1043 static int ion_release(struct inode *inode, struct file *file)
1044 {
1045         struct ion_client *client = file->private_data;
1046
1047         pr_debug("%s: %d\n", __func__, __LINE__);
1048         ion_client_destroy(client);
1049         return 0;
1050 }
1051
1052 static int ion_open(struct inode *inode, struct file *file)
1053 {
1054         struct miscdevice *miscdev = file->private_data;
1055         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1056         struct ion_client *client;
1057
1058         pr_debug("%s: %d\n", __func__, __LINE__);
1059         client = ion_client_create(dev, -1, "user");
1060         if (IS_ERR_OR_NULL(client))
1061                 return PTR_ERR(client);
1062         file->private_data = client;
1063
1064         return 0;
1065 }
1066
1067 static const struct file_operations ion_fops = {
1068         .owner          = THIS_MODULE,
1069         .open           = ion_open,
1070         .release        = ion_release,
1071         .unlocked_ioctl = ion_ioctl,
1072 };
1073
1074 static size_t ion_debug_heap_total(struct ion_client *client,
1075                                    enum ion_heap_type type)
1076 {
1077         size_t size = 0;
1078         struct rb_node *n;
1079
1080         mutex_lock(&client->lock);
1081         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1082                 struct ion_handle *handle = rb_entry(n,
1083                                                      struct ion_handle,
1084                                                      node);
1085                 if (handle->buffer->heap->type == type)
1086                         size += handle->buffer->size;
1087         }
1088         mutex_unlock(&client->lock);
1089         return size;
1090 }
1091
1092 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1093 {
1094         struct ion_heap *heap = s->private;
1095         struct ion_device *dev = heap->dev;
1096         struct rb_node *n;
1097
1098         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1099
1100         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1101                 struct ion_client *client = rb_entry(n, struct ion_client,
1102                                                      node);
1103                 size_t size = ion_debug_heap_total(client, heap->type);
1104                 if (!size)
1105                         continue;
1106                 if (client->task) {
1107                         char task_comm[TASK_COMM_LEN];
1108
1109                         get_task_comm(task_comm, client->task);
1110                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1111                                    client->pid, size);
1112                 } else {
1113                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1114                                    client->pid, size);
1115                 }
1116         }
1117         return 0;
1118 }
1119
1120 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1121 {
1122         return single_open(file, ion_debug_heap_show, inode->i_private);
1123 }
1124
1125 static const struct file_operations debug_heap_fops = {
1126         .open = ion_debug_heap_open,
1127         .read = seq_read,
1128         .llseek = seq_lseek,
1129         .release = single_release,
1130 };
1131
1132 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1133 {
1134         struct rb_node **p = &dev->heaps.rb_node;
1135         struct rb_node *parent = NULL;
1136         struct ion_heap *entry;
1137
1138         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1139             !heap->ops->unmap_dma)
1140                 pr_err("%s: can not add heap with invalid ops struct.\n",
1141                        __func__);
1142
1143         heap->dev = dev;
1144         mutex_lock(&dev->lock);
1145         while (*p) {
1146                 parent = *p;
1147                 entry = rb_entry(parent, struct ion_heap, node);
1148
1149                 if (heap->id < entry->id) {
1150                         p = &(*p)->rb_left;
1151                 } else if (heap->id > entry->id) {
1152                         p = &(*p)->rb_right;
1153                 } else {
1154                         pr_err("%s: can not insert multiple heaps with "
1155                                 "id %d\n", __func__, heap->id);
1156                         goto end;
1157                 }
1158         }
1159
1160         rb_link_node(&heap->node, parent, p);
1161         rb_insert_color(&heap->node, &dev->heaps);
1162         debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1163                             &debug_heap_fops);
1164 end:
1165         mutex_unlock(&dev->lock);
1166 }
1167
1168 struct ion_device *ion_device_create(long (*custom_ioctl)
1169                                      (struct ion_client *client,
1170                                       unsigned int cmd,
1171                                       unsigned long arg))
1172 {
1173         struct ion_device *idev;
1174         int ret;
1175
1176         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1177         if (!idev)
1178                 return ERR_PTR(-ENOMEM);
1179
1180         idev->dev.minor = MISC_DYNAMIC_MINOR;
1181         idev->dev.name = "ion";
1182         idev->dev.fops = &ion_fops;
1183         idev->dev.parent = NULL;
1184         ret = misc_register(&idev->dev);
1185         if (ret) {
1186                 pr_err("ion: failed to register misc device.\n");
1187                 return ERR_PTR(ret);
1188         }
1189
1190         idev->debug_root = debugfs_create_dir("ion", NULL);
1191         if (IS_ERR_OR_NULL(idev->debug_root))
1192                 pr_err("ion: failed to create debug files.\n");
1193
1194         idev->custom_ioctl = custom_ioctl;
1195         idev->buffers = RB_ROOT;
1196         mutex_init(&idev->lock);
1197         idev->heaps = RB_ROOT;
1198         idev->clients = RB_ROOT;
1199         return idev;
1200 }
1201
1202 void ion_device_destroy(struct ion_device *dev)
1203 {
1204         misc_deregister(&dev->dev);
1205         /* XXX need to free the heaps and clients ? */
1206         kfree(dev);
1207 }
1208
1209 void __init ion_reserve(struct ion_platform_data *data)
1210 {
1211         int i, ret;
1212
1213         for (i = 0; i < data->nr; i++) {
1214                 if (data->heaps[i].size == 0)
1215                         continue;
1216                 ret = memblock_reserve(data->heaps[i].base,
1217                                        data->heaps[i].size);
1218                 if (ret)
1219                         pr_err("memblock reserve of %zx@%lx failed\n",
1220                                data->heaps[i].size,
1221                                data->heaps[i].base);
1222         }
1223 }
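For context, a sketch of the board-level glue that normally accompanies this driver. The ion_platform_heap field names and ion_heap_create() come from linux/ion.h and ion_priv.h in this tree and are used here as assumptions; the heap ids, carveout base, and size are placeholders. The same carveout base/size would also be handed to ion_reserve() from the machine's reserve callback so memblock sets the memory aside before the page allocator claims it.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/ion.h>
#include <linux/kernel.h>
#include "ion_priv.h"		/* for ion_heap_create(), in-tree builds only */

static struct ion_platform_heap example_heaps[] = {
	{
		.id	= 0,
		.type	= ION_HEAP_TYPE_SYSTEM,
		.name	= "system",
	},
	{
		.id	= 1,
		.type	= ION_HEAP_TYPE_CARVEOUT,
		.name	= "carveout",
		.base	= 0x80000000,		/* placeholder physical base */
		.size	= 16 * 1024 * 1024,	/* placeholder size */
	},
};

static struct ion_device *example_idev;

static int __init example_ion_init(void)
{
	int i;

	example_idev = ion_device_create(NULL);	/* no custom ioctl hook */
	if (IS_ERR(example_idev))
		return PTR_ERR(example_idev);

	for (i = 0; i < ARRAY_SIZE(example_heaps); i++) {
		struct ion_heap *heap = ion_heap_create(&example_heaps[i]);

		if (!IS_ERR_OR_NULL(heap))
			ion_device_add_heap(example_idev, heap);
	}
	return 0;
}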