/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

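/*
 * Allocate the backing storage of a GEM object with dma_alloc_attrs().
 * Without an IOMMU, the pages[] array is rebuilt from the physically
 * contiguous DMA address; with an IOMMU, the cookie returned by
 * dma_alloc_attrs() is used directly as the page array because
 * DMA_ATTR_NO_KERNEL_MAPPING is set.
 */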
static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
{
        struct drm_device *dev = obj->base.dev;
        enum dma_attr attr;
        unsigned int nr_pages;

        if (obj->dma_addr) {
                DRM_DEBUG_KMS("already allocated.\n");
                return 0;
        }

        init_dma_attrs(&obj->dma_attrs);

        /*
         * If EXYNOS_BO_CONTIG, a fully physically contiguous memory
         * region is allocated; otherwise the memory is allocated as
         * physically contiguous as possible.
         */
        if (!(obj->flags & EXYNOS_BO_NONCONTIG))
                dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);

        /*
         * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
         * mapping; otherwise use a cacheable mapping.
         */
        if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
                attr = DMA_ATTR_WRITE_COMBINE;
        else
                attr = DMA_ATTR_NON_CONSISTENT;

        dma_set_attr(attr, &obj->dma_attrs);
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);

        nr_pages = obj->size >> PAGE_SHIFT;

        if (!is_drm_iommu_supported(dev)) {
                dma_addr_t start_addr;
                unsigned int i = 0;

                obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
                if (!obj->pages) {
                        DRM_ERROR("failed to allocate pages.\n");
                        return -ENOMEM;
                }

                obj->cookie = dma_alloc_attrs(dev->dev,
                                        obj->size,
                                        &obj->dma_addr, GFP_KERNEL,
                                        &obj->dma_attrs);
                if (!obj->cookie) {
                        DRM_ERROR("failed to allocate buffer.\n");
                        drm_free_large(obj->pages);
                        return -ENOMEM;
                }

                start_addr = obj->dma_addr;
                while (i < nr_pages) {
                        obj->pages[i] = phys_to_page(start_addr);
                        start_addr += PAGE_SIZE;
                        i++;
                }
        } else {
                obj->pages = dma_alloc_attrs(dev->dev, obj->size,
                                        &obj->dma_addr, GFP_KERNEL,
                                        &obj->dma_attrs);
                if (!obj->pages) {
                        DRM_ERROR("failed to allocate buffer.\n");
                        return -ENOMEM;
                }
        }

        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)obj->dma_addr,
                        obj->size);

        return 0;
}

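/*
 * Release the backing storage allocated by exynos_drm_alloc_buf(). The
 * cookie (non-IOMMU) or page array (IOMMU) is handed back to
 * dma_free_attrs() together with the DMA address.
 */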
static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
{
        struct drm_device *dev = obj->base.dev;

        if (!obj->dma_addr) {
                DRM_DEBUG_KMS("dma_addr is invalid.\n");
                return;
        }

        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)obj->dma_addr, obj->size);

        if (!is_drm_iommu_supported(dev)) {
                dma_free_attrs(dev->dev, obj->size, obj->cookie,
                                (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
                drm_free_large(obj->pages);
        } else
                dma_free_attrs(dev->dev, obj->size, obj->pages,
                                (dma_addr_t)obj->dma_addr, &obj->dma_attrs);

        obj->dma_addr = (dma_addr_t)NULL;
}

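/*
 * Create a userspace handle for a GEM object and drop the allocation
 * reference, leaving the handle as the only reference held on behalf of
 * the caller.
 */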
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * Allocate an id in the idr table where the obj is registered;
         * the handle holds the id that userspace can see.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}

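/*
 * Tear down a GEM object: free the backing buffer unless it was imported
 * through dma-buf, release the mmap offset and the GEM core state, then
 * free the object itself.
 */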
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj = &exynos_gem_obj->base;

        DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

        /*
         * do not release memory region from exporter.
         *
         * the region will be released by exporter
         * once dmabuf's refcount becomes 0.
         */
        if (obj->import_attach)
                goto out;

        exynos_drm_free_buf(exynos_gem_obj);

out:
        drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
        exynos_gem_obj = NULL;
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                                unsigned int gem_handle,
                                                struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return 0;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        return exynos_gem_obj->size;
}

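/*
 * Allocate and initialise the exynos GEM wrapper around a core
 * drm_gem_object of the given size; no backing storage is allocated here.
 */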
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                      unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return ERR_PTR(ret);
        }

        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

        return exynos_gem_obj;
}

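/*
 * Create a fully backed GEM object: validate the requested flags and size,
 * round the size up to a page multiple, and allocate the buffer according
 * to the memory type and cache attributes requested by the caller.
 */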
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup(size, PAGE_SIZE);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (IS_ERR(exynos_gem_obj))
                return exynos_gem_obj;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        ret = exynos_drm_alloc_buf(exynos_gem_obj);
        if (ret < 0) {
                drm_gem_object_release(&exynos_gem_obj->base);
                kfree(exynos_gem_obj);
                return ERR_PTR(ret);
        }

        return exynos_gem_obj;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

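/*
 * Look up a GEM object by handle and return a pointer to its DMA address.
 * The lookup takes a reference on the object; exynos_drm_gem_put_dma_addr()
 * drops it again later.
 */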
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        return &exynos_gem_obj->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        drm_gem_object_unreference_unlocked(obj);

        /*
         * decrease obj->refcount one more time because we have already
         * increased it at exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}

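/*
 * Map the buffer backing a GEM object into a userspace VMA with
 * dma_mmap_attrs(), after checking that the requested mapping does not
 * exceed the buffer size.
 */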
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
                                      struct vm_area_struct *vma)
{
        struct drm_device *drm_dev = exynos_gem_obj->base.dev;
        unsigned long vm_size;
        int ret;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        vm_size = vma->vm_end - vma->vm_start;

        /* check if user-requested size is valid. */
        if (vm_size > exynos_gem_obj->size)
                return -EINVAL;

        ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
                                exynos_gem_obj->dma_addr, exynos_gem_obj->size,
                                &exynos_gem_obj->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *vma_copy;

        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
        if (!vma_copy)
                return NULL;

        if (vma->vm_ops && vma->vm_ops->open)
                vma->vm_ops->open(vma);

        if (vma->vm_file)
                get_file(vma->vm_file);

        memcpy(vma_copy, vma, sizeof(*vma));

        vma_copy->vm_mm = NULL;
        vma_copy->vm_next = NULL;
        vma_copy->vm_prev = NULL;

        return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
        if (!vma)
                return;

        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);

        if (vma->vm_file)
                fput(vma->vm_file);

        kfree(vma);
}

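/*
 * Pin the pages backing a userspace address range. VM_PFNMAP (I/O) areas
 * are resolved page by page with follow_pfn(); normal anonymous or
 * file-backed memory is pinned with get_user_pages().
 */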
int exynos_gem_get_pages_from_userptr(unsigned long start,
                                                unsigned int npages,
                                                struct page **pages,
                                                struct vm_area_struct *vma)
{
        int get_npages;

        /* the memory region mmaped with VM_PFNMAP. */
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
                        if (ret)
                                return ret;

                        pages[i] = pfn_to_page(pfn);
                }

                if (i != npages) {
                        DRM_ERROR("failed to get user_pages.\n");
                        return -EINVAL;
                }

                return 0;
        }

        get_npages = get_user_pages(current, current->mm, start,
                                        npages, 1, 1, pages, NULL);
        get_npages = max(get_npages, 0);
        if (get_npages != npages) {
                DRM_ERROR("failed to get user_pages.\n");
                while (get_npages)
                        put_page(pages[--get_npages]);
                return -EFAULT;
        }

        return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
                                        unsigned int npages,
                                        struct vm_area_struct *vma)
{
        if (!vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; i++) {
                        set_page_dirty_lock(pages[i]);

                        /*
                         * undo the reference we took when populating
                         * the table.
                         */
                        put_page(pages[i]);
                }
        }
}

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int nents;

        mutex_lock(&drm_dev->struct_mutex);

        nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                return nents;
        }

        mutex_unlock(&drm_dev->struct_mutex);
        return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

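/*
 * DRM_IOCTL_MODE_CREATE_DUMB handler: compute pitch and size for the
 * requested dumb buffer, allocate it (non-contiguous when an IOMMU is
 * available, contiguous otherwise) and return a handle to userspace.
 */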
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        /*
         * allocate memory to be used for framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        if (is_drm_iommu_supported(dev)) {
                exynos_gem_obj = exynos_drm_gem_create(dev,
                        EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
                        args->size);
        } else {
                exynos_gem_obj = exynos_drm_gem_create(dev,
                        EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
                        args->size);
        }

        if (IS_ERR(exynos_gem_obj)) {
                dev_warn(dev->dev, "FB allocation failed.\n");
                return PTR_ERR(exynos_gem_obj);
        }

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        /*
         * get offset of memory allocated for drm framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_MAP_DUMB command.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

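/*
 * Page fault handler for mmapped GEM objects: translate the faulting
 * address into a page offset within the buffer and insert the matching
 * page into the VMA.
 */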
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        unsigned long pfn;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;

        if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                ret = -EINVAL;
                goto out;
        }

        pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
        switch (ret) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

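/*
 * File mmap entry point: let the GEM core set up the VMA, derive the page
 * protection from the object's cache flags, and map the whole buffer with
 * exynos_drm_gem_mmap_buffer().
 */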
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);

        /* non-cachable as default. */
        if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));

        ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
        if (ret)
                goto err_close_vm;

        return ret;

err_close_vm:
        drm_gem_vm_close(vma);
        drm_gem_free_mmap_offset(obj);

        return ret;
}

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        int npages;

        npages = exynos_gem_obj->size >> PAGE_SHIFT;

        return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
}

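/*
 * Import a dma-buf scatter table as a GEM object: reuse the exporter's
 * DMA address and rebuild the local page array from the sg entries.
 */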
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
                                     struct dma_buf_attachment *attach,
                                     struct sg_table *sgt)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int npages;
        int ret;

        exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
        if (IS_ERR(exynos_gem_obj)) {
                ret = PTR_ERR(exynos_gem_obj);
                return ERR_PTR(ret);
        }

        exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);

        npages = exynos_gem_obj->size >> PAGE_SHIFT;
        exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (!exynos_gem_obj->pages) {
                ret = -ENOMEM;
                goto err;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
                        npages);
        if (ret < 0)
                goto err_free_large;

        if (sgt->nents == 1) {
                /* always physically contiguous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
        } else {
                /*
                 * this case could be CONTIG or NONCONTIG type but for now
                 * sets NONCONTIG.
                 * TODO. we have to find a way that exporter can notify
                 * the type of its own buffer to importer.
                 */
                exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
        }

        return &exynos_gem_obj->base;

err_free_large:
        drm_free_large(exynos_gem_obj->pages);
err:
        drm_gem_object_release(&exynos_gem_obj->base);
        kfree(exynos_gem_obj);
        return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
        return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* Nothing to do */
}