drm/exynos: add userptr feature for g2d module
drivers/gpu/drm/exynos/exynos_drm_gem.c
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cachable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	/* TODO */

	return roundup(size, PAGE_SIZE);
}

static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	unsigned long pfn;
	int i;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		if (!buf->sgt)
			return -EINTR;

		sgl = buf->sgt->sgl;
		for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
			if (!sgl) {
				DRM_ERROR("invalid SG table\n");
				return -EINTR;
			}
			if (page_offset < (sgl->length >> PAGE_SHIFT))
				break;
			page_offset -= (sgl->length >> PAGE_SHIFT);
		}

		if (i >= buf->sgt->nents) {
			DRM_ERROR("invalid page offset\n");
			return -EINVAL;
		}

		pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
	} else {
		if (!buf->pages)
			return -EINTR;

		pfn = page_to_pfn(buf->pages[0]) + page_offset;
	}

	return vm_insert_mixed(vma, f_vaddr, pfn);
}
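
/*
 * Worked example (illustrative, not from the original source): for a
 * non-contiguous object whose sg table entries cover 3, 2 and 4 pages,
 * a fault at page_offset 4 skips the first entry (4 - 3 leaves 1),
 * stops at the second (1 < 2), and resolves to that entry's first pfn
 * plus 1.
 */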

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table where the obj is registered;
	 * the returned handle holds the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	/* print the pointer directly instead of truncating it to an int. */
	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);
	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		goto err_fini_buf;
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
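
/*
 * Hedged userspace sketch (not part of this file): how a client would
 * typically reach the ioctl above. The struct and request names come
 * from exynos_drm.h; "fd" is assumed to be an open DRM device node and
 * use_handle() is a hypothetical consumer.
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		use_handle(req.handle);
 */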

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
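
/*
 * Hedged kernel-side sketch: the two helpers above are meant to be
 * paired, so the extra reference taken by the lookup in
 * exynos_drm_gem_get_dma_addr() is dropped again. program_hardware()
 * is a hypothetical stand-in for device-specific setup such as the
 * g2d command parser this patch targets.
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *	program_hardware(*addr);
 *	exynos_drm_gem_put_dma_addr(drm_dev, handle, file);
 */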

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long vm_size;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = vma->vm_end - vma->vm_start;

	/*
	 * a buffer holds information about the physically contiguous
	 * memory allocated by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	return dma_mmap_attrs(obj->dev->dev, vma, buffer->kvaddr,
				buffer->dma_addr, buffer->size,
				&buffer->dma_attrs);
}

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	/*
	 * vm_mmap() returns an unsigned long, so keep the full width;
	 * truncating it to unsigned int would break the IS_ERR() check.
	 */
	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}
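
/*
 * Hedged userspace sketch: unlike a plain mmap() path, the kernel
 * performs the vm_mmap() itself and returns the mapped address in
 * args->mapped, so the caller only issues the ioctl. "fd", "handle"
 * and "size" are assumptions of the example.
 *
 *	struct drm_exynos_gem_mmap req = {
 *		.handle = handle,
 *		.size   = size,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req) == 0)
 *		ptr = (void *)(unsigned long)req.mapped;
 */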

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}
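
/*
 * Hedged usage sketch (modeled on the g2d userptr path this patch
 * enables; names other than the two helpers are illustrative): the
 * copy returned by exynos_gem_get_vma() outlives the caller's mapping,
 * so pinned pages can still be released correctly later.
 *
 *	down_read(&current->mm->mmap_sem);
 *	vma = find_vma(current->mm, userptr);
 *	...
 *	g2d_userptr->vma = exynos_gem_get_vma(vma);
 *	up_read(&current->mm->mmap_sem);
 *	...
 *	exynos_gem_put_vma(g2d_userptr->vma);
 */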

int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region was mmapped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);
			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		if (i != npages) {
			DRM_ERROR("failed to get user_pages.\n");
			return -EINVAL;
		}

		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
					npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	if (!vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; i++) {
			set_page_dirty_lock(pages[i]);

			/*
			 * undo the reference we took when populating
			 * the table.
			 */
			put_page(pages[i]);
		}
	}
}
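
/*
 * Hedged lifecycle sketch for the two helpers above: pin the user
 * pages, use them for DMA, then dirty and release them. The sg_table
 * step in the middle is an assumption about the caller, not something
 * these helpers do themselves.
 *
 *	ret = exynos_gem_get_pages_from_userptr(userptr, npages,
 *						pages, vma);
 *	if (ret < 0)
 *		goto err;
 *	... build an sg_table from pages and map it for DMA ...
 *	exynos_gem_put_pages_to_userptr(pages, npages, vma);
 */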

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		/*
		 * dma_map_sg() returns 0 on failure; returning nents here
		 * would report success, so return a real error instead.
		 */
		return -EFAULT;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
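
/*
 * Hedged pairing sketch for the two helpers above, assuming sgt was
 * built from pinned user pages (for instance with
 * sg_alloc_table_from_pages()):
 *
 *	ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
 *	if (ret < 0)
 *		goto err;
 *	... hardware DMA using sg_dma_address(sgt->sgl) ...
 *	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
 */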

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
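
/*
 * Worked example of the pitch/size math above: a 640x480, 24 bpp
 * request gives pitch = 640 * ((24 + 7) / 8) = 640 * 3 = 1920 bytes
 * and size = 1920 * 480 = 921600 bytes, which exynos_drm_gem_create()
 * then rounds up to a PAGE_SIZE multiple.
 */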

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
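
/*
 * Hedged userspace sketch: the standard dumb-buffer mapping sequence
 * that ends up in this callback. "fd", "handle" and "size" are
 * assumptions of the example.
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, map.offset);
 */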

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them are 0 then exynos_drm_gem_free_object()
	 * would be called by callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map a buffer with user.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}