/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
static unsigned int convert_to_vm_err_msg(int msg)
{
	/* translate a kernel error code into a VM fault code. */
	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
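
/*
 * check_gem_flags - validate the memory type and cache attribute flags
 * passed in from userspace; anything outside EXYNOS_BO_MASK is rejected.
 */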
static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}
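
/*
 * roundup_gem_size - round a requested allocation up to the granularity used
 * by the underlying allocator. Contiguous buffers are rounded to SECTION_SIZE
 * (typically 1 MiB on ARM) or SZ_64K depending on their size, while
 * non-contiguous buffers only need page granularity. For example, an 80 KiB
 * contiguous request becomes 128 KiB, but an 80 KiB non-contiguous request
 * stays at 80 KiB because it is already page aligned.
 */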
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	if (!IS_NONCONTIG_BUFFER(flags)) {
		if (size >= SECTION_SIZE)
			return roundup(size, SECTION_SIZE);
		else if (size >= SZ_64K)
			return roundup(size, SZ_64K);
	}

	return roundup(size, PAGE_SIZE);
}
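
/*
 * exynos_gem_get_pages - allocate one page per PAGE_SIZE chunk of the object
 * with alloc_page() and collect the pointers in an array. Used to back
 * non-contiguous (EXYNOS_BO_NONCONTIG) buffers; on failure every page
 * allocated so far is released again.
 */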
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
{
	struct page *p, **pages;
	int i, npages;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!pages)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		p = alloc_page(gfpmask);
		if (!p)
			goto fail;
		pages[i] = p;
	}

	return pages;

fail:
	while (i--)
		__free_page(pages[i]);

	drm_free_large(pages);
	return ERR_PTR(-ENOMEM);
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages)
{
	int npages;

	npages = obj->size >> PAGE_SHIFT;

	while (--npages >= 0)
		__free_page(pages[npages]);

	drm_free_large(pages);
}
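
/*
 * exynos_drm_gem_map_pages - map a single page of the object at fault time.
 * Non-contiguous buffers take the page from the page array; contiguous
 * buffers derive the pfn directly from the buffer's DMA address.
 */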
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		if (!buf->pages)
			return -EINTR;

		pfn = page_to_pfn(buf->pages[page_offset++]);
	} else
		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}
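
/*
 * exynos_drm_gem_get_pages - back a non-contiguous GEM object with individual
 * pages and describe them with a scatter/gather table so they can later be
 * mapped into userspace page by page.
 */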
static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret;

	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;
	buf->page_size = PAGE_SIZE;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		goto err_free_sgt;
	}

	/* set all pages to sg list. */
	sgl = buf->sgt->sgl;
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	/* add some codes for UNCACHED type here. TODO */

	buf->pages = pages;
	return 0;

err_free_sgt:
	kfree(buf->sgt);
	buf->sgt = NULL;
err:
	exynos_gem_put_pages(obj, pages);
	return ret;
}
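
/*
 * exynos_drm_gem_put_pages - undo exynos_drm_gem_get_pages(): tear down the
 * sg table and release every page backing a non-contiguous object.
 */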
static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * if the buffer type is EXYNOS_BO_NONCONTIG, release all pages
	 * allocated in the gem fault handler.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	exynos_gem_put_pages(obj, buf->pages);
	buf->pages = NULL;

	/* add some codes for UNCACHED type here. TODO */
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
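
/*
 * exynos_drm_gem_destroy - release everything attached to a GEM object: the
 * backing pages or contiguous buffer, the mmap offset, the underlying
 * drm_gem_object and finally the exynos_drm_gem_obj wrapper itself.
 */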
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
		exynos_drm_gem_put_pages(obj);
	else
		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}
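
/*
 * exynos_drm_gem_init - allocate the exynos_drm_gem_obj wrapper and
 * initialize the embedded drm_gem_object for the given size.
 */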
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
}
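
/*
 * exynos_drm_gem_create - top-level buffer allocation: validate the flags,
 * round the size up, set up the buffer descriptor and GEM object, then
 * allocate either individual pages (EXYNOS_BO_NONCONTIG) or one contiguous
 * region.
 */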
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);
	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	/*
	 * allocate all pages of the requested size if the user wants
	 * physically non-contiguous memory.
	 */
	if (flags & EXYNOS_BO_NONCONTIG) {
		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			goto err_fini_buf;
		}
	} else {
		ret = exynos_drm_alloc_buf(dev, buf, flags);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			goto err_fini_buf;
		}
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}
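
/*
 * Userspace reaches exynos_drm_gem_create() through the EXYNOS_GEM_CREATE
 * ioctl handled below. A minimal sketch (illustrative only, error handling
 * omitted; "fd" is assumed to be an open file descriptor on the exynos DRM
 * node):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = 4 * 1024 * 1024,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *	// on success, req.handle names the new GEM object
 */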
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
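
/*
 * exynos_drm_gem_get_dma_addr - resolve a GEM handle to the DMA address of
 * its contiguous buffer for in-kernel users. The lookup takes a reference on
 * the object that is only dropped by the matching
 * exynos_drm_gem_put_dma_addr() call; non-contiguous buffers are rejected.
 */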
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-EINVAL);
	}

	return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}
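
/*
 * exynos_drm_gem_mmap_buffer - back the EXYNOS_GEM_MMAP ioctl. Non-contiguous
 * buffers are mapped page by page with vm_insert_page(); contiguous buffers
 * are mapped in one go with remap_pfn_range() from the buffer's DMA address.
 */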
static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * the buffer holds information about the memory allocated by
	 * user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		int i = 0;

		if (!buffer->pages)
			return -EINVAL;

		vma->vm_flags |= VM_MIXEDMAP;

		do {
			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
			if (ret) {
				DRM_ERROR("failed to remap user space.\n");
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	} else {
		/*
		 * get the page frame number of the physical memory to be
		 * mapped to user space.
		 */
		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
								PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}
static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};
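
/*
 * exynos_drm_gem_mmap_ioctl - map a GEM object into the calling process
 * without going through the usual mmap offset. The object's shmem file is
 * given the file_operations above so that vm_mmap() ends up in
 * exynos_drm_gem_mmap_buffer().
 */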
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}
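
/*
 * exynos_drm_gem_get_ioctl - report the flags and size of the GEM object
 * named by args->handle back to userspace.
 */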
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback is invoked by userspace with the
	 *   DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
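
	/*
	 * For example, a 1280x720 dumb buffer at 32 bpp ends up with a pitch
	 * of 1280 * 4 = 5120 bytes and a size of 5120 * 720 = 3686400 bytes,
	 * which exynos_drm_gem_create() then rounds up as needed.
	 */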
	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the offset of the memory allocated for the drm framebuffer.
	 * - this callback is invoked by userspace with the
	 *   DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased; if both reach
	 * 0, exynos_drm_gem_free_object() is called through the free
	 * callback to release the resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map pages.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}
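
/*
 * exynos_drm_gem_mmap - standard mmap path through the DRM mmap offset.
 * drm_gem_mmap() sets up the vma; the driver then replaces VM_PFNMAP with
 * VM_MIXEDMAP and applies the object's cache attributes so that faults can be
 * served page by page via exynos_drm_gem_fault().
 */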
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}