/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

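/*
 * Any errno produced on the fault path is funnelled through the helper
 * above before being handed back to the core mm: 0, -ERESTARTSYS and
 * -EINTR report no page (retry), -ENOMEM reports OOM, and everything
 * else is deliberately collapsed into VM_FAULT_SIGBUS.
 */
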
static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

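/*
 * In short: EXYNOS_BO_CACHABLE keeps the default (cached) protection
 * bits, EXYNOS_BO_WC asks for write-combining, and anything else falls
 * back to non-cached access, the safe default for buffers that are
 * also scanned out or written by DMA hardware.
 */
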
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	return roundup(size, PAGE_SIZE);
}

static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	unsigned long pfn;
	int i;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		if (!buf->sgt)
			return -EINTR;

		for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
			if (!sgl) {
				DRM_ERROR("invalid SG table\n");
				return -EINTR;
			}
			if (page_offset < (sgl->length >> PAGE_SHIFT))
				break;
			page_offset -= (sgl->length >> PAGE_SHIFT);
		}

		if (i >= buf->sgt->nents) {
			DRM_ERROR("invalid page offset\n");
			return -EINVAL;
		}

		pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
	} else {
		if (!buf->pages)
			return -EINTR;

		pfn = page_to_pfn(buf->pages[0]) + page_offset;
	}

	return vm_insert_mixed(vma, f_vaddr, pfn);
}

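/*
 * For non-contiguous buffers the faulting page is found by walking the
 * scatter-gather list and subtracting each entry's page count from
 * page_offset until the entry covering the target page is reached;
 * contiguous buffers can simply index from the first page's pfn.
 */
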
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table under which the obj is
	 * registered; the handle is the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

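/*
 * After this helper returns, the handle owns the object:
 * drm_gem_handle_create() takes its own reference and the allocation
 * reference is dropped above, so deleting the handle is enough to free
 * the object once no other reference remains.
 */
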
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);
	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		goto err_fini_buf;
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}

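/*
 * Typical creation path, as exercised by the ioctls below (sketch; the
 * flag combination is illustrative, anything within EXYNOS_BO_MASK is
 * accepted by check_gem_flags()):
 *
 *	exynos_gem_obj = exynos_drm_gem_create(dev,
 *			EXYNOS_BO_CONTIG | EXYNOS_BO_WC, size);
 *	if (IS_ERR(exynos_gem_obj))
 *		return PTR_ERR(exynos_gem_obj);
 *
 *	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base,
 *			file_priv, &handle);
 */
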
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because the lookup in
	 * exynos_drm_gem_get_dma_addr() already increased it.
	 */
	drm_gem_object_unreference_unlocked(obj);
}

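/*
 * exynos_drm_gem_get_dma_addr() and this function must be used as a
 * pair: the lookup in the getter leaves an extra reference behind that
 * only the second unreference above releases, so a missing put leaks
 * the object.
 */
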
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long vm_size;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = vma->vm_end - vma->vm_start;

	/*
	 * a buffer contains information about physically contiguous memory
	 * allocated by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	return dma_mmap_attrs(obj->dev->dev, vma, buffer->kvaddr,
				buffer->dma_addr, buffer->size,
				&buffer->dma_attrs);
}

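/*
 * dma_mmap_attrs() maps the already-allocated DMA buffer into the vma
 * in one shot, so no fault handler is needed on this path; the
 * fault-driven mapping further below only backs drm_gem_mmap().
 */
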
static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}

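/*
 * The returned copy is a detached snapshot: vm_mm and the list links
 * are cleared so it can outlive the original mapping, while the
 * vm_ops->open() call and get_file() above keep the driver-private
 * state and backing file alive until exynos_gem_put_vma().
 */
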
void exynos_gem_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}

int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region mmaped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);
			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		if (i != npages) {
			DRM_ERROR("failed to get user_pages.\n");
			return -EINVAL;
		}

		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
					npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}

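/*
 * Two lookup strategies above: VM_PFNMAP/VM_IO regions have no struct
 * page bookkeeping, so each pfn is resolved with follow_pfn(); normal
 * regions go through get_user_pages(), whose write/force arguments
 * (1, 1) also pin the pages until they are released again in
 * exynos_gem_put_pages_to_userptr().
 */
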
void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	if (!vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; i++) {
			set_page_dirty_lock(pages[i]);

			/*
			 * undo the reference we took when populating
			 * the table.
			 */
			put_page(pages[i]);
		}
	}
}

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(exynos_gem_obj);
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

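/*
 * Worked example for the pitch/size math above: a 1920x1080 dumb
 * buffer at 32 bpp gives pitch = 1920 * ((32 + 7) / 8) = 7680 bytes
 * and size = 7680 * 1080 = 8294400 bytes, which
 * exynos_drm_gem_create() then rounds up to whole pages.
 */
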
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them are 0 then exynos_drm_gem_free_object()
	 * would be called by callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map a buffer with user.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

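/*
 * The handler above fills the mapping one page per fault: the faulting
 * address is turned into a page offset inside the buffer and the
 * matching pfn is inserted with vm_insert_mixed(), which is why
 * exynos_drm_gem_mmap() below switches the vma to VM_MIXEDMAP.
 */
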
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}