/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
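
/*
 * The EXYNOS_BO_* flags checked below are defined in the uapi header
 * <drm/exynos_drm.h>: EXYNOS_BO_NONCONTIG asks for a scattered allocation,
 * while the default (EXYNOS_BO_CONTIG) forces a physically contiguous
 * region; EXYNOS_BO_CACHABLE, EXYNOS_BO_WC and EXYNOS_BO_NONCACHABLE select
 * the CPU mapping type used both here and in exynos_drm_gem_mmap().
 */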

static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
{
        struct drm_device *dev = obj->base.dev;
        enum dma_attr attr;
        unsigned int nr_pages;

        if (obj->dma_addr) {
                DRM_DEBUG_KMS("already allocated.\n");
                return 0;
        }

        init_dma_attrs(&obj->dma_attrs);

        /*
         * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
         * region will be allocated, else one that is as physically
         * contiguous as possible.
         */
        if (!(obj->flags & EXYNOS_BO_NONCONTIG))
                dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &obj->dma_attrs);

        /*
         * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combine
         * mapping, else a cachable mapping.
         */
        if (obj->flags & EXYNOS_BO_WC || !(obj->flags & EXYNOS_BO_CACHABLE))
                attr = DMA_ATTR_WRITE_COMBINE;
        else
                attr = DMA_ATTR_NON_CONSISTENT;

        dma_set_attr(attr, &obj->dma_attrs);
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &obj->dma_attrs);

        nr_pages = obj->size >> PAGE_SHIFT;

        if (!is_drm_iommu_supported(dev)) {
                dma_addr_t start_addr;
                unsigned int i = 0;

                obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
                if (!obj->pages) {
                        DRM_ERROR("failed to allocate pages.\n");
                        return -ENOMEM;
                }

                obj->cookie = dma_alloc_attrs(dev->dev, obj->size,
                                        &obj->dma_addr, GFP_KERNEL,
                                        &obj->dma_attrs);
                if (!obj->cookie) {
                        DRM_ERROR("failed to allocate buffer.\n");
                        drm_free_large(obj->pages);
                        return -ENOMEM;
                }

                start_addr = obj->dma_addr;
                while (i < nr_pages) {
                        obj->pages[i] = phys_to_page(start_addr);
                        start_addr += PAGE_SIZE;
                        i++;
                }
        } else {
                obj->pages = dma_alloc_attrs(dev->dev, obj->size,
                                        &obj->dma_addr, GFP_KERNEL,
                                        &obj->dma_attrs);
                if (!obj->pages) {
                        DRM_ERROR("failed to allocate buffer.\n");
                        return -ENOMEM;
                }
        }

        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)obj->dma_addr, obj->size);

        return 0;
}
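
/*
 * Note: on the non-IOMMU path above, the buffer is one contiguous DMA
 * allocation and obj->pages[] is filled with the struct page of every
 * PAGE_SIZE chunk, so the fault handler and the PRIME export helper below
 * can still work page by page.
 */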

static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
{
        struct drm_device *dev = obj->base.dev;

        if (!obj->dma_addr) {
                DRM_DEBUG_KMS("dma_addr is invalid.\n");
                return;
        }

        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)obj->dma_addr, obj->size);

        if (!is_drm_iommu_supported(dev)) {
                dma_free_attrs(dev->dev, obj->size, obj->cookie,
                                (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
                drm_free_large(obj->pages);
        } else
                dma_free_attrs(dev->dev, obj->size, obj->pages,
                                (dma_addr_t)obj->dma_addr, &obj->dma_attrs);

        obj->dma_addr = (dma_addr_t)NULL;
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id in the idr table where the obj is registered;
         * the handle is the id that user space sees.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj = &exynos_gem_obj->base;

        DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

        /*
         * do not release the memory region here if it came from an
         * exporter; the exporter releases the region once the dmabuf's
         * refcount becomes 0.
         */
        if (obj->import_attach)
                goto out;

        exynos_drm_free_buf(exynos_gem_obj);

out:
        drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
        exynos_gem_obj = NULL;
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                                unsigned int gem_handle,
                                                struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return 0;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        return exynos_gem_obj->size;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return ERR_PTR(ret);
        }

        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

        return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup(size, PAGE_SIZE);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (IS_ERR(exynos_gem_obj))
                return exynos_gem_obj;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        ret = exynos_drm_alloc_buf(exynos_gem_obj);
        if (ret < 0) {
                drm_gem_object_release(&exynos_gem_obj->base);
                kfree(exynos_gem_obj);
                return ERR_PTR(ret);
        }

        return exynos_gem_obj;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
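
/*
 * For illustration only (not part of the original driver): user space would
 * reach exynos_drm_gem_create_ioctl() roughly like this, using the request
 * structure from the uapi header <drm/exynos_drm.h>:
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = len,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		req.handle now names the new GEM object.
 */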

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        return &exynos_gem_obj->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        drm_gem_object_unreference_unlocked(obj);

        /*
         * drop obj->refcount one more time because it was already
         * increased in exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}
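
/*
 * exynos_drm_gem_get_dma_addr() keeps the reference taken by its handle
 * lookup so the returned DMA address stays valid; the double unreference
 * above releases both that reference and the one taken by this function's
 * own lookup.
 */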

int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
                                      struct vm_area_struct *vma)
{
        struct drm_device *drm_dev = exynos_gem_obj->base.dev;
        unsigned long vm_size;
        int ret;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        vm_size = vma->vm_end - vma->vm_start;

        /* check if user-requested size is valid. */
        if (vm_size > exynos_gem_obj->size)
                return -EINVAL;

        ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem_obj->pages,
                                exynos_gem_obj->dma_addr, exynos_gem_obj->size,
                                &exynos_gem_obj->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        return 0;
}
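
/*
 * dma_mmap_attrs() reuses the dma_attrs chosen at allocation time, so the
 * user mapping created here gets the same write-combine/cachable behaviour
 * that exynos_drm_alloc_buf() requested.
 */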

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
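
/*
 * Illustrative only (not from the original file): querying a buffer from
 * user space mirrors the create path, e.g.
 *
 *	struct drm_exynos_gem_info info = { .handle = handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_GET, &info);
 *	(info.flags and info.size are filled in by this ioctl.)
 */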

struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *vma_copy;

        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
        if (!vma_copy)
                return NULL;

        if (vma->vm_ops && vma->vm_ops->open)
                vma->vm_ops->open(vma);

        if (vma->vm_file)
                get_file(vma->vm_file);

        memcpy(vma_copy, vma, sizeof(*vma));

        vma_copy->vm_mm = NULL;
        vma_copy->vm_next = NULL;
        vma_copy->vm_prev = NULL;

        return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
        if (!vma)
                return;

        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);

        if (vma->vm_file)
                fput(vma->vm_file);

        kfree(vma);
}

int exynos_gem_get_pages_from_userptr(unsigned long start,
                                                unsigned int npages,
                                                struct page **pages,
                                                struct vm_area_struct *vma)
{
        int get_npages;

        /* the memory region was mmapped with VM_PFNMAP. */
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
                        if (ret)
                                return ret;

                        pages[i] = pfn_to_page(pfn);
                }

                if (i != npages) {
                        DRM_ERROR("failed to get user_pages.\n");
                        return -EINVAL;
                }

                return 0;
        }

        get_npages = get_user_pages(current, current->mm, start,
                                        npages, 1, 1, pages, NULL);
        get_npages = max(get_npages, 0);
        if (get_npages != npages) {
                DRM_ERROR("failed to get user_pages.\n");
                while (get_npages)
                        put_page(pages[--get_npages]);
                return -EFAULT;
        }

        return 0;
}
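
/*
 * Two userptr cases are handled above: VM_PFNMAP (I/O) mappings are walked
 * with follow_pfn() and take no page references, while ordinary memory is
 * pinned with get_user_pages(); on a partial pin, every page already taken
 * is released again before returning an error.
 */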

void exynos_gem_put_pages_to_userptr(struct page **pages,
                                        unsigned int npages,
                                        struct vm_area_struct *vma)
{
        if (!vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; i++) {
                        set_page_dirty_lock(pages[i]);

                        /*
                         * undo the reference we took when populating
                         * the table.
                         */
                        put_page(pages[i]);
                }
        }
}

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int nents;

        mutex_lock(&drm_dev->struct_mutex);

        nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                return nents;
        }

        mutex_unlock(&drm_dev->struct_mutex);
        return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        /*
         * allocate memory to be used for a framebuffer.
         * - this callback is invoked by user space through the
         *   DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        if (is_drm_iommu_supported(dev)) {
                exynos_gem_obj = exynos_drm_gem_create(dev,
                        EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
                        args->size);
        } else {
                exynos_gem_obj = exynos_drm_gem_create(dev,
                        EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
                        args->size);
        }

        if (IS_ERR(exynos_gem_obj)) {
                dev_warn(dev->dev, "FB allocation failed.\n");
                return PTR_ERR(exynos_gem_obj);
        }

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
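
/*
 * Dumb buffers are always write-combined; they only need to be physically
 * contiguous when the device has no IOMMU, since with an IOMMU a scattered
 * allocation can still appear contiguous in device address space.
 */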

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        /*
         * get the offset of memory allocated for a drm framebuffer.
         * - this callback is invoked by user space through the
         *   DRM_IOCTL_MODE_MAP_DUMB command.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        unsigned long pfn;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;

        if (page_offset >= (exynos_gem_obj->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                ret = -EINVAL;
                goto out;
        }

        pfn = page_to_pfn(exynos_gem_obj->pages[page_offset]);
        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
        switch (ret) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}
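
/*
 * The fault handler above resolves a faulting user address to an index into
 * obj->pages[], range-checks it against the object size and inserts the
 * matching pfn with vm_insert_mixed().
 */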

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem_obj->flags);

        /* non-cachable as default. */
        if (exynos_gem_obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (exynos_gem_obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));

        ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
        if (ret)
                goto err_close_vm;

        return ret;

err_close_vm:
        drm_gem_vm_close(vma);
        drm_gem_free_mmap_offset(obj);

        return ret;
}
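
/*
 * The vm_page_prot selection above mirrors the allocation flags: cachable
 * objects keep the default protection, EXYNOS_BO_WC objects get a
 * write-combined mapping, and everything else falls back to non-cached,
 * which is also the default for objects created without cache flags.
 */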

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        int npages;

        npages = exynos_gem_obj->size >> PAGE_SHIFT;

        return drm_prime_pages_to_sg(exynos_gem_obj->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
                                     struct dma_buf_attachment *attach,
                                     struct sg_table *sgt)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int npages;
        int ret;

        exynos_gem_obj = exynos_drm_gem_init(dev, attach->dmabuf->size);
        if (IS_ERR(exynos_gem_obj)) {
                ret = PTR_ERR(exynos_gem_obj);
                return ERR_PTR(ret);
        }

        exynos_gem_obj->dma_addr = sg_dma_address(sgt->sgl);

        npages = exynos_gem_obj->size >> PAGE_SHIFT;
        exynos_gem_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (!exynos_gem_obj->pages) {
                ret = -ENOMEM;
                goto err;
        }

        ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem_obj->pages, NULL,
                        npages);
        if (ret < 0)
                goto err_free_large;

        if (sgt->nents == 1) {
                /* always physically contiguous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
        } else {
                /*
                 * this case could be CONTIG or NONCONTIG type, but assume
                 * NONCONTIG for now.
                 * TODO: find a way for the exporter to notify the importer
                 * of the type of its own buffer.
                 */
                exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
        }

        return &exynos_gem_obj->base;

err_free_large:
        drm_free_large(exynos_gem_obj->pages);
err:
        drm_gem_object_release(&exynos_gem_obj->base);
        kfree(exynos_gem_obj);
        return ERR_PTR(ret);
}
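
/*
 * No kernel virtual mapping is kept for these buffers (the allocator sets
 * DMA_ATTR_NO_KERNEL_MAPPING), so the PRIME vmap/vunmap hooks below have
 * nothing to hand out and are effectively no-ops.
 */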

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
        return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
        /* Nothing to do */
}