/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_sync_helper.h>
#include <drm/drm_vma_manager.h>
#include <drm/rockchip_drm.h>

#include <linux/completion.h>
#include <linux/dma-attrs.h>
#include <linux/dma-buf.h>
#include <linux/reservation.h>
#include <linux/iommu.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

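/*
 * IOMMU-backed objects are made of discontiguous shmem pages.  The helpers
 * below reserve an I/O virtual address range from the drm_mm allocator in
 * rockchip_drm_private and map the object's scatter-gather table into the
 * attached IOMMU domain at that address.
 */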
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	/* Reserve an IOVA range for the object. */
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0, 0);
	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: %zd\n", ret);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	drm_mm_remove_node(&rk_obj->mm);

	return ret;
}

static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);
	drm_mm_remove_node(&rk_obj->mm);

	return 0;
}

static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	init_dma_attrs(&rk_obj->dma_attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);

	if (!alloc_kmap)
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs);

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 &rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
		return -ENOMEM;
	}

	return 0;
}

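/*
 * Objects are backed either by shmem pages mapped through the IOMMU or, when
 * no IOMMU domain is attached, by a physically contiguous buffer from the
 * DMA mapping API.  A non-NULL rk_obj->pages is what distinguishes the two
 * paths later on (mmap, vmap and free).
 */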
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr,
		       rk_obj->dma_addr, &rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int i, count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long uaddr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	unsigned long end = user_count + offset;
	int ret;

	if (user_count == 0)
		return -ENXIO;
	if (end > count)
		return -ENXIO;

	/* Insert the backing pages into the user mapping one by one. */
	for (i = offset; i < end; i++) {
		ret = vm_insert_page(vma, uaddr, rk_obj->pages[i]);
		if (ret)
			return ret;
		uaddr += PAGE_SIZE;
	}

	return 0;
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, &rk_obj->dma_attrs);
}

static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

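/*
 * From userspace the fake offset that drm_gem_mmap() expects is obtained
 * with DRM_IOCTL_MODE_MAP_DUMB (or the driver-specific map-offset ioctl
 * further below) and passed to mmap() on the DRM fd.  A minimal sketch,
 * assuming "fd" is an open DRM node and "handle"/"size" describe an
 * existing object:
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, map.offset);
 */
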
static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
 * function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
				     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

#ifdef CONFIG_DRM_DMA_SYNC
	drm_fence_signal_and_put(&rk_obj->acquire_fence);
#endif

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * allocate an id from the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
				 struct drm_device *dev, uint32_t handle,
				 uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);

out:
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * align to 64 bytes since Mali requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

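/*
 * A minimal sketch of the dumb-buffer path from userspace, assuming "fd" is
 * an open DRM node; pitch comes back 64-byte aligned and size is
 * pitch * height, as computed above:
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	// create.pitch == 7680, create.size == 7680 * 1080
 */
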
int rockchip_gem_map_offset_ioctl(struct drm_device *drm, void *data,
				  struct drm_file *file_priv)
{
	struct drm_rockchip_gem_map_off *args = data;

	return rockchip_gem_dumb_map_offset(file_priv, drm, args->handle,
					    &args->offset);
}

int rockchip_gem_create_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_rockchip_gem_create *args = data;
	struct rockchip_gem_object *rk_obj;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

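/*
 * The driver-private create/map-offset ioctls mirror the dumb path but take
 * a byte size directly.  A hedged userspace sketch, assuming the
 * DRM_IOCTL_ROCKCHIP_GEM_* wrappers from the uapi rockchip_drm.h header:
 *
 *	struct drm_rockchip_gem_create create = { .size = len };
 *	struct drm_rockchip_gem_map_off map = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_ROCKCHIP_GEM_CREATE, &create);
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET, &map);
 *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, map.offset);
 */
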
static struct reservation_object *drm_gem_get_resv(struct drm_gem_object *gem)
{
	struct dma_buf *dma_buf = gem->dma_buf;

	return dma_buf ? dma_buf->resv : NULL;
}

#ifdef CONFIG_DRM_DMA_SYNC
static void rockchip_gem_acquire_complete(struct drm_reservation_cb *rcb,
					  void *context)
{
	struct completion *compl = context;

	complete(compl);
}

static int rockchip_gem_acquire(struct drm_device *dev,
		struct rockchip_gem_object *rockchip_gem_obj,
		bool exclusive)
{
	struct fence *fence;
	struct rockchip_drm_private *dev_priv = dev->dev_private;
	struct reservation_object *resv =
		drm_gem_get_resv(&rockchip_gem_obj->base);
	int ret = 0;
	struct drm_reservation_cb rcb;
	DECLARE_COMPLETION_ONSTACK(compl);

	if (!resv)
		return ret;

	/* A shared acquire can piggyback on an existing shared fence. */
	if (!exclusive &&
	    !rockchip_gem_obj->acquire_exclusive &&
	    rockchip_gem_obj->acquire_fence) {
		atomic_inc(&rockchip_gem_obj->acquire_shared_count);
		return ret;
	}

	fence = drm_sw_fence_new(dev_priv->cpu_fence_context,
			atomic_add_return(1, &dev_priv->cpu_fence_seqno));
	if (IS_ERR(fence)) {
		ret = PTR_ERR(fence);
		DRM_ERROR("Failed to create acquire fence %d.\n", ret);
		return ret;
	}
	ww_mutex_lock(&resv->lock, NULL);
	if (!exclusive) {
		ret = reservation_object_reserve_shared(resv);
		if (ret < 0) {
			DRM_ERROR("Failed to reserve space for shared fence %d.\n",
				  ret);
			goto resv_unlock;
		}
	}
	drm_reservation_cb_init(&rcb, rockchip_gem_acquire_complete, &compl);
	ret = drm_reservation_cb_add(&rcb, resv, exclusive);
	if (ret < 0) {
		DRM_ERROR("Failed to add reservation to callback %d.\n", ret);
		goto resv_unlock;
	}
	drm_reservation_cb_done(&rcb);
	if (exclusive)
		reservation_object_add_excl_fence(resv, fence);
	else
		reservation_object_add_shared_fence(resv, fence);

	ww_mutex_unlock(&resv->lock);
	mutex_unlock(&dev->struct_mutex);
	ret = wait_for_completion_interruptible(&compl);
	mutex_lock(&dev->struct_mutex);
	if (ret < 0) {
		DRM_ERROR("Failed wait for reservation callback %d.\n", ret);
		drm_reservation_cb_fini(&rcb);
		/* somebody else may be already waiting on it */
		drm_fence_signal_and_put(&fence);
		return ret;
	}
	rockchip_gem_obj->acquire_fence = fence;
	rockchip_gem_obj->acquire_exclusive = exclusive;
	atomic_set(&rockchip_gem_obj->acquire_shared_count, 1);
	return ret;

resv_unlock:
	ww_mutex_unlock(&resv->lock);
	fence_put(fence);
	return ret;
}

static void rockchip_gem_release(struct rockchip_gem_object *rockchip_gem_obj)
{
	BUG_ON(!rockchip_gem_obj->acquire_fence);
	if (atomic_sub_and_test(1,
			&rockchip_gem_obj->acquire_shared_count))
		drm_fence_signal_and_put(&rockchip_gem_obj->acquire_fence);
}
#endif

int rockchip_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_rockchip_gem_cpu_acquire *args = data;
	struct rockchip_drm_file_private *file_priv = file->driver_priv;
	struct drm_gem_object *obj;
	struct rockchip_gem_object *rockchip_gem_obj;
	struct rockchip_gem_object_node *gem_node;
	int ret = 0;

	DRM_DEBUG_KMS("[BO:%u] flags: 0x%x\n", args->handle, args->flags);

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	rockchip_gem_obj = to_rockchip_obj(obj);

	if (!drm_gem_get_resv(&rockchip_gem_obj->base)) {
		/* If there is no reservation object present, there is no
		 * cross-process/cross-device sharing and sync is unnecessary.
		 */
		ret = 0;
		goto unref_obj;
	}

#ifdef CONFIG_DRM_DMA_SYNC
	ret = rockchip_gem_acquire(dev, rockchip_gem_obj,
			args->flags & DRM_ROCKCHIP_GEM_CPU_ACQUIRE_EXCLUSIVE);
	if (ret < 0)
		goto unref_obj;
#endif

	gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
	if (!gem_node) {
		DRM_ERROR("Failed to allocate rockchip_drm_gem_obj_node.\n");
		ret = -ENOMEM;
		goto release_sync;
	}

	gem_node->rockchip_gem_obj = rockchip_gem_obj;
	list_add(&gem_node->list, &file_priv->gem_cpu_acquire_list);
	mutex_unlock(&dev->struct_mutex);

	return 0;

release_sync:
#ifdef CONFIG_DRM_DMA_SYNC
	rockchip_gem_release(rockchip_gem_obj);
#endif
unref_obj:
	drm_gem_object_unreference(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int rockchip_gem_cpu_release_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_rockchip_gem_cpu_release *args = data;
	struct rockchip_drm_file_private *file_priv = file->driver_priv;
	struct drm_gem_object *obj;
	struct rockchip_gem_object *rockchip_gem_obj;
	struct list_head *cur;
	int ret = 0;

	DRM_DEBUG_KMS("[BO:%u]\n", args->handle);

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	rockchip_gem_obj = to_rockchip_obj(obj);

	if (!drm_gem_get_resv(&rockchip_gem_obj->base)) {
		/* If there is no reservation object present, there is no
		 * cross-process/cross-device sharing and sync is unnecessary.
		 */
		ret = 0;
		goto unref_obj;
	}

	list_for_each(cur, &file_priv->gem_cpu_acquire_list) {
		struct rockchip_gem_object_node *node = list_entry(
				cur, struct rockchip_gem_object_node, list);
		if (node->rockchip_gem_obj == rockchip_gem_obj)
			break;
	}
	if (cur == &file_priv->gem_cpu_acquire_list) {
		DRM_ERROR("gem object not acquired for current process.\n");
		ret = -EINVAL;
		goto unref_obj;
	}

#ifdef CONFIG_DRM_DMA_SYNC
	rockchip_gem_release(rockchip_gem_obj);
#endif

	list_del(cur);
	kfree(list_entry(cur, struct rockchip_gem_object_node, list));
	/* unreference for the reference held since cpu_acquire_ioctl */
	drm_gem_object_unreference(obj);
	ret = 0;

unref_obj:
	/* unreference for the reference from drm_gem_object_lookup() */
	drm_gem_object_unreference(obj);

unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

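/*
 * Userspace brackets direct CPU access with the acquire/release pair so that
 * the implicit fences in the buffer's reservation object are honoured.  A
 * hedged sketch, assuming the usual ioctl wrappers from the uapi
 * rockchip_drm.h header:
 *
 *	struct drm_rockchip_gem_cpu_acquire acq = {
 *		.handle = handle,
 *		.flags = DRM_ROCKCHIP_GEM_CPU_ACQUIRE_EXCLUSIVE,
 *	};
 *	struct drm_rockchip_gem_cpu_release rel = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_ROCKCHIP_GEM_CPU_ACQUIRE, &acq);
 *	// ... CPU reads/writes of the mapped buffer ...
 *	ioctl(fd, DRM_IOCTL_ROCKCHIP_GEM_CPU_RELEASE, &rel);
 */
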
/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    &rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
						     int count)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, count, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}

	return size;
}

static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;

	return rockchip_gem_iommu_map(rk_obj);
}

static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
			       DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
			     DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;

	return 0;
}

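/*
 * For imported dma-bufs the exporter's scatter-gather table is either mapped
 * through the IOMMU (discontiguous pages are fine) or, on the DMA path, must
 * already describe one contiguous region; otherwise the import is rejected.
 */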
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);

	return ERR_PTR(ret);
}

void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs))
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}