From: Dominik Behr Date: Thu, 13 Nov 2014 01:36:42 +0000 (-0800) Subject: CHROMIUM: drm/rockchip: add GEM CPU acquire/release ioctls X-Git-Tag: firefly_0821_release~2863 X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=47587d3a97ff95596491b370f3db47ed6b82c4f7;p=firefly-linux-kernel-4.4.55.git CHROMIUM: drm/rockchip: add GEM CPU acquire/release ioctls These ioctls can be used to synchronize CPU/GPU access to a buffer. BUG=chrome-os-partner:33438 TEST=add CONFIG_DRM_DMA_SYNC=y, in conjunction with xf86-video-armsoc change,\ run any X application, like xev Change-Id: I8065ec465ebd0cb6abe128a3e7d92a8f74a88928 Signed-off-by: Dominik Behr Reviewed-on: https://chromium-review.googlesource.com/229441 Reviewed-by: Daniel Kurtz Conflicts: drivers/gpu/drm/rockchip/rockchip_drm_drv.c drivers/gpu/drm/rockchip/rockchip_drm_drv.h drivers/gpu/drm/rockchip/rockchip_drm_gem.c (cherry picked from cros/chromeos-3.14 commit a847e1f492cbd186116c01a3f56575320dc87152) Signed-off-by: ZhengShunQian --- diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 57e19122fd06..a555f51a3581 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -19,11 +19,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include @@ -158,6 +160,11 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) drm_dev->dev_private = private; +#ifdef CONFIG_DRM_DMA_SYNC + private->cpu_fence_context = fence_context_alloc(1); + atomic_set(&private->cpu_fence_seqno, 0); +#endif + drm_mode_config_init(drm_dev); rockchip_drm_mode_config_init(drm_dev); @@ -300,13 +307,51 @@ static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc, priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv); } +static int rockchip_drm_open(struct drm_device *dev, struct drm_file *file) +{ + struct rockchip_drm_file_private *file_priv; + + file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); + if (!file_priv) + return -ENOMEM; + INIT_LIST_HEAD(&file_priv->gem_cpu_acquire_list); + + file->driver_priv = file_priv; + + return 0; +} + static void rockchip_drm_preclose(struct drm_device *dev, struct drm_file *file_priv) { + struct rockchip_drm_file_private *file_private = file_priv->driver_priv; + struct rockchip_gem_object_node *cur, *d; struct drm_crtc *crtc; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv); + + mutex_lock(&dev->struct_mutex); + list_for_each_entry_safe(cur, d, + &file_private->gem_cpu_acquire_list, list) { +#ifdef CONFIG_DRM_DMA_SYNC + BUG_ON(!cur->rockchip_gem_obj->acquire_fence); + drm_fence_signal_and_put(&cur->rockchip_gem_obj->acquire_fence); +#endif + drm_gem_object_unreference(&cur->rockchip_gem_obj->base); + kfree(cur); + } + /* since we are deleting the whole list, just initialize the header + * instead of calling list_del for every element + */ + INIT_LIST_HEAD(&file_private->gem_cpu_acquire_list); + mutex_unlock(&dev->struct_mutex); +} + +static void rockchip_drm_postclose(struct drm_device *dev, struct drm_file *file) +{ + kfree(file->driver_priv); + file->driver_priv = NULL; } void rockchip_drm_lastclose(struct drm_device *dev) @@ -322,6 +367,12 @@ static const struct drm_ioctl_desc rockchip_ioctls[] = { DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_MAP_OFFSET, rockchip_gem_map_offset_ioctl, DRM_UNLOCKED | DRM_AUTH), + DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_CPU_ACQUIRE, + 
rockchip_gem_cpu_acquire_ioctl, + DRM_UNLOCKED | DRM_AUTH), + DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_CPU_RELEASE, + rockchip_gem_cpu_release_ioctl, + DRM_UNLOCKED | DRM_AUTH), }; static const struct file_operations rockchip_drm_driver_fops = { @@ -350,6 +401,8 @@ static struct drm_driver rockchip_drm_driver = { .preclose = rockchip_drm_preclose, .lastclose = rockchip_drm_lastclose, .get_vblank_counter = drm_vblank_no_hw_counter, + .open = rockchip_drm_open, + .postclose = rockchip_drm_postclose, .enable_vblank = rockchip_drm_crtc_enable_vblank, .disable_vblank = rockchip_drm_crtc_disable_vblank, .gem_vm_ops = &rockchip_drm_vm_ops, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 2677b95d81d4..6e0886944275 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -50,11 +50,22 @@ struct rockchip_atomic_commit { struct mutex lock; }; +/* + * Rockchip drm_file private structure. + * + * @gem_cpu_acquire_list: list of GEM objects we hold acquires on + */ +struct rockchip_drm_file_private { + struct list_head gem_cpu_acquire_list; +}; + /* * Rockchip drm private structure. * * @crtc: array of enabled CRTCs, used to map from "pipe" to drm_crtc. * @num_pipe: number of pipes for this device. + * @cpu_fence_context: fence context used for CPU acquire/release + * @cpu_fence_seqno: fence sequence number */ struct rockchip_drm_private { struct drm_fb_helper fbdev_helper; @@ -63,6 +74,10 @@ struct rockchip_drm_private { struct rockchip_atomic_commit commit; struct iommu_domain *domain; +#ifdef CONFIG_DRM_DMA_SYNC + unsigned int cpu_fence_context; + atomic_t cpu_fence_seqno; +#endif }; void rockchip_drm_atomic_work(struct work_struct *work); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index c023d02092d1..b215dc890adb 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -15,10 +15,14 @@ #include #include #include +#include #include #include +#include #include +#include +#include #include "rockchip_drm_drv.h" #include "rockchip_drm_gem.h" @@ -148,6 +152,10 @@ void rockchip_gem_free_object(struct drm_gem_object *obj) rockchip_gem_free_buf(rk_obj); +#ifdef CONFIG_DRM_DMA_SYNC + drm_fence_signal_and_put(&rk_obj->acquire_fence); +#endif + kfree(rk_obj); } @@ -264,6 +272,225 @@ int rockchip_gem_create_ioctl(struct drm_device *dev, void *data, return PTR_ERR_OR_ZERO(rk_obj); } +static struct reservation_object *drm_gem_get_resv(struct drm_gem_object *gem) +{ + struct dma_buf *dma_buf = gem->dma_buf; + return dma_buf ? 
dma_buf->resv : NULL; +} + +#ifdef CONFIG_DRM_DMA_SYNC +static void rockchip_gem_acquire_complete(struct drm_reservation_cb *rcb, + void *context) +{ + struct completion *compl = context; + complete(compl); +} + +static int rockchip_gem_acquire(struct drm_device *dev, + struct rockchip_gem_object *rockchip_gem_obj, + bool exclusive) +{ + struct fence *fence; + struct rockchip_drm_private *dev_priv = dev->dev_private; + struct reservation_object *resv = + drm_gem_get_resv(&rockchip_gem_obj->base); + int ret = 0; + struct drm_reservation_cb rcb; + DECLARE_COMPLETION_ONSTACK(compl); + + if (!resv) + return ret; + + if (!exclusive && + !rockchip_gem_obj->acquire_exclusive && + rockchip_gem_obj->acquire_fence) { + atomic_inc(&rockchip_gem_obj->acquire_shared_count); + return ret; + } + + fence = drm_sw_fence_new(dev_priv->cpu_fence_context, + atomic_add_return(1, &dev_priv->cpu_fence_seqno)); + if (IS_ERR(fence)) { + ret = PTR_ERR(fence); + DRM_ERROR("Failed to create acquire fence %d.\n", ret); + return ret; + } + ww_mutex_lock(&resv->lock, NULL); + if (!exclusive) { + ret = reservation_object_reserve_shared(resv); + if (ret < 0) { + DRM_ERROR("Failed to reserve space for shared fence %d.\n", + ret); + goto resv_unlock; + } + } + drm_reservation_cb_init(&rcb, rockchip_gem_acquire_complete, &compl); + ret = drm_reservation_cb_add(&rcb, resv, exclusive); + if (ret < 0) { + DRM_ERROR("Failed to add reservation to callback %d.\n", ret); + goto resv_unlock; + } + drm_reservation_cb_done(&rcb); + if (exclusive) + reservation_object_add_excl_fence(resv, fence); + else + reservation_object_add_shared_fence(resv, fence); + + ww_mutex_unlock(&resv->lock); + mutex_unlock(&dev->struct_mutex); + ret = wait_for_completion_interruptible(&compl); + mutex_lock(&dev->struct_mutex); + if (ret < 0) { + DRM_ERROR("Failed wait for reservation callback %d.\n", ret); + drm_reservation_cb_fini(&rcb); + /* somebody else may be already waiting on it */ + drm_fence_signal_and_put(&fence); + return ret; + } + rockchip_gem_obj->acquire_fence = fence; + rockchip_gem_obj->acquire_exclusive = exclusive; + atomic_set(&rockchip_gem_obj->acquire_shared_count, 1); + return ret; + +resv_unlock: + ww_mutex_unlock(&resv->lock); + fence_put(fence); + return ret; +} + +static void rockchip_gem_release(struct rockchip_gem_object *rockchip_gem_obj) +{ + BUG_ON(!rockchip_gem_obj->acquire_fence); + if (atomic_sub_and_test(1, + &rockchip_gem_obj->acquire_shared_count)) + drm_fence_signal_and_put(&rockchip_gem_obj->acquire_fence); +} +#endif + +int rockchip_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_rockchip_gem_cpu_acquire *args = data; + struct rockchip_drm_file_private *file_priv = file->driver_priv; + struct drm_gem_object *obj; + struct rockchip_gem_object *rockchip_gem_obj; + struct rockchip_gem_object_node *gem_node; + int ret = 0; + + DRM_DEBUG_KMS("[BO:%u] flags: 0x%x\n", args->handle, args->flags); + + mutex_lock(&dev->struct_mutex); + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) { + DRM_ERROR("failed to lookup gem object.\n"); + ret = -EINVAL; + goto unlock; + } + + rockchip_gem_obj = to_rockchip_obj(obj); + + if (!drm_gem_get_resv(&rockchip_gem_obj->base)) { + /* If there is no reservation object present, there is no + * cross-process/cross-device sharing and sync is unnecessary. 
+ */ + ret = 0; + goto unref_obj; + } + +#ifdef CONFIG_DRM_DMA_SYNC + ret = rockchip_gem_acquire(dev, rockchip_gem_obj, + args->flags & DRM_ROCKCHIP_GEM_CPU_ACQUIRE_EXCLUSIVE); + if (ret < 0) + goto unref_obj; +#endif + + gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL); + if (!gem_node) { + DRM_ERROR("Failed to allocate rockchip_drm_gem_obj_node.\n"); + ret = -ENOMEM; + goto release_sync; + } + + gem_node->rockchip_gem_obj = rockchip_gem_obj; + list_add(&gem_node->list, &file_priv->gem_cpu_acquire_list); + mutex_unlock(&dev->struct_mutex); + return 0; + +release_sync: +#ifdef CONFIG_DRM_DMA_SYNC + rockchip_gem_release(rockchip_gem_obj); +#endif +unref_obj: + drm_gem_object_unreference(obj); + +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int rockchip_gem_cpu_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_rockchip_gem_cpu_release *args = data; + struct rockchip_drm_file_private *file_priv = file->driver_priv; + struct drm_gem_object *obj; + struct rockchip_gem_object *rockchip_gem_obj; + struct list_head *cur; + int ret = 0; + + DRM_DEBUG_KMS("[BO:%u]\n", args->handle); + + mutex_lock(&dev->struct_mutex); + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (!obj) { + DRM_ERROR("failed to lookup gem object.\n"); + ret = -EINVAL; + goto unlock; + } + + rockchip_gem_obj = to_rockchip_obj(obj); + + if (!drm_gem_get_resv(&rockchip_gem_obj->base)) { + /* If there is no reservation object present, there is no + * cross-process/cross-device sharing and sync is unnecessary. + */ + ret = 0; + goto unref_obj; + } + + list_for_each(cur, &file_priv->gem_cpu_acquire_list) { + struct rockchip_gem_object_node *node = list_entry( + cur, struct rockchip_gem_object_node, list); + if (node->rockchip_gem_obj == rockchip_gem_obj) + break; + } + if (cur == &file_priv->gem_cpu_acquire_list) { + DRM_ERROR("gem object not acquired for current process.\n"); + ret = -EINVAL; + goto unref_obj; + } + +#ifdef CONFIG_DRM_DMA_SYNC + rockchip_gem_release(rockchip_gem_obj); +#endif + + list_del(cur); + kfree(list_entry(cur, struct rockchip_gem_object_node, list)); + /* unreference for the reference held since cpu_acquire_ioctl */ + drm_gem_object_unreference(obj); + ret = 0; + +unref_obj: + /* unreference for the reference from drm_gem_object_lookup() */ + drm_gem_object_unreference(obj); + +unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} + /* * Allocate a sg_table for this GEM object. * Note: Both the table's contents, and the sg_table itself must be freed by diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index 3584b944a5a8..0cbe8606e4d1 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -24,6 +24,23 @@ struct rockchip_gem_object { void *kvaddr; dma_addr_t dma_addr; struct dma_attrs dma_attrs; + +#ifdef CONFIG_DRM_DMA_SYNC + struct fence *acquire_fence; + atomic_t acquire_shared_count; + bool acquire_exclusive; +#endif +}; + +/* + * rockchip drm GEM object linked list structure. + * + * @list: list link. + * @rockchip_gem_obj: struct rockchhip_gem_object that this entry points to. 
+ */ +struct rockchip_gem_object_node { + struct list_head list; + struct rockchip_gem_object *rockchip_gem_obj; }; struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); @@ -52,8 +69,6 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv, int rockchip_gem_dumb_map_offset(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); -int rockchip_gem_map_offset_ioctl(struct drm_device *drm, void *data, - struct drm_file *file_priv); /* * request gem object creation and buffer allocation as the size * that it is calculated with framebuffer information such as width, @@ -66,4 +81,15 @@ int rockchip_gem_create_ioctl(struct drm_device *dev, void *data, int rockchip_gem_map_offset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +/* + * acquire gem object for CPU access. + */ +int rockchip_gem_cpu_acquire_ioctl(struct drm_device *dev, void* data, + struct drm_file *file_priv); +/* + * release gem object after CPU access. + */ +int rockchip_gem_cpu_release_ioctl(struct drm_device *dev, void* data, + struct drm_file *file_priv); + #endif /* _ROCKCHIP_DRM_GEM_H */ diff --git a/include/uapi/drm/rockchip_drm.h b/include/uapi/drm/rockchip_drm.h index c521e5a3e0cb..b8f367d9114d 100644 --- a/include/uapi/drm/rockchip_drm.h +++ b/include/uapi/drm/rockchip_drm.h @@ -45,8 +45,36 @@ struct drm_rockchip_gem_map_off { uint64_t offset; }; +/* acquire type definitions. */ +enum drm_rockchip_gem_cpu_acquire_type { + DRM_ROCKCHIP_GEM_CPU_ACQUIRE_SHARED = 0x0, + DRM_ROCKCHIP_GEM_CPU_ACQUIRE_EXCLUSIVE = 0x1, +}; + +/** + * A structure for acquiring buffer for CPU access. + * + * @handle: a handle to gem object created. + * @flags: acquire flag + */ +struct drm_rockchip_gem_cpu_acquire { + uint32_t handle; + uint32_t flags; +}; + +/* + * A structure for releasing buffer for GPU access. + * + * @handle: a handle to gem object created. + */ +struct drm_rockchip_gem_cpu_release { + uint32_t handle; +}; + #define DRM_ROCKCHIP_GEM_CREATE 0x00 #define DRM_ROCKCHIP_GEM_MAP_OFFSET 0x01 +#define DRM_ROCKCHIP_GEM_CPU_ACQUIRE 0x02 +#define DRM_ROCKCHIP_GEM_CPU_RELEASE 0x03 #define DRM_IOCTL_ROCKCHIP_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ DRM_ROCKCHIP_GEM_CREATE, struct drm_rockchip_gem_create) @@ -54,4 +82,10 @@ struct drm_rockchip_gem_map_off { #define DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \ DRM_ROCKCHIP_GEM_MAP_OFFSET, struct drm_rockchip_gem_map_off) +#define DRM_IOCTL_ROCKCHIP_GEM_CPU_ACQUIRE DRM_IOWR(DRM_COMMAND_BASE + \ + DRM_ROCKCHIP_GEM_CPU_ACQUIRE, struct drm_rockchip_gem_cpu_acquire) + +#define DRM_IOCTL_ROCKCHIP_GEM_CPU_RELEASE DRM_IOWR(DRM_COMMAND_BASE + \ + DRM_ROCKCHIP_GEM_CPU_RELEASE, struct drm_rockchip_gem_cpu_release) + #endif /* _UAPI_ROCKCHIP_DRM_H */
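
For reference, a minimal userspace sketch of the intended acquire/compute/release flow. It assumes a libdrm-style DRM file descriptor and a GEM handle obtained beforehand (for example via DRM_IOCTL_ROCKCHIP_GEM_CREATE); the helper name, include paths and error handling below are illustrative only and are not part of this patch:

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/rockchip_drm.h>	/* exact path depends on how the UAPI header is installed */

/* Acquire the buffer for CPU access, touch it, then release it again. */
static int rockchip_gem_cpu_access(int fd, uint32_t handle, int write)
{
	struct drm_rockchip_gem_cpu_acquire acq = {
		.handle = handle,
		/* exclusive for CPU writes; shared is enough for read-only access */
		.flags = write ? DRM_ROCKCHIP_GEM_CPU_ACQUIRE_EXCLUSIVE
			       : DRM_ROCKCHIP_GEM_CPU_ACQUIRE_SHARED,
	};
	struct drm_rockchip_gem_cpu_release rel = { .handle = handle };

	/* Blocks until conflicting work already queued on the buffer's
	 * reservation object has completed. */
	if (drmIoctl(fd, DRM_IOCTL_ROCKCHIP_GEM_CPU_ACQUIRE, &acq))
		return -errno;

	/* ... CPU reads/writes through the buffer mapping go here ... */

	/* Signals the CPU fence so GPU work queued behind it can proceed. */
	if (drmIoctl(fd, DRM_IOCTL_ROCKCHIP_GEM_CPU_RELEASE, &rel))
		return -errno;

	return 0;
}

Each acquire is expected to be paired with a release on the same drm_file; acquires still held when the file is closed are cleaned up, and their fences signalled, in rockchip_drm_preclose().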