#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_sync_helper.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/component.h>
+#include <linux/fence.h>
#include <drm/rockchip_drm.h>
drm_dev->dev_private = private;
+#ifdef CONFIG_DRM_DMA_SYNC
+ private->cpu_fence_context = fence_context_alloc(1);
+ atomic_set(&private->cpu_fence_seqno, 0);
+#endif
+
drm_mode_config_init(drm_dev);
rockchip_drm_mode_config_init(drm_dev);
priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
}
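+/*
+ * Set up the per-file private data used to track GEM objects this client
+ * has acquired for CPU access.
+ */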
+static int rockchip_drm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct rockchip_drm_file_private *file_priv;
+
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&file_priv->gem_cpu_acquire_list);
+
+ file->driver_priv = file_priv;
+
+ return 0;
+}
+
static void rockchip_drm_preclose(struct drm_device *dev,
struct drm_file *file_priv)
{
+ struct rockchip_drm_file_private *file_private = file_priv->driver_priv;
+ struct rockchip_gem_object_node *cur, *d;
struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
+
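+ /*
+ * Drop any CPU acquires this client still holds: signal their fences and
+ * release the GEM references taken at acquire time.
+ */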
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry_safe(cur, d,
+ &file_private->gem_cpu_acquire_list, list) {
+#ifdef CONFIG_DRM_DMA_SYNC
+ BUG_ON(!cur->rockchip_gem_obj->acquire_fence);
+ drm_fence_signal_and_put(&cur->rockchip_gem_obj->acquire_fence);
+#endif
+ drm_gem_object_unreference(&cur->rockchip_gem_obj->base);
+ kfree(cur);
+ }
+ /*
+ * Since we are deleting the whole list, just reinitialize the list head
+ * instead of calling list_del on every element.
+ */
+ INIT_LIST_HEAD(&file_private->gem_cpu_acquire_list);
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void rockchip_drm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ kfree(file->driver_priv);
+ file->driver_priv = NULL;
}
void rockchip_drm_lastclose(struct drm_device *dev)
DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_MAP_OFFSET,
rockchip_gem_map_offset_ioctl,
DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_CPU_ACQUIRE,
+ rockchip_gem_cpu_acquire_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(ROCKCHIP_GEM_CPU_RELEASE,
+ rockchip_gem_cpu_release_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations rockchip_drm_driver_fops = {
.preclose = rockchip_drm_preclose,
.lastclose = rockchip_drm_lastclose,
.get_vblank_counter = drm_vblank_no_hw_counter,
+ .open = rockchip_drm_open,
+ .postclose = rockchip_drm_postclose,
.enable_vblank = rockchip_drm_crtc_enable_vblank,
.disable_vblank = rockchip_drm_crtc_disable_vblank,
.gem_vm_ops = &rockchip_drm_vm_ops,
struct mutex lock;
};
+/*
+ * Rockchip drm_file private structure.
+ *
+ * @gem_cpu_acquire_list: list of GEM objects we hold acquires on
+ */
+struct rockchip_drm_file_private {
+ struct list_head gem_cpu_acquire_list;
+};
+
/*
* Rockchip drm private structure.
*
* @crtc: array of enabled CRTCs, used to map from "pipe" to drm_crtc.
* @num_pipe: number of pipes for this device.
+ * @cpu_fence_context: fence context used for CPU acquire/release
+ * @cpu_fence_seqno: seqno counter for fences allocated on @cpu_fence_context.
*/
struct rockchip_drm_private {
struct drm_fb_helper fbdev_helper;
struct rockchip_atomic_commit commit;
struct iommu_domain *domain;
+#ifdef CONFIG_DRM_DMA_SYNC
+ unsigned int cpu_fence_context;
+ atomic_t cpu_fence_seqno;
+#endif
};
void rockchip_drm_atomic_work(struct work_struct *work);
#include <drm/drm.h>
#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <drm/drm_sync_helper.h>
#include <drm/drm_vma_manager.h>
#include <drm/rockchip_drm.h>
+#include <linux/completion.h>
#include <linux/dma-attrs.h>
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
rockchip_gem_free_buf(rk_obj);
+#ifdef CONFIG_DRM_DMA_SYNC
+ drm_fence_signal_and_put(&rk_obj->acquire_fence);
+#endif
+
kfree(rk_obj);
}
return PTR_ERR_OR_ZERO(rk_obj);
}
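+/*
+ * Return the reservation object of the GEM object's dma-buf, or NULL if no
+ * dma-buf is attached (in which case there is nothing to synchronize with).
+ */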
+static struct reservation_object *drm_gem_get_resv(struct drm_gem_object *gem)
+{
+ struct dma_buf *dma_buf = gem->dma_buf;
+
+ return dma_buf ? dma_buf->resv : NULL;
+}
+
+#ifdef CONFIG_DRM_DMA_SYNC
+static void rockchip_gem_acquire_complete(struct drm_reservation_cb *rcb,
+ void *context)
+{
+ struct completion *compl = context;
+ complete(compl);
+}
+
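+/*
+ * Acquire a buffer for CPU access: allocate a SW fence on the driver's CPU
+ * fence context, register a callback on the fences already attached to the
+ * buffer's reservation object, attach the new fence, then block until the
+ * pre-existing fences have signalled. A shared acquire on a buffer that is
+ * already shared-acquired only bumps acquire_shared_count.
+ * Called with dev->struct_mutex held.
+ */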
+static int rockchip_gem_acquire(struct drm_device *dev,
+ struct rockchip_gem_object *rockchip_gem_obj,
+ bool exclusive)
+{
+ struct fence *fence;
+ struct rockchip_drm_private *dev_priv = dev->dev_private;
+ struct reservation_object *resv =
+ drm_gem_get_resv(&rockchip_gem_obj->base);
+ int ret = 0;
+ struct drm_reservation_cb rcb;
+ DECLARE_COMPLETION_ONSTACK(compl);
+
+ if (!resv)
+ return ret;
+
+ if (!exclusive &&
+ !rockchip_gem_obj->acquire_exclusive &&
+ rockchip_gem_obj->acquire_fence) {
+ atomic_inc(&rockchip_gem_obj->acquire_shared_count);
+ return ret;
+ }
+
+ fence = drm_sw_fence_new(dev_priv->cpu_fence_context,
+ atomic_add_return(1, &dev_priv->cpu_fence_seqno));
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ DRM_ERROR("Failed to create acquire fence %d.\n", ret);
+ return ret;
+ }
+ ww_mutex_lock(&resv->lock, NULL);
+ if (!exclusive) {
+ ret = reservation_object_reserve_shared(resv);
+ if (ret < 0) {
+ DRM_ERROR("Failed to reserve space for shared fence %d.\n",
+ ret);
+ goto resv_unlock;
+ }
+ }
+ drm_reservation_cb_init(&rcb, rockchip_gem_acquire_complete, &compl);
+ ret = drm_reservation_cb_add(&rcb, resv, exclusive);
+ if (ret < 0) {
+ DRM_ERROR("Failed to add reservation to callback %d.\n", ret);
+ goto resv_unlock;
+ }
+ drm_reservation_cb_done(&rcb);
+ if (exclusive)
+ reservation_object_add_excl_fence(resv, fence);
+ else
+ reservation_object_add_shared_fence(resv, fence);
+
+ ww_mutex_unlock(&resv->lock);
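+ /*
+ * Drop struct_mutex while blocking: the fences we wait on may be
+ * signalled by another client's CPU release ioctl, which also takes
+ * struct_mutex.
+ */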
+ mutex_unlock(&dev->struct_mutex);
+ ret = wait_for_completion_interruptible(&compl);
+ mutex_lock(&dev->struct_mutex);
+ if (ret < 0) {
+ DRM_ERROR("Failed wait for reservation callback %d.\n", ret);
+ drm_reservation_cb_fini(&rcb);
+ /* somebody else may be already waiting on it */
+ drm_fence_signal_and_put(&fence);
+ return ret;
+ }
+ rockchip_gem_obj->acquire_fence = fence;
+ rockchip_gem_obj->acquire_exclusive = exclusive;
+ atomic_set(&rockchip_gem_obj->acquire_shared_count, 1);
+ return ret;
+
+resv_unlock:
+ ww_mutex_unlock(&resv->lock);
+ fence_put(fence);
+ return ret;
+}
+
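+/*
+ * Drop one shared CPU acquire; when the last holder releases, signal and put
+ * the acquire fence so that other users of the buffer can make progress.
+ */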
+static void rockchip_gem_release(struct rockchip_gem_object *rockchip_gem_obj)
+{
+ BUG_ON(!rockchip_gem_obj->acquire_fence);
+ if (atomic_dec_and_test(&rockchip_gem_obj->acquire_shared_count))
+ drm_fence_signal_and_put(&rockchip_gem_obj->acquire_fence);
+}
+#endif
+
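+/*
+ * DRM_ROCKCHIP_GEM_CPU_ACQUIRE: wait for pending fences on the buffer and
+ * record the acquire on the per-file list. The GEM reference taken by the
+ * lookup is kept until the matching CPU release (or preclose).
+ */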
+int rockchip_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_rockchip_gem_cpu_acquire *args = data;
+ struct rockchip_drm_file_private *file_priv = file->driver_priv;
+ struct drm_gem_object *obj;
+ struct rockchip_gem_object *rockchip_gem_obj;
+ struct rockchip_gem_object_node *gem_node;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("[BO:%u] flags: 0x%x\n", args->handle, args->flags);
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ rockchip_gem_obj = to_rockchip_obj(obj);
+
+ if (!drm_gem_get_resv(&rockchip_gem_obj->base)) {
+ /* If there is no reservation object present, there is no
+ * cross-process/cross-device sharing and sync is unnecessary.
+ */
+ ret = 0;
+ goto unref_obj;
+ }
+
+#ifdef CONFIG_DRM_DMA_SYNC
+ ret = rockchip_gem_acquire(dev, rockchip_gem_obj,
+ args->flags & DRM_ROCKCHIP_GEM_CPU_ACQUIRE_EXCLUSIVE);
+ if (ret < 0)
+ goto unref_obj;
+#endif
+
+ gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
+ if (!gem_node) {
+ DRM_ERROR("Failed to allocate rockchip_drm_gem_obj_node.\n");
+ ret = -ENOMEM;
+ goto release_sync;
+ }
+
+ gem_node->rockchip_gem_obj = rockchip_gem_obj;
+ list_add(&gem_node->list, &file_priv->gem_cpu_acquire_list);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+
+release_sync:
+#ifdef CONFIG_DRM_DMA_SYNC
+ rockchip_gem_release(rockchip_gem_obj);
+#endif
+unref_obj:
+ drm_gem_object_unreference(obj);
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
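+/*
+ * DRM_ROCKCHIP_GEM_CPU_RELEASE: drop a CPU acquire previously taken with
+ * DRM_ROCKCHIP_GEM_CPU_ACQUIRE on this file and remove it from the per-file
+ * list.
+ */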
+int rockchip_gem_cpu_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_rockchip_gem_cpu_release *args = data;
+ struct rockchip_drm_file_private *file_priv = file->driver_priv;
+ struct drm_gem_object *obj;
+ struct rockchip_gem_object *rockchip_gem_obj;
+ struct list_head *cur;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("[BO:%u]\n", args->handle);
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ rockchip_gem_obj = to_rockchip_obj(obj);
+
+ if (!drm_gem_get_resv(&rockchip_gem_obj->base)) {
+ /* If there is no reservation object present, there is no
+ * cross-process/cross-device sharing and sync is unnecessary.
+ */
+ ret = 0;
+ goto unref_obj;
+ }
+
+ list_for_each(cur, &file_priv->gem_cpu_acquire_list) {
+ struct rockchip_gem_object_node *node = list_entry(
+ cur, struct rockchip_gem_object_node, list);
+ if (node->rockchip_gem_obj == rockchip_gem_obj)
+ break;
+ }
+ if (cur == &file_priv->gem_cpu_acquire_list) {
+ DRM_ERROR("gem object not acquired for current process.\n");
+ ret = -EINVAL;
+ goto unref_obj;
+ }
+
+#ifdef CONFIG_DRM_DMA_SYNC
+ rockchip_gem_release(rockchip_gem_obj);
+#endif
+
+ list_del(cur);
+ kfree(list_entry(cur, struct rockchip_gem_object_node, list));
+ /* unreference for the reference held since cpu_acquire_ioctl */
+ drm_gem_object_unreference(obj);
+ ret = 0;
+
+unref_obj:
+ /* unreference for the reference from drm_gem_object_lookup() */
+ drm_gem_object_unreference(obj);
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
/*
* Allocate a sg_table for this GEM object.
* Note: Both the table's contents, and the sg_table itself must be freed by
void *kvaddr;
dma_addr_t dma_addr;
struct dma_attrs dma_attrs;
+
+#ifdef CONFIG_DRM_DMA_SYNC
+ struct fence *acquire_fence; /* fence installed while the CPU holds the buffer */
+ atomic_t acquire_shared_count; /* nested shared CPU acquires */
+ bool acquire_exclusive; /* current acquire is exclusive */
+#endif
+};
+
+/*
+ * rockchip drm GEM object linked list structure.
+ *
+ * @list: list link.
+ * @rockchip_gem_obj: struct rockchip_gem_object that this entry points to.
+ */
+struct rockchip_gem_object_node {
+ struct list_head list;
+ struct rockchip_gem_object *rockchip_gem_obj;
};
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
-int rockchip_gem_map_offset_ioctl(struct drm_device *drm, void *data,
- struct drm_file *file_priv);
/*
* request gem object creation and buffer allocation as the size
* that it is calculated with framebuffer information such as width,
int rockchip_gem_map_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/*
+ * acquire gem object for CPU access.
+ */
+int rockchip_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+/*
+ * release gem object after CPU access.
+ */
+int rockchip_gem_cpu_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
#endif /* _ROCKCHIP_DRM_GEM_H */
uint64_t offset;
};
+/* acquire type definitions. */
+enum drm_rockchip_gem_cpu_acquire_type {
+ DRM_ROCKCHIP_GEM_CPU_ACQUIRE_SHARED = 0x0,
+ DRM_ROCKCHIP_GEM_CPU_ACQUIRE_EXCLUSIVE = 0x1,
+};
+
+/**
+ * A structure for acquiring a buffer for CPU access.
+ *
+ * @handle: a handle to the gem object to acquire.
+ * @flags: acquire flags, see enum drm_rockchip_gem_cpu_acquire_type.
+ */
+struct drm_rockchip_gem_cpu_acquire {
+ uint32_t handle;
+ uint32_t flags;
+};
+
+/**
+ * A structure for releasing a buffer after CPU access.
+ *
+ * @handle: a handle to the gem object to release.
+ */
+struct drm_rockchip_gem_cpu_release {
+ uint32_t handle;
+};
+
#define DRM_ROCKCHIP_GEM_CREATE 0x00
#define DRM_ROCKCHIP_GEM_MAP_OFFSET 0x01
+#define DRM_ROCKCHIP_GEM_CPU_ACQUIRE 0x02
+#define DRM_ROCKCHIP_GEM_CPU_RELEASE 0x03
#define DRM_IOCTL_ROCKCHIP_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_ROCKCHIP_GEM_CREATE, struct drm_rockchip_gem_create)
#define DRM_IOCTL_ROCKCHIP_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \
DRM_ROCKCHIP_GEM_MAP_OFFSET, struct drm_rockchip_gem_map_off)
+#define DRM_IOCTL_ROCKCHIP_GEM_CPU_ACQUIRE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ROCKCHIP_GEM_CPU_ACQUIRE, struct drm_rockchip_gem_cpu_acquire)
+
+#define DRM_IOCTL_ROCKCHIP_GEM_CPU_RELEASE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_ROCKCHIP_GEM_CPU_RELEASE, struct drm_rockchip_gem_cpu_release)
+
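+/*
+ * Typical usage (hypothetical userspace sketch using libdrm's drmIoctl();
+ * "fd" and "handle" are assumed to be an open DRM fd and a GEM handle):
+ *
+ *   struct drm_rockchip_gem_cpu_acquire acq = {
+ *     .handle = handle,
+ *     .flags = DRM_ROCKCHIP_GEM_CPU_ACQUIRE_EXCLUSIVE,
+ *   };
+ *   struct drm_rockchip_gem_cpu_release rel = { .handle = handle };
+ *
+ *   if (drmIoctl(fd, DRM_IOCTL_ROCKCHIP_GEM_CPU_ACQUIRE, &acq) == 0) {
+ *     ... CPU reads/writes of the mapped buffer ...
+ *     drmIoctl(fd, DRM_IOCTL_ROCKCHIP_GEM_CPU_RELEASE, &rel);
+ *   }
+ */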
#endif /* _UAPI_ROCKCHIP_DRM_H */