.gem_prime_vmap = rockchip_gem_prime_vmap,
.gem_prime_vunmap = rockchip_gem_prime_vunmap,
.gem_prime_mmap = rockchip_gem_mmap_buf,
+ .gem_prime_begin_cpu_access = rockchip_gem_prime_begin_cpu_access,
+ .gem_prime_end_cpu_access = rockchip_gem_prime_end_cpu_access,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = rockchip_drm_debugfs_init,
.debugfs_cleanup = rockchip_drm_debugfs_cleanup,
/* Nothing to do if allocated by DMA mapping API. */
}
+
+/*
+ * rockchip_gem_prime_begin_cpu_access - sync an exported buffer for CPU access
+ * @obj: GEM object backing the dma-buf
+ * @start: byte offset of the accessed range (unused; the whole buffer is synced)
+ * @len: length of the accessed range (unused; the whole buffer is synced)
+ * @dir: direction of the upcoming CPU access
+ *
+ * Hands cache ownership of the buffer to the CPU so it observes data last
+ * written by the device.
+ *
+ * Returns 0.
+ */
+int rockchip_gem_prime_begin_cpu_access(struct drm_gem_object *obj,
+					size_t start, size_t len,
+					enum dma_data_direction dir)
+{
+	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+	struct drm_device *drm = obj->dev;
+
+	/* Nothing to do if allocated by DMA mapping API (already coherent). */
+	if (!rk_obj->sgt)
+		return 0;
+
+	/*
+	 * The DMA API requires dma_sync_sg_for_cpu() to be called with the
+	 * same nents that was passed to dma_map_sg() (orig_nents), not the
+	 * possibly-coalesced count dma_map_sg() returned (nents).
+	 */
+	dma_sync_sg_for_cpu(drm->dev, rk_obj->sgt->sgl,
+			    rk_obj->sgt->orig_nents, dir);
+	return 0;
+}
+
+/*
+ * rockchip_gem_prime_end_cpu_access - finish CPU access to an exported buffer
+ * @obj: GEM object backing the dma-buf
+ * @start: byte offset of the accessed range (unused; the whole buffer is synced)
+ * @len: length of the accessed range (unused; the whole buffer is synced)
+ * @dir: direction of the CPU access that just completed
+ *
+ * Hands cache ownership of the buffer back to the device so it observes data
+ * written by the CPU.
+ */
+void rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj,
+				       size_t start, size_t len,
+				       enum dma_data_direction dir)
+{
+	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+	struct drm_device *drm = obj->dev;
+
+	/* Nothing to do if allocated by DMA mapping API (already coherent). */
+	if (!rk_obj->sgt)
+		return;
+
+	/*
+	 * The DMA API requires dma_sync_sg_for_device() to be called with the
+	 * same nents that was passed to dma_map_sg() (orig_nents), not the
+	 * possibly-coalesced count dma_map_sg() returned (nents).
+	 */
+	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl,
+			       rk_obj->sgt->orig_nents, dir);
+}
int rockchip_gem_cpu_release_ioctl(struct drm_device *dev, void* data,
struct drm_file *file_priv);
+/* dma-buf begin_cpu_access hook: sync buffer caches for CPU access. */
+int rockchip_gem_prime_begin_cpu_access(struct drm_gem_object *obj,
+ size_t start, size_t len,
+ enum dma_data_direction dir);
+
+/* dma-buf end_cpu_access hook: hand buffer cache ownership back to device. */
+void rockchip_gem_prime_end_cpu_access(struct drm_gem_object *obj,
+ size_t start, size_t len,
+ enum dma_data_direction dir);
+
#endif /* _ROCKCHIP_DRM_GEM_H */