--- /dev/null
+/*
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: Jung Zhao jung.zhao@rock-chips.com
+ * Randy Li, randy.li@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-iommu.h>
+
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_sync_helper.h>
+#include <drm/rockchip_drm.h>
+#include <linux/dma-mapping.h>
+#include <linux/rockchip-iovmm.h>
+#include <linux/pm_runtime.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/component.h>
+#include <linux/fence.h>
+#include <linux/console.h>
+#include <linux/kref.h>
+#include <linux/fdtable.h>
+
+#include "vcodec_iommu_ops.h"
+
+struct vcodec_drm_buffer {
+ struct list_head list;
+ struct dma_buf *dma_buf;
+ union {
+ unsigned long iova;
+ unsigned long phys;
+ };
+ void *cpu_addr;
+ unsigned long size;
+ int fd;
+ int index;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct page **pages;
+ struct kref ref;
+ struct vcodec_iommu_session_info *session_info;
+};
+
+struct vcodec_iommu_drm_info {
+ struct iommu_domain *domain;
+ bool attached;
+};
+
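+/*
+ * Look up a session buffer by its slot index. Callers are expected to
+ * hold session_info->list_mutex, hence the "no_lock" suffix.
+ */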
+static struct vcodec_drm_buffer *
+vcodec_drm_get_buffer_no_lock(struct vcodec_iommu_session_info *session_info,
+ int idx)
+{
+ struct vcodec_drm_buffer *drm_buffer = NULL, *n;
+
+ list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
+ list) {
+ if (drm_buffer->index == idx)
+ return drm_buffer;
+ }
+
+ return NULL;
+}
+
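+/* Same lookup as above, but keyed by the imported dma-buf fd. */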
+static struct vcodec_drm_buffer *
+vcodec_drm_get_buffer_fd_no_lock(struct vcodec_iommu_session_info *session_info,
+ int fd)
+{
+ struct vcodec_drm_buffer *drm_buffer = NULL, *n;
+
+ list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
+ list) {
+ if (drm_buffer->fd == fd)
+ return drm_buffer;
+ }
+
+ return NULL;
+}
+
+static void vcodec_drm_detach(struct vcodec_iommu_info *iommu_info)
+{
+ struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
+ struct device *dev = iommu_info->dev;
+ struct iommu_domain *domain = drm_info->domain;
+
+ mutex_lock(&iommu_info->iommu_mutex);
+
+ if (!drm_info->attached) {
+ mutex_unlock(&iommu_info->iommu_mutex);
+ return;
+ }
+
+ iommu_detach_device(domain, dev);
+ drm_info->attached = false;
+
+ mutex_unlock(&iommu_info->iommu_mutex);
+}
+
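+/*
+ * Attach the iommu domain and install the dma mapping ops for the codec
+ * device. Callers must already hold iommu_info->iommu_mutex.
+ */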
+static int vcodec_drm_attach_unlock(struct vcodec_iommu_info *iommu_info)
+{
+ struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
+ struct device *dev = iommu_info->dev;
+ struct iommu_domain *domain = drm_info->domain;
+ int ret = 0;
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ ret = iommu_attach_device(domain, dev);
+ if (ret) {
+ dev_err(dev, "Failed to attach iommu device\n");
+ return ret;
+ }
+
+ if (!common_iommu_setup_dma_ops(dev, 0x10000000, SZ_2G, domain->ops)) {
+ dev_err(dev, "Failed to set dma_ops\n");
+ iommu_detach_device(domain, dev);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int vcodec_drm_attach(struct vcodec_iommu_info *iommu_info)
+{
+ struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
+ int ret;
+
+ mutex_lock(&iommu_info->iommu_mutex);
+
+ if (drm_info->attached) {
+ mutex_unlock(&iommu_info->iommu_mutex);
+ return 0;
+ }
+
+ ret = vcodec_drm_attach_unlock(iommu_info);
+ if (ret) {
+ mutex_unlock(&iommu_info->iommu_mutex);
+ return ret;
+ }
+
+ drm_info->attached = true;
+
+ mutex_unlock(&iommu_info->iommu_mutex);
+
+ return ret;
+}
+
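+/*
+ * Collect the pages behind the imported sg table and vmap them with a
+ * non-cached mapping so the kernel can access the buffer contents.
+ */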
+static void *vcodec_drm_sgt_map_kernel(struct vcodec_drm_buffer *drm_buffer)
+{
+ struct vcodec_iommu_session_info *session_info =
+ drm_buffer->session_info;
+ struct device *dev = session_info->dev;
+ struct scatterlist *sgl, *sg;
+ int nr_pages = PAGE_ALIGN(drm_buffer->size) >> PAGE_SHIFT;
+ int i = 0, j = 0, k = 0;
+ struct page *page;
+
+ drm_buffer->pages = kmalloc_array(nr_pages, sizeof(*drm_buffer->pages),
+ GFP_KERNEL);
+ if (!(drm_buffer->pages)) {
+ dev_err(dev, "drm map can not alloc pages\n");
+
+ return NULL;
+ }
+
+ sgl = drm_buffer->sgt->sgl;
+
+ for_each_sg(sgl, sg, drm_buffer->sgt->nents, i) {
+ page = sg_page(sg);
+ for (j = 0; j < sg->length / PAGE_SIZE; j++)
+ drm_buffer->pages[k++] = page++;
+ }
+
+ return vmap(drm_buffer->pages, nr_pages, VM_MAP,
+ pgprot_noncached(PAGE_KERNEL));
+}
+
+static void vcodec_drm_sgt_unmap_kernel(struct vcodec_drm_buffer *drm_buffer)
+{
+ vunmap(drm_buffer->cpu_addr);
+ kfree(drm_buffer->pages);
+}
+
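+/*
+ * kref release callback: drop the kernel mapping and the dma-buf
+ * attachment once the last user of this buffer mapping is gone.
+ */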
+static void vcodec_drm_clear_map(struct kref *ref)
+{
+ struct vcodec_drm_buffer *drm_buffer =
+ container_of(ref, struct vcodec_drm_buffer, ref);
+ struct vcodec_iommu_session_info *session_info =
+ drm_buffer->session_info;
+ struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
+ struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
+ struct device *dev = session_info->dev;
+ struct iommu_domain *domain = drm_info->domain;
+
+ mutex_lock(&iommu_info->iommu_mutex);
+ drm_info = session_info->iommu_info->private;
+ if (!drm_info->attached) {
+ if (vcodec_drm_attach_unlock(session_info->iommu_info))
+ dev_err(dev, "can't clear map, attach iommu failed\n");
+ }
+
+ if (drm_buffer->cpu_addr) {
+ vcodec_drm_sgt_unmap_kernel(drm_buffer);
+ drm_buffer->cpu_addr = NULL;
+ }
+
+ if (drm_buffer->attach) {
+ dma_buf_unmap_attachment(drm_buffer->attach, drm_buffer->sgt,
+ DMA_BIDIRECTIONAL);
+ dma_buf_detach(drm_buffer->dma_buf, drm_buffer->attach);
+ dma_buf_put(drm_buffer->dma_buf);
+ drm_buffer->attach = NULL;
+ }
+
+ if (!drm_info->attached)
+ iommu_detach_device(domain, dev);
+
+ mutex_unlock(&iommu_info->iommu_mutex);
+}
+
+static void vcodec_drm_dump_info(struct vcodec_iommu_session_info *session_info)
+{
+ struct vcodec_drm_buffer *drm_buffer = NULL, *n;
+
+ vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
+ "still there are below buffers stored in list\n");
+ list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
+ list) {
+ vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
+ "index %d drm_buffer fd %d cpu_addr %p\n",
+ drm_buffer->index,
+ drm_buffer->fd, drm_buffer->cpu_addr);
+ }
+}
+
+static int vcodec_drm_free(struct vcodec_iommu_session_info *session_info,
+ int idx)
+{
+ struct device *dev = session_info->dev;
+ /* please double-check all maps have been released */
+ struct vcodec_drm_buffer *drm_buffer;
+
+ mutex_lock(&session_info->list_mutex);
+ drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
+
+ if (!drm_buffer) {
+ dev_err(dev, "can not find %d buffer in list\n", idx);
+ mutex_unlock(&session_info->list_mutex);
+
+ return -EINVAL;
+ }
+
+ if (atomic_read(&drm_buffer->ref.refcount) == 0) {
+ dma_buf_put(drm_buffer->dma_buf);
+ list_del_init(&drm_buffer->list);
+ kfree(drm_buffer);
+ }
+ mutex_unlock(&session_info->list_mutex);
+
+ return 0;
+}
+
+static int
+vcodec_drm_unmap_iommu(struct vcodec_iommu_session_info *session_info,
+ int idx)
+{
+ struct device *dev = session_info->dev;
+ struct vcodec_drm_buffer *drm_buffer;
+
+ /* Force to flush iommu table */
+ if (of_machine_is_compatible("rockchip,rk3288"))
+ rockchip_iovmm_invalidate_tlb(session_info->mmu_dev);
+
+ mutex_lock(&session_info->list_mutex);
+ drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
+ mutex_unlock(&session_info->list_mutex);
+
+ if (!drm_buffer) {
+ dev_err(dev, "can not find %d buffer in list\n", idx);
+ return -EINVAL;
+ }
+
+ kref_put(&drm_buffer->ref, vcodec_drm_clear_map);
+
+ return 0;
+}
+
+static int vcodec_drm_map_iommu(struct vcodec_iommu_session_info *session_info,
+ int idx,
+ unsigned long *iova,
+ unsigned long *size)
+{
+ struct device *dev = session_info->dev;
+ struct vcodec_drm_buffer *drm_buffer;
+
+ /* Force to flush iommu table */
+ if (of_machine_is_compatible("rockchip,rk3288"))
+ rockchip_iovmm_invalidate_tlb(session_info->mmu_dev);
+
+ mutex_lock(&session_info->list_mutex);
+ drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
+ mutex_unlock(&session_info->list_mutex);
+
+ if (!drm_buffer) {
+ dev_err(dev, "can not find %d buffer in list\n", idx);
+ return -EINVAL;
+ }
+
+ kref_get(&drm_buffer->ref);
+ if (iova)
+ *iova = drm_buffer->iova;
+ if (size)
+ *size = drm_buffer->size;
+ return 0;
+}
+
+static int
+vcodec_drm_unmap_kernel(struct vcodec_iommu_session_info *session_info, int idx)
+{
+ struct device *dev = session_info->dev;
+ struct vcodec_drm_buffer *drm_buffer;
+
+ mutex_lock(&session_info->list_mutex);
+ drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
+ mutex_unlock(&session_info->list_mutex);
+
+ if (!drm_buffer) {
+ dev_err(dev, "can not find %d buffer in list\n", idx);
+
+ return -EINVAL;
+ }
+
+ if (drm_buffer->cpu_addr) {
+ vcodec_drm_sgt_unmap_kernel(drm_buffer);
+ drm_buffer->cpu_addr = NULL;
+ }
+
+ kref_put(&drm_buffer->ref, vcodec_drm_clear_map);
+ return 0;
+}
+
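+/*
+ * Release the buffer matching this fd: drop its iommu mapping and, once
+ * its refcount reaches zero, remove it from the session list.
+ */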
+static int
+vcodec_drm_free_fd(struct vcodec_iommu_session_info *session_info, int fd)
+{
+ struct device *dev = session_info->dev;
+ /* please double-check all maps have been released */
+ struct vcodec_drm_buffer *drm_buffer = NULL;
+
+ mutex_lock(&session_info->list_mutex);
+ drm_buffer = vcodec_drm_get_buffer_fd_no_lock(session_info, fd);
+
+ if (!drm_buffer) {
+ dev_err(dev, "can not find %d buffer in list\n", fd);
+ mutex_unlock(&session_info->list_mutex);
+
+ return -EINVAL;
+ }
+ mutex_unlock(&session_info->list_mutex);
+
+ vcodec_drm_unmap_iommu(session_info, drm_buffer->index);
+
+ mutex_lock(&session_info->list_mutex);
+ if (atomic_read(&drm_buffer->ref.refcount) == 0) {
+ dma_buf_put(drm_buffer->dma_buf);
+ list_del_init(&drm_buffer->list);
+ kfree(drm_buffer);
+ }
+ mutex_unlock(&session_info->list_mutex);
+
+ return 0;
+}
+
+static void
+vcodec_drm_clear_session(struct vcodec_iommu_session_info *session_info)
+{
+ struct vcodec_drm_buffer *drm_buffer = NULL, *n;
+
+ list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
+ list) {
+ kref_put(&drm_buffer->ref, vcodec_drm_clear_map);
+ vcodec_drm_free(session_info, drm_buffer->index);
+ }
+}
+
+static void *
+vcodec_drm_map_kernel(struct vcodec_iommu_session_info *session_info, int idx)
+{
+ struct device *dev = session_info->dev;
+ struct vcodec_drm_buffer *drm_buffer;
+
+ mutex_lock(&session_info->list_mutex);
+ drm_buffer = vcodec_drm_get_buffer_no_lock(session_info, idx);
+ mutex_unlock(&session_info->list_mutex);
+
+ if (!drm_buffer) {
+ dev_err(dev, "can not find %d buffer in list\n", idx);
+ return NULL;
+ }
+
+ if (!drm_buffer->cpu_addr)
+ drm_buffer->cpu_addr =
+ vcodec_drm_sgt_map_kernel(drm_buffer);
+
+ kref_get(&drm_buffer->ref);
+
+ return drm_buffer->cpu_addr;
+}
+
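+/*
+ * Import a dma-buf fd into the session: attach it to the codec device,
+ * map it through the iommu and record it in the session buffer list.
+ * Returns the slot index used by the other ops, or a negative errno.
+ */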
+static int vcodec_drm_import(struct vcodec_iommu_session_info *session_info,
+ int fd)
+{
+ struct vcodec_drm_buffer *drm_buffer = NULL, *n;
+ struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
+ struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
+ struct device *dev = session_info->dev;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ int ret = 0;
+
+ list_for_each_entry_safe(drm_buffer, n,
+ &session_info->buffer_list, list) {
+ if (drm_buffer->fd == fd)
+ return drm_buffer->index;
+ }
+
+ drm_buffer = kzalloc(sizeof(*drm_buffer), GFP_KERNEL);
+ if (!drm_buffer) {
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ drm_buffer->dma_buf = dma_buf_get(fd);
+ if (IS_ERR(drm_buffer->dma_buf)) {
+ ret = PTR_ERR(drm_buffer->dma_buf);
+ kfree(drm_buffer);
+ return ret;
+ }
+ drm_buffer->fd = fd;
+ drm_buffer->session_info = session_info;
+
+ kref_init(&drm_buffer->ref);
+
+ mutex_lock(&iommu_info->iommu_mutex);
+ drm_info = session_info->iommu_info->private;
+ if (!drm_info->attached) {
+ ret = vcodec_drm_attach_unlock(session_info->iommu_info);
+ if (ret)
+ goto fail_out;
+ }
+
+ attach = dma_buf_attach(drm_buffer->dma_buf, dev);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto fail_out;
+ }
+
+ get_dma_buf(drm_buffer->dma_buf);
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ drm_buffer->iova = sg_dma_address(sgt->sgl);
+ drm_buffer->size = drm_buffer->dma_buf->size;
+
+ drm_buffer->attach = attach;
+ drm_buffer->sgt = sgt;
+
+ mutex_unlock(&iommu_info->iommu_mutex);
+
+ INIT_LIST_HEAD(&drm_buffer->list);
+ mutex_lock(&session_info->list_mutex);
+ drm_buffer->index = session_info->max_idx;
+ list_add_tail(&drm_buffer->list, &session_info->buffer_list);
+ session_info->max_idx++;
+ if ((session_info->max_idx & 0xfffffff) == 0)
+ session_info->max_idx = 0;
+ mutex_unlock(&session_info->list_mutex);
+
+ return drm_buffer->index;
+
+fail_detach:
+ dev_err(dev, "dmabuf map attach failed\n");
+ dma_buf_detach(drm_buffer->dma_buf, attach);
+ dma_buf_put(drm_buffer->dma_buf);
+fail_out:
+ kfree(drm_buffer);
+ mutex_unlock(&iommu_info->iommu_mutex);
+
+ return ret;
+}
+
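+/*
+ * Allocate the iommu domain and its dma cookie for this instance and
+ * attach the codec device to it right away.
+ */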
+static int vcodec_drm_create(struct vcodec_iommu_info *iommu_info)
+{
+ struct vcodec_iommu_drm_info *drm_info;
+ int ret;
+
+ iommu_info->private = kzalloc(sizeof(*drm_info),
+ GFP_KERNEL);
+ drm_info = iommu_info->private;
+ if (!drm_info)
+ return -ENOMEM;
+
+ drm_info->domain = iommu_domain_alloc(&platform_bus_type);
+ drm_info->attached = false;
+ if (!drm_info->domain) {
+ kfree(iommu_info->private);
+ iommu_info->private = NULL;
+ return -ENOMEM;
+ }
+
+ ret = iommu_get_dma_cookie(drm_info->domain);
+ if (ret)
+ goto err_free_domain;
+
+ vcodec_drm_attach(iommu_info);
+
+ return 0;
+
+err_free_domain:
+ iommu_domain_free(drm_info->domain);
+
+ return ret;
+}
+
+static int vcodec_drm_destroy(struct vcodec_iommu_info *iommu_info)
+{
+ struct vcodec_iommu_drm_info *drm_info = iommu_info->private;
+
+ vcodec_drm_detach(iommu_info);
+ iommu_put_dma_cookie(drm_info->domain);
+ iommu_domain_free(drm_info->domain);
+
+ kfree(drm_info);
+ iommu_info->private = NULL;
+
+ return 0;
+}
+
+static struct vcodec_iommu_ops drm_ops = {
+ .create = vcodec_drm_create,
+ .import = vcodec_drm_import,
+ .free = vcodec_drm_free,
+ .free_fd = vcodec_drm_free_fd,
+ .map_kernel = vcodec_drm_map_kernel,
+ .unmap_kernel = vcodec_drm_unmap_kernel,
+ .map_iommu = vcodec_drm_map_iommu,
+ .unmap_iommu = vcodec_drm_unmap_iommu,
+ .destroy = vcodec_drm_destroy,
+ .dump = vcodec_drm_dump_info,
+ .attach = vcodec_drm_attach,
+ .detach = vcodec_drm_detach,
+ .clear = vcodec_drm_clear_session,
+};
+
+void vcodec_iommu_drm_set_ops(struct vcodec_iommu_info *iommu_info)
+{
+ if (!iommu_info)
+ return;
+ iommu_info->ops = &drm_ops;
+}
--- /dev/null
+/*
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: Jung Zhao jung.zhao@rock-chips.com
+ * Randy Li, randy.li@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/rockchip_ion.h>
+#include <linux/rockchip-iovmm.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/component.h>
+#include <linux/fence.h>
+#include <linux/console.h>
+#include <linux/kref.h>
+#include <linux/fdtable.h>
+
+#include "vcodec_iommu_ops.h"
+
+struct vcodec_ion_buffer {
+ struct list_head list;
+ struct ion_handle *handle;
+ int index;
+};
+
+struct vcodec_iommu_ion_info {
+ struct ion_client *ion_client;
+ bool attached;
+};
+
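+/*
+ * Look up an imported ion buffer by its slot index. Callers are expected
+ * to hold session_info->list_mutex.
+ */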
+static struct vcodec_ion_buffer *
+vcodec_ion_get_buffer_no_lock(struct vcodec_iommu_session_info *session_info,
+ int idx)
+{
+ struct vcodec_ion_buffer *ion_buffer = NULL, *n;
+
+ list_for_each_entry_safe(ion_buffer, n,
+ &session_info->buffer_list, list) {
+ if (ion_buffer->index == idx)
+ return ion_buffer;
+ }
+
+ return NULL;
+}
+
+static void
+vcodec_ion_clear_session(struct vcodec_iommu_session_info *session_info)
+{
+ /* do nothing */
+}
+
+static int vcodec_ion_attach(struct vcodec_iommu_info *iommu_info)
+{
+ struct vcodec_iommu_ion_info *ion_info = iommu_info->private;
+ int ret;
+
+ mutex_lock(&iommu_info->iommu_mutex);
+
+ if (ion_info->attached) {
+ mutex_unlock(&iommu_info->iommu_mutex);
+ return 0;
+ }
+
+ ret = rockchip_iovmm_activate(iommu_info->dev);
+
+ ion_info->attached = true;
+
+ mutex_unlock(&iommu_info->iommu_mutex);
+
+ return ret;
+}
+
+static void vcodec_ion_detach(struct vcodec_iommu_info *iommu_info)
+{
+ struct vcodec_iommu_ion_info *ion_info = iommu_info->private;
+
+ mutex_lock(&iommu_info->iommu_mutex);
+
+ if (!ion_info->attached) {
+ mutex_unlock(&iommu_info->iommu_mutex);
+ return;
+ }
+
+ rockchip_iovmm_deactivate(iommu_info->dev);
+ ion_info->attached = false;
+
+ mutex_unlock(&iommu_info->iommu_mutex);
+}
+
+static int vcodec_ion_destroy(struct vcodec_iommu_info *iommu_info)
+{
+ struct vcodec_iommu_ion_info *ion_info = iommu_info->private;
+
+ vcodec_ion_detach(iommu_info);
+ kfree(ion_info);
+ iommu_info->private = NULL;
+
+ return 0;
+}
+
+static int
+vcodec_ion_free(struct vcodec_iommu_session_info *session_info, int idx)
+{
+ struct vcodec_ion_buffer *ion_buffer;
+
+ mutex_lock(&session_info->list_mutex);
+ ion_buffer = vcodec_ion_get_buffer_no_lock(session_info, idx);
+
+ if (!ion_buffer) {
+ mutex_unlock(&session_info->list_mutex);
+ pr_err("%s can not find %d buffer in list\n", __func__, idx);
+
+ return -EINVAL;
+ }
+
+ list_del_init(&ion_buffer->list);
+ mutex_unlock(&session_info->list_mutex);
+ kfree(ion_buffer);
+
+ return 0;
+}
+
+static int
+vcodec_ion_unmap_iommu(struct vcodec_iommu_session_info *session_info, int idx)
+{
+ struct vcodec_ion_buffer *ion_buffer;
+ struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
+ struct vcodec_iommu_ion_info *ion_info = iommu_info->private;
+
+ mutex_lock(&session_info->list_mutex);
+ ion_buffer = vcodec_ion_get_buffer_no_lock(session_info, idx);
+ mutex_unlock(&session_info->list_mutex);
+
+ if (!ion_buffer) {
+ pr_err("%s can not find %d buffer in list\n", __func__, idx);
+
+ return -EINVAL;
+ }
+
+ ion_free(ion_info->ion_client, ion_buffer->handle);
+
+ return 0;
+}
+
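+/*
+ * Resolve the device address of an imported buffer: map it through the
+ * iommu when an mmu device is present, otherwise fall back to the
+ * physical address reported by ion.
+ */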
+static int
+vcodec_ion_map_iommu(struct vcodec_iommu_session_info *session_info, int idx,
+ unsigned long *iova, unsigned long *size)
+{
+ struct vcodec_ion_buffer *ion_buffer;
+ struct device *dev = session_info->dev;
+ struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
+ struct vcodec_iommu_ion_info *ion_info = iommu_info->private;
+ int ret = 0;
+
+ /* Force to flush iommu table */
+ rockchip_iovmm_invalidate_tlb(session_info->dev);
+
+ mutex_lock(&session_info->list_mutex);
+ ion_buffer = vcodec_ion_get_buffer_no_lock(session_info, idx);
+ mutex_unlock(&session_info->list_mutex);
+
+ if (!ion_buffer) {
+ pr_err("%s can not find %d buffer in list\n", __func__, idx);
+
+ return -EINVAL;
+ }
+
+ if (session_info->mmu_dev)
+ ret = ion_map_iommu(dev, ion_info->ion_client,
+ ion_buffer->handle, iova, size);
+ else
+ ret = ion_phys(ion_info->ion_client, ion_buffer->handle,
+ iova, size);
+
+ return ret;
+}
+
+static int
+vcodec_ion_unmap_kernel(struct vcodec_iommu_session_info *session_info,
+ int idx)
+{
+ struct vcodec_ion_buffer *ion_buffer;
+
+ mutex_lock(&session_info->list_mutex);
+ ion_buffer = vcodec_ion_get_buffer_no_lock(session_info, idx);
+ mutex_unlock(&session_info->list_mutex);
+
+ if (!ion_buffer) {
+ pr_err("%s can not find %d buffer in list\n", __func__, idx);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *
+vcodec_ion_map_kernel(struct vcodec_iommu_session_info *session_info, int idx)
+{
+ struct vcodec_ion_buffer *ion_buffer;
+ struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
+ struct vcodec_iommu_ion_info *ion_info = iommu_info->private;
+
+ rockchip_iovmm_invalidate_tlb(session_info->dev);
+
+ mutex_lock(&session_info->list_mutex);
+ ion_buffer = vcodec_ion_get_buffer_no_lock(session_info, idx);
+ mutex_unlock(&session_info->list_mutex);
+
+ if (!ion_buffer) {
+ pr_err("%s can not find %d buffer in list\n", __func__, idx);
+
+ return NULL;
+ }
+
+ return ion_map_kernel(ion_info->ion_client, ion_buffer->handle);
+}
+
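+/*
+ * Import a dma-buf fd through ion and add it to the session buffer list,
+ * returning the slot index used by the other ops.
+ */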
+static int
+vcodec_ion_import(struct vcodec_iommu_session_info *session_info, int fd)
+{
+ struct vcodec_ion_buffer *ion_buffer = NULL;
+ struct vcodec_iommu_info *iommu_info = session_info->iommu_info;
+ struct vcodec_iommu_ion_info *ion_info = iommu_info->private;
+
+ ion_buffer = kzalloc(sizeof(*ion_buffer), GFP_KERNEL);
+ if (!ion_buffer)
+ return -ENOMEM;
+
+ ion_buffer->handle = ion_import_dma_buf(ion_info->ion_client, fd);
+ if (IS_ERR_OR_NULL(ion_buffer->handle)) {
+ kfree(ion_buffer);
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&ion_buffer->list);
+ mutex_lock(&session_info->list_mutex);
+ ion_buffer->index = session_info->max_idx;
+ list_add_tail(&ion_buffer->list, &session_info->buffer_list);
+ session_info->max_idx++;
+ if ((session_info->max_idx & 0xfffffff) == 0)
+ session_info->max_idx = 0;
+ mutex_unlock(&session_info->list_mutex);
+
+ return ion_buffer->index;
+}
+
+static int vcodec_ion_create(struct vcodec_iommu_info *iommu_info)
+{
+ struct vcodec_iommu_ion_info *ion_info;
+
+ iommu_info->private = kmalloc(sizeof(*ion_info), GFP_KERNEL);
+
+ ion_info = iommu_info->private;
+ if (!ion_info)
+ return -ENOMEM;
+
+ ion_info->ion_client = rockchip_ion_client_create("vpu");
+ ion_info->attached = false;
+
+ vcodec_ion_attach(iommu_info);
+
+ return IS_ERR(ion_info->ion_client) ? -1 : 0;
+}
+
+static struct vcodec_iommu_ops ion_ops = {
+ .create = vcodec_ion_create,
+ .destroy = vcodec_ion_destroy,
+ .import = vcodec_ion_import,
+ .free = vcodec_ion_free,
+ .free_fd = NULL,
+ .map_kernel = vcodec_ion_map_kernel,
+ .unmap_kernel = vcodec_ion_unmap_kernel,
+ .map_iommu = vcodec_ion_map_iommu,
+ .unmap_iommu = vcodec_ion_unmap_iommu,
+ .dump = NULL,
+ .attach = vcodec_ion_attach,
+ .detach = vcodec_ion_detach,
+ .clear = vcodec_ion_clear_session,
+};
+
+/*
+ * We do not manage the reference count ourselves here,
+ * since ion already does that for us. All we need to do
+ * is map/unmap and import/free on every call.
+ */
+void vcodec_iommu_ion_set_ops(struct vcodec_iommu_info *iommu_info)
+{
+ if (!iommu_info)
+ return;
+ iommu_info->ops = &ion_ops;
+}
/**
* Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
- * author: chenhengming chm@rock-chips.com
+ * author: chenhengming, chm@rock-chips.com
* Alpha Lin, alpha.lin@rock-chips.com
+ * Jung Zhao, jung.zhao@rock-chips.com
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
#include <linux/rockchip/pmu.h>
#include <linux/rockchip/grf.h>
-#if defined(CONFIG_ION_ROCKCHIP)
-#include <linux/rockchip_ion.h>
-#endif
-
-#include <linux/rockchip-iovmm.h>
#include <linux/dma-buf.h>
+#include <linux/rockchip-iovmm.h>
#include "vcodec_hw_info.h"
#include "vcodec_hw_vpu.h"
#include "vcodec_service.h"
+#include "vcodec_iommu_ops.h"
+
/*
* debug flag usage:
* +------+-------------------+
#define PRINT_FUNCTION 0x80000000
#define PRINT_LINE 0x40000000
-static int debug;
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "bit switch for vcodec_service debug information");
+#define MHZ (1000 * 1000)
+#define SIZE_REG(reg) ((reg) * 4)
#define VCODEC_CLOCK_ENABLE 1
+#define EXTRA_INFO_MAGIC 0x4C4A46
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "bit switch for vcodec_service debug information");
/*
* hardware information organization
*
u32 offset;
};
-#define EXTRA_INFO_MAGIC 0x4C4A46
struct extra_info_for_iommu {
u32 magic;
struct extra_info_elem elem[20];
};
-#define MHZ (1000*1000)
-#define SIZE_REG(reg) ((reg)*4)
-
-static struct vcodec_info vcodec_info_set[] = {
- [0] = {
+static const struct vcodec_info vcodec_info_set[] = {
+ {
.hw_id = VPU_ID_8270,
.hw_info = &hw_vpu_8270,
.task_info = task_vpu,
.trans_info = trans_vpu,
},
- [1] = {
+ {
.hw_id = VPU_ID_4831,
.hw_info = &hw_vpu_4831,
.task_info = task_vpu,
.trans_info = trans_vpu,
},
- [2] = {
+ {
.hw_id = VPU_DEC_ID_9190,
.hw_info = &hw_vpu_9190,
.task_info = task_vpu,
.trans_info = trans_vpu,
},
- [3] = {
+ {
.hw_id = HEVC_ID,
.hw_info = &hw_rkhevc,
.task_info = task_rkv,
.trans_info = trans_rkv,
},
- [4] = {
+ {
.hw_id = RKV_DEC_ID,
.hw_info = &hw_rkvdec,
.task_info = task_rkv,
.trans_info = trans_rkv,
},
- [5] = {
- .hw_id = VPU2_ID,
- .hw_info = &hw_vpu2,
- .task_info = task_vpu2,
- .trans_info = trans_vpu2,
+ {
+ .hw_id = VPU2_ID,
+ .hw_info = &hw_vpu2,
+ .task_info = task_vpu2,
+ .trans_info = trans_vpu2,
},
};
+/* Both VPU1 and VPU2 */
+static const struct vcodec_device_info vpu_device_info = {
+ .device_type = VCODEC_DEVICE_TYPE_VPUX,
+ .name = "vpu-service",
+};
+
+static const struct vcodec_device_info vpu_combo_device_info = {
+ .device_type = VCODEC_DEVICE_TYPE_VPUC,
+ .name = "vpu-combo",
+};
+
+static const struct vcodec_device_info hevc_device_info = {
+ .device_type = VCODEC_DEVICE_TYPE_HEVC,
+ .name = "hevc-service",
+};
+
+static const struct vcodec_device_info rkvd_device_info = {
+ .device_type = VCODEC_DEVICE_TYPE_RKVD,
+ .name = "rkvdec",
+};
+
#define DEBUG
#ifdef DEBUG
#define vpu_debug_func(type, fmt, args...) \
unsigned long iova; /* virtual address for iommu */
unsigned long len;
u32 reg_idx;
- struct ion_handle *hdl;
+ int hdl; /* buffer slot index from vcodec_iommu_import() */
};
enum vpu_ctx_state {
#endif
struct device *mmu_dev;
+ struct vcodec_iommu_info *iommu_info;
};
struct vpu_service_info {
u32 irq_status;
atomic_t reset_request;
- struct ion_client *ion_client;
struct list_head mem_region_list;
enum vcodec_device_id dev_id;
u32 subcnt;
struct list_head subdev_list;
+
+ u32 alloc_type; /* 1 = drm, 2 = ion, set from the allocator DT property */
};
struct vpu_request {
};
#endif
-/* debugfs root directory for all device (vpu, hevc).*/
-static struct dentry *parent;
-
-#ifdef CONFIG_DEBUG_FS
-static int vcodec_debugfs_init(void);
-static void vcodec_debugfs_exit(void);
-static struct dentry *vcodec_debugfs_create_device_dir(
- char *dirname, struct dentry *parent);
-static int debug_vcodec_open(struct inode *inode, struct file *file);
-
-static const struct file_operations debug_vcodec_fops = {
- .open = debug_vcodec_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
-
#define VDPU_SOFT_RESET_REG 101
#define VDPU_CLEAN_CACHE_REG 516
#define VEPU_CLEAN_CACHE_REG 772
#define VPU_POWER_OFF_DELAY (4 * HZ) /* 4s */
#define VPU_TIMEOUT_DELAY (2 * HZ) /* 2s */
+static void *vcodec_get_drv_data(struct platform_device *pdev);
+
+static void vpu_service_power_on(struct vpu_subdev_data *data,
+ struct vpu_service_info *pservice);
+
static void time_record(struct vpu_task_info *task, int is_end)
{
if (unlikely(debug & DEBUG_TIMING) && task)
struct vpu_service_info *pservice = data->pservice;
struct vpu_subdev_data *subdata, *n;
- if (pservice->subcnt < 2) {
- if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
- set_bit(MMU_ACTIVATED, &data->state);
- if (atomic_read(&pservice->enabled))
- rockchip_iovmm_activate(data->dev);
- else
- BUG_ON(!atomic_read(&pservice->enabled));
- }
+ if (pservice->subcnt < 2)
return;
- }
if (pservice->curr_mode == data->mode)
return;
if (data != subdata && subdata->mmu_dev &&
test_bit(MMU_ACTIVATED, &subdata->state)) {
clear_bit(MMU_ACTIVATED, &subdata->state);
- rockchip_iovmm_deactivate(subdata->dev);
}
}
bits = 1 << pservice->mode_bit;
#endif
if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
set_bit(MMU_ACTIVATED, &data->state);
- if (atomic_read(&pservice->enabled))
- rockchip_iovmm_activate(data->dev);
- else
+ if (!atomic_read(&pservice->enabled))
+ /* FIXME: BUG_ON should not be used in mass production */
BUG_ON(!atomic_read(&pservice->enabled));
}
static void vcodec_exit_mode(struct vpu_subdev_data *data)
{
- if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
- clear_bit(MMU_ACTIVATED, &data->state);
- rockchip_iovmm_deactivate(data->dev);
- }
/*
* In case of VPU Combo, it require HW switch its running mode
* before the other HW component start work. set current HW running
case VCODEC_DEVICE_ID_HEVC:
pservice->pd_video = devm_clk_get(dev, "pd_hevc");
if (IS_ERR(pservice->pd_video)) {
- dev_err(dev, "failed on clk_get pd_hevc\n");
pservice->pd_video = NULL;
- return -1;
+ dev_info(dev, "failed on clk_get pd_hevc\n");
}
case VCODEC_DEVICE_ID_COMBO:
case VCODEC_DEVICE_ID_RKVDEC:
#endif
}
-static void vpu_reset(struct vpu_subdev_data *data)
+static void _vpu_reset(struct vpu_subdev_data *data)
{
struct vpu_service_info *pservice = data->pservice;
enum pmu_idle_req type = IDLE_REQ_VIDEO;
if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC)
type = IDLE_REQ_HEVC;
- pr_info("%s: resetting...", dev_name(pservice->dev));
-
-#if defined(CONFIG_ARCH_RK29)
- clk_disable(aclk_ddr_vepu);
- cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
- cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
- cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
- cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
- mdelay(10);
- cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
- cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
- cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
- cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
- clk_enable(aclk_ddr_vepu);
-#elif defined(CONFIG_ARCH_RK30)
- pmu_set_idle_request(IDLE_REQ_VIDEO, true);
- cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
- cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
- cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
- cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
- mdelay(1);
- cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
- cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
- cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
- cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
- pmu_set_idle_request(IDLE_REQ_VIDEO, false);
-#else
-#endif
+ dev_info(pservice->dev, "resetting...\n");
WARN_ON(pservice->reg_codec != NULL);
WARN_ON(pservice->reg_pproc != NULL);
WARN_ON(pservice->reg_resev != NULL);
pservice->reg_pproc = NULL;
pservice->reg_resev = NULL;
- pr_info("for 3288/3368...");
#ifdef CONFIG_RESET_CONTROLLER
+ dev_info(pservice->dev, "for 3288/3368...\n");
+ if (of_machine_is_compatible("rockchip,rk3288"))
+ rockchip_pmu_idle_request(pservice->dev, true);
if (pservice->rst_a && pservice->rst_h) {
- pr_info("reset in\n");
+ dev_info(pservice->dev, "vpu reset in\n");
+
if (pservice->rst_v)
reset_control_assert(pservice->rst_v);
reset_control_assert(pservice->rst_a);
reset_control_assert(pservice->rst_h);
udelay(5);
+
reset_control_deassert(pservice->rst_h);
reset_control_deassert(pservice->rst_a);
if (pservice->rst_v)
reset_control_deassert(pservice->rst_v);
+ } else if (pservice->rst_v) {
+ dev_info(pservice->dev, "hevc reset in\n");
+ reset_control_assert(pservice->rst_v);
+ udelay(5);
+
+ reset_control_deassert(pservice->rst_v);
}
+ if (of_machine_is_compatible("rockchip,rk3288"))
+ rockchip_pmu_idle_request(pservice->dev, false);
#endif
+}
+static void vpu_reset(struct vpu_subdev_data *data)
+{
+ struct vpu_service_info *pservice = data->pservice;
+
+ _vpu_reset(data);
if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
- clear_bit(MMU_ACTIVATED, &data->state);
- if (atomic_read(&pservice->enabled))
- rockchip_iovmm_deactivate(data->dev);
- else
+ if (atomic_read(&pservice->enabled)) {
+ /* Need to reset iommu */
+ vcodec_iommu_detach(data->iommu_info);
+ vcodec_iommu_attach(data->iommu_info);
+ } else {
+ /* FIXME: BUG_ON should not be used in mass production */
BUG_ON(!atomic_read(&pservice->enabled));
+ }
}
atomic_set(&pservice->reset_request, 0);
- pr_info("done\n");
+ dev_info(pservice->dev, "reset done\n");
}
static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg);
vpu_service_dump(pservice);
}
- pr_info("%s: power off...", dev_name(pservice->dev));
+ dev_dbg(pservice->dev, "power off...\n");
udelay(5);
list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
clear_bit(MMU_ACTIVATED, &data->state);
- rockchip_iovmm_deactivate(data->dev);
+ vcodec_iommu_detach(data->iommu_info);
}
}
pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
-
+ pm_runtime_put(pservice->dev);
#if VCODEC_CLOCK_ENABLE
if (pservice->pd_video)
clk_disable_unprepare(pservice->pd_video);
if (pservice->clk_cabac)
clk_disable_unprepare(pservice->clk_cabac);
#endif
- pm_runtime_put(pservice->dev);
atomic_add(1, &pservice->power_off_cnt);
wake_unlock(&pservice->wake_lock);
- pr_info("done\n");
+ dev_dbg(pservice->dev, "power off done\n");
}
static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
}
}
-static void vpu_service_power_on(struct vpu_service_info *pservice)
+static void vpu_service_power_on(struct vpu_subdev_data *data,
+ struct vpu_service_info *pservice)
{
int ret;
ktime_t now = ktime_get();
- if (ktime_to_ns(ktime_sub(now, pservice->last)) > NSEC_PER_SEC) {
+ if (ktime_to_ns(ktime_sub(now, pservice->last)) > NSEC_PER_SEC ||
+ atomic_read(&pservice->power_on_cnt)) {
+ /* reschedule the delayed power-off work */
cancel_delayed_work_sync(&pservice->power_off_work);
vpu_queue_power_off_work(pservice);
pservice->last = now;
}
ret = atomic_add_unless(&pservice->enabled, 1, 1);
- if (!ret)
+ if (!ret) {
+ if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
+ set_bit(MMU_ACTIVATED, &data->state);
+ vcodec_iommu_attach(data->iommu_info);
+ }
return;
+ }
- pr_info("%s: power on\n", dev_name(pservice->dev));
+ dev_dbg(pservice->dev, "power on\n");
#define BIT_VCODEC_CLK_SEL (1<<10)
if (of_machine_is_compatible("rockchip,rk3126"))
#endif
pm_runtime_get_sync(pservice->dev);
+ if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
+ set_bit(MMU_ACTIVATED, &data->state);
+ if (atomic_read(&pservice->enabled))
+ vcodec_iommu_attach(data->iommu_info);
+ else
+ /*
+ * FIXME: BUG_ON should not be used in mass
+ * production.
+ */
+ BUG_ON(!atomic_read(&pservice->enabled));
+ }
+
udelay(5);
atomic_add(1, &pservice->power_on_cnt);
wake_lock(&pservice->wake_lock);
}
static int vcodec_fd_to_iova(struct vpu_subdev_data *data,
- struct vpu_reg *reg, int fd)
+ struct vpu_session *session,
+ struct vpu_reg *reg,
+ int fd)
{
- struct vpu_service_info *pservice = data->pservice;
- struct ion_handle *hdl;
+ int hdl;
int ret = 0;
struct vcodec_mem_region *mem_region;
- hdl = ion_import_dma_buf(pservice->ion_client, fd);
- if (IS_ERR(hdl)) {
- vpu_err("import dma-buf from fd %d failed\n", fd);
- return PTR_ERR(hdl);
- }
- mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
+ hdl = vcodec_iommu_import(data->iommu_info, session, fd);
+ if (hdl < 0)
+ return hdl;
+ mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
if (mem_region == NULL) {
vpu_err("allocate memory for iommu memory region failed\n");
- ion_free(pservice->ion_client, hdl);
+ vcodec_iommu_free(data->iommu_info, session, hdl);
return -ENOMEM;
}
mem_region->hdl = hdl;
- if (data->mmu_dev)
- ret = ion_map_iommu(data->dev, pservice->ion_client,
- mem_region->hdl, &mem_region->iova,
- &mem_region->len);
- else
- ret = ion_phys(pservice->ion_client,
- mem_region->hdl,
- (ion_phys_addr_t *)&mem_region->iova,
- (size_t *)&mem_region->len);
-
+ ret = vcodec_iommu_map_iommu(data->iommu_info, session, mem_region->hdl,
+ &mem_region->iova, &mem_region->len);
if (ret < 0) {
vpu_err("fd %d ion map iommu failed\n", fd);
kfree(mem_region);
- ion_free(pservice->ion_client, hdl);
+ vcodec_iommu_free(data->iommu_info, session, hdl);
+
return -EFAULT;
}
INIT_LIST_HEAD(&mem_region->reg_lnk);
if (scaling_fd > 0) {
int i = 0;
- u32 tmp = vcodec_fd_to_iova(data, reg, scaling_fd);
+ u32 tmp = vcodec_fd_to_iova(data, reg->session, reg,
+ scaling_fd);
if (IS_ERR_VALUE(tmp))
return -1;
return 0;
}
-static int vcodec_bufid_to_iova(struct vpu_subdev_data *data, const u8 *tbl,
+static int vcodec_bufid_to_iova(struct vpu_subdev_data *data,
+ struct vpu_session *session,
+ const u8 *tbl,
int size, struct vpu_reg *reg,
struct extra_info_for_iommu *ext_inf)
{
struct vpu_service_info *pservice = data->pservice;
struct vpu_task_info *task = reg->task;
enum FORMAT_TYPE type;
- struct ion_handle *hdl;
+ int hdl;
int ret = 0;
struct vcodec_mem_region *mem_region;
int i;
if (tbl == NULL || size <= 0) {
dev_err(pservice->dev, "input arguments invalidate\n");
- return -1;
+ return -EINVAL;
}
if (task->get_fmt)
type = task->get_fmt(reg->reg);
else {
- pr_err("invalid task with NULL get_fmt\n");
- return -1;
+ dev_err(pservice->dev, "invalid task with NULL get_fmt\n");
+ return -EINVAL;
}
for (i = 0; i < size; i++) {
if (usr_fd == 0)
continue;
+ /*
+ * To avoid cache sync issues the input buffer is mapped
+ * and unmapped on every run. FIXME: drop this if it turns
+ * out to be unnecessary.
+ */
+ if (task->reg_rlc == tbl[i])
+ vcodec_iommu_free_fd(data->iommu_info, session, usr_fd);
/*
* special offset scale case
*
else
offset = reg->reg[tbl[i]] >> 10;
- vpu_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d\n",
- tbl[i], usr_fd, offset);
+ vpu_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d i %d\n",
+ tbl[i], usr_fd, offset, i);
- hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
- if (IS_ERR(hdl)) {
- dev_err(pservice->dev,
- "import dma-buf from fd %d failed, reg[%d]\n",
- usr_fd, tbl[i]);
- return PTR_ERR(hdl);
- }
+ hdl = vcodec_iommu_import(data->iommu_info, session, usr_fd);
+ if (hdl < 0) {
+ dev_err(pservice->dev,
+ "import dma-buf from fd %d failed, reg[%d]\n",
+ usr_fd, tbl[i]);
+ return hdl;
+ }
+
if (task->reg_pps > 0 && task->reg_pps == tbl[i]) {
int pps_info_offset;
scaling_list_addr_offset);
if (pps_info_count) {
- char *pps = (char *)ion_map_kernel(
- pservice->ion_client, hdl);
+ u8 *pps;
+
+ mutex_lock(&pservice->lock);
+
+ pps = vcodec_iommu_map_kernel
+ (data->iommu_info, session, hdl);
+
vpu_debug(DEBUG_PPS_FILL,
"scaling list setting pps %p\n", pps);
pps += pps_info_offset;
- if (fill_scaling_list_addr_in_pps(
- data, reg, pps,
- pps_info_count,
- pps_info_size,
- scaling_list_addr_offset) < 0) {
- ion_free(pservice->ion_client, hdl);
- return -1;
- }
+ fill_scaling_list_addr_in_pps
+ (data, reg, pps, pps_info_count,
+ pps_info_size,
+ scaling_list_addr_offset);
+
+ vcodec_iommu_unmap_kernel
+ (data->iommu_info, session, hdl);
+ mutex_unlock(&pservice->lock);
}
}
mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
if (!mem_region) {
- ion_free(pservice->ion_client, hdl);
+ vcodec_iommu_free(data->iommu_info, session, hdl);
return -ENOMEM;
}
mem_region->hdl = hdl;
mem_region->reg_idx = tbl[i];
- if (data->mmu_dev)
- ret = ion_map_iommu(data->dev,
- pservice->ion_client,
- mem_region->hdl,
- &mem_region->iova,
- &mem_region->len);
- else
- ret = ion_phys(pservice->ion_client,
- mem_region->hdl,
- (ion_phys_addr_t *)&mem_region->iova,
- (size_t *)&mem_region->len);
-
+ ret = vcodec_iommu_map_iommu(data->iommu_info, session,
+ mem_region->hdl, &mem_region->iova,
+ &mem_region->len);
if (ret < 0) {
- dev_err(pservice->dev, "reg %d fd %d ion map iommu failed\n",
+ dev_err(pservice->dev,
+ "reg %d fd %d ion map iommu failed\n",
tbl[i], usr_fd);
kfree(mem_region);
- ion_free(pservice->ion_client, hdl);
+ vcodec_iommu_free(data->iommu_info, session, hdl);
return ret;
}
}
static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
+ struct vpu_session *session,
struct vpu_reg *reg,
struct extra_info_for_iommu *ext_inf)
{
+ struct vpu_service_info *pservice = data->pservice;
enum FORMAT_TYPE type = reg->task->get_fmt(reg->reg);
if (type < FMT_TYPE_BUTT) {
const u8 *tbl = info->table;
int size = info->count;
- return vcodec_bufid_to_iova(data, tbl, size, reg, ext_inf);
+ return vcodec_bufid_to_iova(data, session, tbl, size, reg,
+ ext_inf);
}
- pr_err("found invalid format type!\n");
- return -1;
+
+ dev_err(pservice->dev, "found invalid format type!\n");
+ return -EINVAL;
}
static void get_reg_freq(struct vpu_subdev_data *data, struct vpu_reg *reg)
vpu_debug_enter();
- if (NULL == reg) {
- vpu_err("error: kmalloc failed\n");
+ if (!reg) {
+ vpu_err("error: kzalloc failed\n");
return NULL;
}
return NULL;
}
- if (0 > vcodec_reg_address_translate(data, reg, &extra_info)) {
+ if (vcodec_reg_address_translate(data, session, reg, &extra_info) < 0) {
int i = 0;
vpu_err("error: translate reg address failed, dumping regs\n");
for (i = 0; i < size >> 2; i++)
- pr_err("reg[%02d]: %08x\n", i, *((u32 *)src + i));
+ dev_err(pservice->dev, "reg[%02d]: %08x\n",
+ i, *((u32 *)src + i));
kfree(reg);
return NULL;
get_reg_freq(data, reg);
vpu_debug_leave();
+
return reg;
}
/* release memory region attach to this registers table. */
list_for_each_entry_safe(mem_region, n,
®->mem_region_list, reg_lnk) {
- ion_free(pservice->ion_client, mem_region->hdl);
+ vcodec_iommu_unmap_iommu(data->iommu_info, reg->session,
+ mem_region->hdl);
+ vcodec_iommu_free(data->iommu_info, reg->session,
+ mem_region->hdl);
list_del_init(&mem_region->reg_lnk);
kfree(mem_region);
}
for (i = 0; i < len; i++)
vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
i, src[i]);
-
/*
* NOTE: The end register is invalid. Do NOT write to it
* Also the base register must be written
struct vpu_reg *reg = list_entry(pservice->waiting.next,
struct vpu_reg, status_link);
- vpu_service_power_on(pservice);
-
if (change_able || !reset_request) {
switch (reg->type) {
case VPU_ENC: {
can_set = 1;
} break;
default: {
- pr_err("undefined reg type %d\n", reg->type);
+ dev_err(pservice->dev,
+ "undefined reg type %d\n",
+ reg->type);
} break;
}
}
struct vpu_request req;
struct vpu_reg *reg;
+ vpu_service_power_on(data, pservice);
+
vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
session->pid, session->type);
if (copy_from_user(&req, (void __user *)arg,
struct vpu_reg *reg;
int ret;
+ vpu_service_power_on(data, pservice);
+
vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
session->pid, session->type);
if (copy_from_user(&req, (void __user *)arg,
atomic_set(&session->task_running, 0);
atomic_sub(task_running,
&pservice->total_running);
- pr_err("%d task is running but not return, reset hardware...",
+ dev_err(pservice->dev,
+ "%d task is running but has not returned, resetting hardware...",
task_running);
vpu_reset(data);
- pr_err("done\n");
+ dev_err(pservice->dev, "done\n");
}
vpu_service_session_clear(data, session);
mutex_unlock(&pservice->lock);
struct compat_vpu_request req;
struct vpu_reg *reg;
+ vpu_service_power_on(data, pservice);
+
vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
session->type);
if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
struct vpu_reg *reg;
int ret;
+ vpu_service_power_on(data, pservice);
+
vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
session->type);
if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
atomic_set(&session->task_running, 0);
atomic_sub(task_running,
&pservice->total_running);
- pr_err("%d task is running but not return, reset hardware...",
- task_running);
+ dev_err(pservice->dev,
+ "%d task is running but has not returned, resetting hardware...",
+ task_running);
vpu_reset(data);
- pr_err("done\n");
+ dev_err(pservice->dev, "done\n");
}
vpu_service_session_clear(data, session);
mutex_unlock(&pservice->lock);
static int vpu_service_check_hw(struct vpu_subdev_data *data)
{
+ struct vpu_service_info *pservice = data->pservice;
int ret = -EINVAL, i = 0;
u32 hw_id = readl_relaxed(data->regs);
hw_id = (hw_id >> 16) & 0xFFFF;
- pr_info("checking hw id %x\n", hw_id);
+ dev_info(pservice->dev, "checking hw id %x\n", hw_id);
data->hw_info = NULL;
+
for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
- struct vcodec_info *info = &vcodec_info_set[i];
+ const struct vcodec_info *info = &vcodec_info_set[i];
if (hw_id == info->hw_id) {
data->hw_id = info->hw_id;
struct vpu_subdev_data *data = container_of(
inode->i_cdev, struct vpu_subdev_data, cdev);
struct vpu_service_info *pservice = data->pservice;
- struct vpu_session *session = kmalloc(sizeof(*session), GFP_KERNEL);
+ struct vpu_session *session = NULL;
vpu_debug_enter();
- if (NULL == session) {
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (!session) {
vpu_err("error: unable to allocate memory for vpu_session.");
return -ENOMEM;
}
+ data->iommu_info->debug_level = debug;
+
session->type = VPU_TYPE_BUTT;
session->pid = current->pid;
INIT_LIST_HEAD(&session->waiting);
filp->private_data = (void *)session;
mutex_unlock(&pservice->lock);
- pr_debug("dev opened\n");
+ dev_dbg(pservice->dev, "dev opened\n");
vpu_debug_leave();
return nonseekable_open(inode, filp);
}
task_running = atomic_read(&session->task_running);
if (task_running) {
- pr_err("error: session %d still has %d task running when closing\n",
- session->pid, task_running);
+ dev_err(pservice->dev,
+ "error: session %d still has %d task running when closing\n",
+ session->pid, task_running);
msleep(50);
}
wake_up(&session->wait);
+ vpu_service_power_on(data, pservice);
mutex_lock(&pservice->lock);
/* remove this filp from the asynchronusly notified filp's */
list_del_init(&session->list_session);
vpu_service_session_clear(data, session);
+ vcodec_iommu_clear(data->iommu_info, session);
kfree(session);
filp->private_data = NULL;
mutex_unlock(&pservice->lock);
- pr_debug("dev closed\n");
+ dev_info(pservice->dev, "closed\n");
vpu_debug_leave();
return 0;
}
pr_alert("vcodec, page fault occur, reset hw\n");
/* reg->reg[101] = 1; */
- vpu_reset(data);
+ _vpu_reset(data);
}
return 0;
static int vcodec_subdev_probe(struct platform_device *pdev,
struct vpu_service_info *pservice)
{
- int ret = 0;
+ uint8_t *regs = NULL;
+ int32_t ret = 0;
+ uint32_t ioaddr = 0;
struct resource *res = NULL;
- u32 ioaddr = 0;
- u8 *regs = NULL;
struct vpu_hw_info *hw_info = NULL;
struct device *dev = &pdev->dev;
- char *name = (char *)dev_name(dev);
struct device_node *np = pdev->dev.of_node;
- struct vpu_subdev_data *data =
- devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
- u32 iommu_en = 0;
+ struct vpu_subdev_data *data = NULL;
+ struct platform_device *sub_dev = NULL;
+ struct device_node *sub_np = NULL;
+ const char *name = np->name;
char mmu_dev_dts_name[40];
- of_property_read_u32(np, "iommu_enabled", &iommu_en);
+ dev_info(dev, "probe device\n");
- pr_info("probe device %s\n", dev_name(dev));
+ data = devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
data->pservice = pservice;
data->dev = dev;
-
- of_property_read_string(np, "name", (const char **)&name);
of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
if (pservice->reg_base == 0) {
ioaddr = pservice->ioaddr;
}
+ sub_np = of_parse_phandle(np, "iommus", 0);
+ if (sub_np) {
+ sub_dev = of_find_device_by_node(sub_np);
+ data->mmu_dev = &sub_dev->dev;
+ }
+
+ /* Back to legacy iommu probe */
+ if (!data->mmu_dev) {
+ switch (data->mode) {
+ case VCODEC_RUNNING_MODE_VPU:
+ sprintf(mmu_dev_dts_name,
+ VPU_IOMMU_COMPATIBLE_NAME);
+ break;
+ case VCODEC_RUNNING_MODE_RKVDEC:
+ sprintf(mmu_dev_dts_name,
+ VDEC_IOMMU_COMPATIBLE_NAME);
+ break;
+ case VCODEC_RUNNING_MODE_HEVC:
+ default:
+ sprintf(mmu_dev_dts_name,
+ HEVC_IOMMU_COMPATIBLE_NAME);
+ break;
+ }
+
+ data->mmu_dev =
+ rockchip_get_sysmmu_dev(mmu_dev_dts_name);
+ if (data->mmu_dev)
+ platform_set_sysmmu(data->mmu_dev, dev);
+
+ rockchip_iovmm_set_fault_handler
+ (dev, vcodec_sysmmu_fault_hdl);
+ }
+
+ dev_info(dev, "vpu mmu dev %p\n", data->mmu_dev);
+
clear_bit(MMU_ACTIVATED, &data->state);
- vcodec_enter_mode(data);
+ vpu_service_power_on(data, pservice);
- vpu_service_power_on(pservice);
ret = vpu_service_check_hw(data);
if (ret < 0) {
vpu_err("error: hw info check faild\n");
atomic_set(&data->enc_dev.irq_count_codec, 0);
atomic_set(&data->enc_dev.irq_count_pp, 0);
- if (iommu_en) {
- if (data->mode == VCODEC_RUNNING_MODE_HEVC)
- sprintf(mmu_dev_dts_name,
- HEVC_IOMMU_COMPATIBLE_NAME);
- else if (data->mode == VCODEC_RUNNING_MODE_VPU)
- sprintf(mmu_dev_dts_name,
- VPU_IOMMU_COMPATIBLE_NAME);
- else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
- sprintf(mmu_dev_dts_name, VDEC_IOMMU_COMPATIBLE_NAME);
- else
- sprintf(mmu_dev_dts_name,
- HEVC_IOMMU_COMPATIBLE_NAME);
-
- data->mmu_dev =
- rockchip_get_sysmmu_dev(mmu_dev_dts_name);
-
- if (data->mmu_dev)
- platform_set_sysmmu(data->mmu_dev, dev);
-
- rockchip_iovmm_set_fault_handler(dev, vcodec_sysmmu_fault_hdl);
- }
-
+ vcodec_enter_mode(data);
+ of_property_read_u32(np, "allocator", (u32 *)&pservice->alloc_type);
+ data->iommu_info = vcodec_iommu_info_create(dev, data->mmu_dev,
+ pservice->alloc_type);
+ dev_info(dev, "allocator is %s\n", pservice->alloc_type == 1 ? "drm" :
+ (pservice->alloc_type == 2 ? "ion" : "null"));
get_hw_info(data);
pservice->auto_freq = true;
INIT_LIST_HEAD(&data->lnk_service);
list_add_tail(&data->lnk_service, &pservice->subdev_list);
-#ifdef CONFIG_DEBUG_FS
- data->debugfs_dir = vcodec_debugfs_create_device_dir(name, parent);
- if (!IS_ERR_OR_NULL(data->debugfs_dir))
- data->debugfs_file_regs =
- debugfs_create_file("regs", 0664, data->debugfs_dir,
- data, &debug_vcodec_fops);
- else
- vpu_err("create debugfs dir %s failed\n", name);
-#endif
return 0;
err:
if (data->child_dev) {
{
struct vpu_service_info *pservice = data->pservice;
+ vcodec_iommu_info_destroy(data->iommu_info);
+ data->iommu_info = NULL;
+
mutex_lock(&pservice->lock);
cancel_delayed_work_sync(&pservice->power_off_work);
vpu_service_power_off(pservice);
pservice->rst_v = devm_reset_control_get(pservice->dev, "video");
if (IS_ERR_OR_NULL(pservice->rst_a)) {
- pr_warn("No aclk reset resource define\n");
+ dev_warn(pservice->dev, "No aclk reset resource define\n");
pservice->rst_a = NULL;
}
if (IS_ERR_OR_NULL(pservice->rst_h)) {
- pr_warn("No hclk reset resource define\n");
+ dev_warn(pservice->dev, "No hclk reset resource define\n");
pservice->rst_h = NULL;
}
if (IS_ERR_OR_NULL(pservice->rst_v)) {
- pr_warn("No core reset resource define\n");
+ dev_warn(pservice->dev, "No core reset resource define\n");
pservice->rst_v = NULL;
}
#endif
INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
pservice->last.tv64 = 0;
- pservice->ion_client = rockchip_ion_client_create("vpu");
- if (IS_ERR(pservice->ion_client)) {
- vpu_err("failed to create ion client for vcodec ret %ld\n",
- PTR_ERR(pservice->ion_client));
- } else {
- vpu_debug(DEBUG_IOMMU, "vcodec ion client create success!\n");
- }
+ pservice->alloc_type = 0;
}
static int vcodec_probe(struct platform_device *pdev)
struct resource *res = NULL;
struct device *dev = &pdev->dev;
struct device_node *np = pdev->dev.of_node;
- struct vpu_service_info *pservice =
- devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
+ struct vpu_service_info *pservice = NULL;
+ struct vcodec_device_info *driver_data;
+ pservice = devm_kzalloc(dev, sizeof(struct vpu_service_info),
+ GFP_KERNEL);
+ if (!pservice)
+ return -ENOMEM;
pservice->dev = dev;
+ driver_data = vcodec_get_drv_data(pdev);
+ if (!driver_data)
+ return -EINVAL;
+
vcodec_read_property(np, pservice);
vcodec_init_drvdata(pservice);
- if (strncmp(pservice->name, "hevc_service", 12) == 0)
- pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
- else if (strncmp(pservice->name, "vpu_service", 11) == 0)
+ /* Underscore for label, hyphens for name */
+ switch (driver_data->device_type) {
+ case VCODEC_DEVICE_TYPE_VPUX:
pservice->dev_id = VCODEC_DEVICE_ID_VPU;
- else if (strncmp(pservice->name, "rkvdec", 6) == 0)
- pservice->dev_id = VCODEC_DEVICE_ID_RKVDEC;
- else
+ break;
+ case VCODEC_DEVICE_TYPE_VPUC:
pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
+ break;
+ case VCODEC_DEVICE_TYPE_HEVC:
+ pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
+ break;
+ case VCODEC_DEVICE_TYPE_RKVD:
+ pservice->dev_id = VCODEC_DEVICE_ID_RKVDEC;
+ break;
+ default:
+ dev_err(dev, "unsupported device type\n");
+ return -ENODEV;
+ }
if (0 > vpu_get_clk(pservice))
goto err;
vpu_service_power_off(pservice);
- pr_info("init success\n");
+ dev_info(dev, "init success\n");
return 0;
err:
- pr_info("init failed\n");
+ dev_info(dev, "init failed\n");
vpu_service_power_off(pservice);
wake_lock_destroy(&pservice->wake_lock);
struct vpu_subdev_data *data = platform_get_drvdata(pdev);
struct vpu_service_info *pservice = data->pservice;
- dev_info(&pdev->dev, "%s IN\n", __func__);
+ dev_info(&pdev->dev, "vcodec shutdown\n");
mutex_lock(&pservice->shutdown_lock);
atomic_set(&pservice->service_on, 0);
vcodec_exit_mode(data);
+ vpu_service_power_on(data, pservice);
vpu_service_clear(data);
vcodec_subdev_remove(data);
pm_runtime_disable(&pdev->dev);
}
-#if defined(CONFIG_OF)
static const struct of_device_id vcodec_service_dt_ids[] = {
- {.compatible = "rockchip,vpu_service",},
- {.compatible = "rockchip,hevc_service",},
- {.compatible = "rockchip,vpu_combo",},
- {.compatible = "rockchip,rkvdec",},
+ {
+ .compatible = "rockchip,vpu_service",
+ .data = &vpu_device_info,
+ },
+ {
+ .compatible = "rockchip,hevc_service",
+ .data = &hevc_device_info,
+ },
+ {
+ .compatible = "rockchip,vpu_combo",
+ .data = &vpu_combo_device_info,
+ },
+ {
+ .compatible = "rockchip,rkvdec",
+ .data = &rkvd_device_info,
+ },
{},
};
-#endif
+
+MODULE_DEVICE_TABLE(of, vcodec_service_dt_ids);
+
+static void *vcodec_get_drv_data(struct platform_device *pdev)
+{
+ struct vcodec_device_info *driver_data = NULL;
+ const struct of_device_id *match;
+
+ match = of_match_node(vcodec_service_dt_ids, pdev->dev.of_node);
+ if (match)
+ driver_data = (struct vcodec_device_info *)match->data;
+
+ return driver_data;
+}
static struct platform_driver vcodec_driver = {
.probe = vcodec_probe,
.remove = vcodec_remove,
.shutdown = vcodec_shutdown,
.driver = {
- .name = "vcodec",
+ .name = "rk-vcodec",
.owner = THIS_MODULE,
-#if defined(CONFIG_OF)
.of_match_table = of_match_ptr(vcodec_service_dt_ids),
-#endif
},
};
writel_relaxed(0, dev->regs + task->reg_irq);
- /*
- * NOTE: rkvdec need to reset after each task to avoid timeout
- * error on H.264 switch to H.265
- */
- if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
- writel(0x100000, dev->regs + task->reg_irq);
-
/* set clock gating to save power */
writel(task->gating_mask, dev->regs + task->reg_en);
} else {
reg_from_run_to_done(data, pservice->reg_codec);
/* avoid vpu timeout and can't recover problem */
- VDPU_SOFT_RESET(data->regs);
+ if (data->mode == VCODEC_RUNNING_MODE_VPU)
+ VDPU_SOFT_RESET(data->regs);
}
}
return IRQ_HANDLED;
}
-static int __init vcodec_service_init(void)
-{
- int ret = platform_driver_register(&vcodec_driver);
-
- if (ret) {
- vpu_err("Platform device register failed (%d).\n", ret);
- return ret;
- }
-
-#ifdef CONFIG_DEBUG_FS
- vcodec_debugfs_init();
-#endif
-
- return ret;
-}
-
-static void __exit vcodec_service_exit(void)
-{
-#ifdef CONFIG_DEBUG_FS
- vcodec_debugfs_exit();
-#endif
-
- platform_driver_unregister(&vcodec_driver);
-}
-
-module_init(vcodec_service_init);
-module_exit(vcodec_service_exit);
+module_platform_driver(vcodec_driver);
MODULE_LICENSE("GPL v2");
-
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
-static int vcodec_debugfs_init(void)
-{
- parent = debugfs_create_dir("vcodec", NULL);
- if (!parent)
- return -1;
-
- return 0;
-}
-
-static void vcodec_debugfs_exit(void)
-{
- debugfs_remove(parent);
-}
-
-static struct dentry *vcodec_debugfs_create_device_dir(
- char *dirname, struct dentry *parent)
-{
- return debugfs_create_dir(dirname, parent);
-}
-
-static int debug_vcodec_show(struct seq_file *s, void *unused)
-{
- struct vpu_subdev_data *data = s->private;
- struct vpu_service_info *pservice = data->pservice;
- unsigned int i, n;
- struct vpu_reg *reg, *reg_tmp;
- struct vpu_session *session, *session_tmp;
-
- mutex_lock(&pservice->lock);
- vpu_service_power_on(pservice);
- if (data->hw_info->hw_id != HEVC_ID) {
- seq_puts(s, "\nENC Registers:\n");
- n = data->enc_dev.iosize >> 2;
-
- for (i = 0; i < n; i++)
- seq_printf(s, "\tswreg%d = %08X\n", i,
- readl_relaxed(data->enc_dev.regs + i));
- }
-
- seq_puts(s, "\nDEC Registers:\n");
-
- n = data->dec_dev.iosize >> 2;
- for (i = 0; i < n; i++)
- seq_printf(s, "\tswreg%d = %08X\n", i,
- readl_relaxed(data->dec_dev.regs + i));
-
- seq_puts(s, "\nvpu service status:\n");
-
- list_for_each_entry_safe(session, session_tmp,
- &pservice->session, list_session) {
- seq_printf(s, "session pid %d type %d:\n",
- session->pid, session->type);
-
- list_for_each_entry_safe(reg, reg_tmp,
- &session->waiting, session_link) {
- seq_printf(s, "waiting register set %p\n", reg);
- }
- list_for_each_entry_safe(reg, reg_tmp,
- &session->running, session_link) {
- seq_printf(s, "running register set %p\n", reg);
- }
- list_for_each_entry_safe(reg, reg_tmp,
- &session->done, session_link) {
- seq_printf(s, "done register set %p\n", reg);
- }
- }
-
- seq_printf(s, "\npower counter: on %d off %d\n",
- atomic_read(&pservice->power_on_cnt),
- atomic_read(&pservice->power_off_cnt));
-
- mutex_unlock(&pservice->lock);
- vpu_service_power_off(pservice);
-
- return 0;
-}
-
-static int debug_vcodec_open(struct inode *inode, struct file *file)
-{
- return single_open(file, debug_vcodec_show, inode->i_private);
-}
-
-#endif
-