driver: video: rockchip: add new driver of vpu
authorJung Zhao <jung.zhao@rock-chips.com>
Sat, 1 Apr 2017 09:05:24 +0000 (17:05 +0800)
committerHuang, Tao <huangtao@rock-chips.com>
Wed, 7 Jun 2017 04:05:34 +0000 (12:05 +0800)
This driver only supports h264e & h265e. If you want to
enable the driver, you must modify the menuconfig and
turn on MPP_SERVICE & MPP_DEVICE.

Change-Id: I7f1c6e473eaf7aedb4fa86791412b5fbcb2c531d
Signed-off-by: buluess.li <buluess.li@rock-chips.com>
Signed-off-by: Jung Zhao <jung.zhao@rock-chips.com>
22 files changed:
Documentation/devicetree/bindings/video/rockchip_enc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/video/rockchip_mpp.txt [new file with mode: 0644]
drivers/video/rockchip/Kconfig
drivers/video/rockchip/Makefile
drivers/video/rockchip/vpu/Kconfig [new file with mode: 0644]
drivers/video/rockchip/vpu/Makefile [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_dev_common.c [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_dev_common.h [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_dev_h265e.c [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_dev_h265e.h [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_dev_h265e_define.h [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_dev_h265e_reg.h [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_dev_rkvenc.c [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_dev_rkvenc.h [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_dev_vepu.c [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_dev_vepu.h [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_service.c [new file with mode: 0644]
drivers/video/rockchip/vpu/mpp_service.h [new file with mode: 0644]
drivers/video/rockchip/vpu/vpu_iommu_drm.c [new file with mode: 0644]
drivers/video/rockchip/vpu/vpu_iommu_ion.c [new file with mode: 0644]
drivers/video/rockchip/vpu/vpu_iommu_ops.c [new file with mode: 0644]
drivers/video/rockchip/vpu/vpu_iommu_ops.h [new file with mode: 0644]

diff --git a/Documentation/devicetree/bindings/video/rockchip_enc.txt b/Documentation/devicetree/bindings/video/rockchip_enc.txt
new file mode 100644 (file)
index 0000000..476139b
--- /dev/null
@@ -0,0 +1,28 @@
+Device-Tree bindings for Rockchip Encoder.
+
+Required properties:
+- compatible: There are several encoder IPs inside Rockchip chips. The value
+       should be one of "rockchip,rkvenc", "rockchip,vepu", "rockchip,h265e".
+
+Example:
+
+DT entry:
+       vepu: vepu@ff340000 {
+               compatible = "rockchip,rk3328-vepu", "rockchip,vepu";
+               rockchip,grf = <&grf>;
+               iommu_enabled = <1>;
+               iommus = <&vepu_mmu>;
+               reg = <0x0 0xff340000 0x0 0x400>;
+               interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>;
+               clock-names = "aclk_vcodec", "hclk_vcodec";
+               resets = <&cru SRST_RKVENC_H264_H>,
+                       <&cru SRST_RKVENC_H264_A>;
+               reset-names = "video_h", "video_a";
+               rockchip,srv = <&venc_srv>;
+               mode_bit = <11>;
+               mode_ctrl = <0x40c>;
+               name = "vepu";
+               allocator = <1>;
+               status = "disabled";
+       };
diff --git a/Documentation/devicetree/bindings/video/rockchip_mpp.txt b/Documentation/devicetree/bindings/video/rockchip_mpp.txt
new file mode 100644 (file)
index 0000000..878724a
--- /dev/null
@@ -0,0 +1,9 @@
+Device-Tree bindings for Rockchip MPP Service.
+
+Required properties:
+- compatible: the value must be "rockchip,mpp_service"
+
+Example:
+       venc_srv: venc_srv {
+               compatible = "rockchip,mpp_service";
+       };
index ad87b9435535510fc1e8a8d0db83de84cf082eb2..b050f8a46fff1cb253fa83becd564b4676d378d5 100755 (executable)
@@ -69,4 +69,4 @@ source "drivers/video/rockchip/rga2/Kconfig"
 source "drivers/video/rockchip/vcodec/Kconfig"
 source "drivers/video/rockchip/iep/Kconfig"
 source "drivers/video/rockchip/dp/Kconfig"
-
+source "drivers/video/rockchip/vpu/Kconfig"
index 7e5667bf2802ffd9eecd3ac1b55bb9cef6a221d8..14a2e5faefa444afd16152b797cfe2d4187e6621 100755 (executable)
@@ -8,4 +8,4 @@ obj-$(CONFIG_IEP) += iep/
 obj-$(CONFIG_RK_TVENCODER) += tve/
 obj-$(CONFIG_RK_VCODEC) += vcodec/
 obj-$(CONFIG_ROCKCHIP_DP) += dp/
-
+obj-$(CONFIG_ROCKCHIP_MPP_SERVICE) += vpu/
diff --git a/drivers/video/rockchip/vpu/Kconfig b/drivers/video/rockchip/vpu/Kconfig
new file mode 100644 (file)
index 0000000..5bb2f47
--- /dev/null
@@ -0,0 +1,17 @@
+menu "ROCKCHIP_MPP"
+       depends on ARCH_ROCKCHIP
+
+config ROCKCHIP_MPP_SERVICE
+       tristate "ROCKCHIP MPP SERVICE driver"
+       default n
+       help
+         rockchip mpp service.
+
+config ROCKCHIP_MPP_DEVICE
+       tristate "ROCKCHIP MPP DEVICE driver"
+       depends on ROCKCHIP_MPP_SERVICE
+       default n
+       help
+         rockchip mpp module.
+
+endmenu
diff --git a/drivers/video/rockchip/vpu/Makefile b/drivers/video/rockchip/vpu/Makefile
new file mode 100644 (file)
index 0000000..0dc4780
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_ROCKCHIP_MPP_SERVICE) += mpp_service.o
+obj-$(CONFIG_ROCKCHIP_MPP_DEVICE) += mpp_dev_rkvenc.o mpp_dev_vepu.o \
+       mpp_dev_h265e.o mpp_dev_common.o vpu_iommu_drm.o vpu_iommu_ion.o \
+       vpu_iommu_ops.o
diff --git a/drivers/video/rockchip/vpu/mpp_dev_common.c b/drivers/video/rockchip/vpu/mpp_dev_common.c
new file mode 100644 (file)
index 0000000..f98ca74
--- /dev/null
@@ -0,0 +1,1088 @@
+/**
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: chenhengming chm@rock-chips.com
+ *        Alpha Lin, alpha.lin@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <linux/rockchip/pmu.h>
+#include <linux/rockchip/cru.h>
+
+#include "vpu_iommu_ops.h"
+#include "mpp_dev_common.h"
+#include "mpp_service.h"
+
+/* Debug bitmask (DEBUG_* flags); runtime-tunable via module parameter. */
+int mpp_dev_debug;
+module_param(mpp_dev_debug, int, 0644);
+MODULE_PARM_DESC(mpp_dev_debug, "bit switch for mpp_dev debug information");
+
+/*
+ * mpp_bufid_to_iova - translate user dma-buf fds in a register set to iovas
+ * @mpp: mpp device the registers belong to
+ * @tbl: table of register indices that may carry an fd
+ * @size: number of entries in @tbl
+ * @reg: register image; fd-carrying entries are rewritten to iova + offset
+ * @ctx: context the mapped regions are attached to (freed at ctx deinit)
+ *
+ * Each listed register packs a dma-buf fd in bits [9:0] and a byte offset
+ * in bits [31:10]. An fd of 0 means "not set" and is skipped.
+ *
+ * Returns 0 on success or a negative errno; on failure every region already
+ * attached to @ctx is unmapped and freed.
+ */
+static int mpp_bufid_to_iova(struct rockchip_mpp_dev *mpp, const u8 *tbl,
+                            int size, u32 *reg, struct mpp_ctx *ctx)
+{
+       int hdl;
+       int ret = 0;
+       struct mpp_mem_region *mem_region, *n;
+       int i;
+       int offset = 0;
+       int retval = 0;
+
+       if (!tbl || size <= 0) {
+               mpp_err("input arguments invalidate, table %p, size %d\n",
+                       tbl, size);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < size; i++) {
+               /* bits [9:0] carry the user-space dma-buf fd */
+               int usr_fd = reg[tbl[i]] & 0x3FF;
+
+               mpp_debug(DEBUG_IOMMU, "reg[%03d] fd = %d\n", tbl[i], usr_fd);
+
+               /* if userspace do not set the fd at this register, skip */
+               if (usr_fd == 0)
+                       continue;
+
+               /* bits [31:10] carry the byte offset into the buffer */
+               offset = reg[tbl[i]] >> 10;
+
+               mpp_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d\n",
+                         tbl[i], usr_fd, offset);
+
+               hdl = vpu_iommu_import(mpp->iommu_info, ctx->session, usr_fd);
+               if (hdl < 0) {
+                       mpp_err("import dma-buf from fd %d failed, reg[%d]\n",
+                               usr_fd, tbl[i]);
+                       retval = hdl;
+                       goto fail;
+               }
+
+               mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
+
+               if (!mem_region) {
+                       vpu_iommu_free(mpp->iommu_info, ctx->session, hdl);
+                       retval = -ENOMEM;
+                       goto fail;
+               }
+
+               mem_region->hdl = hdl;
+               mem_region->reg_idx = tbl[i];
+
+               ret = vpu_iommu_map_iommu(mpp->iommu_info, ctx->session,
+                                         hdl, (void *)&mem_region->iova,
+                                         &mem_region->len);
+
+               if (ret < 0) {
+                       mpp_err("reg %d fd %d ion map iommu failed\n",
+                               tbl[i], usr_fd);
+                       kfree(mem_region);
+                       vpu_iommu_free(mpp->iommu_info, ctx->session, hdl);
+                       retval = ret;
+                       goto fail;
+               }
+
+               reg[tbl[i]] = mem_region->iova + offset;
+               INIT_LIST_HEAD(&mem_region->reg_lnk);
+               list_add_tail(&mem_region->reg_lnk, &ctx->mem_region_list);
+       }
+
+       return 0;
+
+fail:
+       /* undo every mapping attached to this ctx so far */
+       list_for_each_entry_safe(mem_region, n,
+                                &ctx->mem_region_list, reg_lnk) {
+               vpu_iommu_free(mpp->iommu_info, ctx->session, mem_region->hdl);
+               list_del_init(&mem_region->reg_lnk);
+               kfree(mem_region);
+       }
+
+       return retval;
+}
+
+/*
+ * mpp_reg_address_translate - translate fds in @reg using the variant's
+ * translation table at index @idx; thin wrapper over mpp_bufid_to_iova().
+ */
+int mpp_reg_address_translate(struct rockchip_mpp_dev *mpp,
+                             u32 *reg,
+                             struct mpp_ctx *ctx,
+                             int idx)
+{
+       struct mpp_trans_info *trans_info = mpp->variant->trans_info;
+       const u8 *tbl = trans_info[idx].table;
+       int size = trans_info[idx].count;
+
+       return mpp_bufid_to_iova(mpp, tbl, size, reg, ctx);
+}
+
+/*
+ * mpp_translate_extra_info - apply per-register extra offsets supplied by
+ * userspace on top of already-translated iova values. No-op when
+ * @ext_inf is NULL.
+ */
+void mpp_translate_extra_info(struct mpp_ctx *ctx,
+                             struct extra_info_for_iommu *ext_inf,
+                             u32 *reg)
+{
+       if (ext_inf) {
+               int i;
+
+               for (i = 0; i < ext_inf->cnt; i++) {
+                       mpp_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
+                                 ext_inf->elem[i].index,
+                                 ext_inf->elem[i].offset);
+                       reg[ext_inf->elem[i].index] += ext_inf->elem[i].offset;
+               }
+       }
+}
+
+/*
+ * mpp_dump_reg - dump @count 32-bit hardware registers from MMIO @regs.
+ * Debug aid; uses readl_relaxed so no barriers are implied.
+ */
+void mpp_dump_reg(void __iomem *regs, int count)
+{
+       int i;
+
+       /* a missing '\n' here would merge with the next log line */
+       pr_info("dumping registers:\n");
+
+       for (i = 0; i < count; i++)
+               pr_info("reg[%02d]: %08x\n", i, readl_relaxed(regs + i * 4));
+}
+
+/*
+ * mpp_dump_reg_mem - dump @count register values from an in-memory copy
+ * (e.g. the register image received from userspace). Debug aid.
+ */
+void mpp_dump_reg_mem(u32 *regs, int count)
+{
+       int i;
+
+       pr_info("Dumping mpp_service registers:\n");
+
+       for (i = 0; i < count; i++)
+               pr_info("reg[%03d]: %08x\n", i, regs[i]);
+}
+
+/*
+ * mpp_dev_common_ctx_init - initialize the list heads of a freshly
+ * allocated context. Always returns 0.
+ */
+int mpp_dev_common_ctx_init(struct rockchip_mpp_dev *mpp, struct mpp_ctx *cfg)
+{
+       INIT_LIST_HEAD(&cfg->session_link);
+       INIT_LIST_HEAD(&cfg->status_link);
+       INIT_LIST_HEAD(&cfg->mem_region_list);
+
+       return 0;
+}
+
+/* ioctl payload: user pointer to a register image plus its size in bytes. */
+struct mpp_request {
+       u32 *req;
+       u32 size;
+};
+
+#ifdef CONFIG_COMPAT
+/* 32-bit-userspace layout of struct mpp_request (pointer as compat_uptr_t). */
+struct compat_mpp_request {
+       compat_uptr_t req;
+       u32 size;
+};
+#endif
+
+/* task-completion wait timeout and delayed power-off interval, in jiffies */
+#define MPP_TIMEOUT_DELAY              (2 * HZ)
+#define MPP_POWER_OFF_DELAY            (4 * HZ)
+
+/*
+ * mpp_dev_session_clear - free every completed-but-uncollected context of a
+ * session; called on file release. Caller is expected to hold the service
+ * lock (taken in mpp_dev_release()).
+ */
+static void mpp_dev_session_clear(struct rockchip_mpp_dev *mpp,
+                                 struct mpp_session *session)
+{
+       struct mpp_ctx *ctx, *n;
+
+       list_for_each_entry_safe(ctx, n, &session->done, session_link) {
+               mpp_dev_common_ctx_deinit(mpp, ctx);
+       }
+}
+
+/*
+ * ctx_init - build a context from a userspace register image and queue it
+ * on the service's pending list. Delegates to the hw driver's init hook
+ * when present, otherwise allocates a bare context.
+ *
+ * Returns the context, or NULL on allocation/init failure.
+ */
+static struct mpp_ctx *ctx_init(struct rockchip_mpp_dev *mpp,
+                               struct mpp_session *session,
+                               void __user *src, u32 size)
+{
+       struct mpp_ctx *ctx;
+
+       mpp_debug_enter();
+
+       if (mpp->ops->init)
+               ctx = mpp->ops->init(mpp, session, src, size);
+       else
+               ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+
+       if (!ctx)
+               return NULL;
+
+       ctx->session = session;
+       ctx->mpp = mpp;
+
+       mpp_srv_pending_locked(mpp->srv, ctx);
+
+       mpp_debug_leave();
+
+       return ctx;
+}
+
+/*
+ * mpp_dev_common_ctx_deinit - unlink a context from its session/status
+ * lists, release every iommu mapping attached to it, and free it.
+ */
+void mpp_dev_common_ctx_deinit(struct rockchip_mpp_dev *mpp,
+                              struct mpp_ctx *ctx)
+{
+       struct mpp_mem_region *mem_region = NULL, *n;
+
+       list_del_init(&ctx->session_link);
+       list_del_init(&ctx->status_link);
+
+       /* release memory region attach to this registers table. */
+       list_for_each_entry_safe(mem_region, n,
+                                &ctx->mem_region_list, reg_lnk) {
+               vpu_iommu_unmap_iommu(mpp->iommu_info, ctx->session,
+                                     mem_region->hdl);
+               vpu_iommu_free(mpp->iommu_info, ctx->session, mem_region->hdl);
+               list_del_init(&mem_region->reg_lnk);
+               kfree(mem_region);
+       }
+
+       kfree(ctx);
+}
+
+/* (Re)arm the delayed power-off worker, MPP_POWER_OFF_DELAY from now. */
+static inline void mpp_queue_power_off_work(struct rockchip_mpp_dev *mpp)
+{
+       queue_delayed_work(system_wq, &mpp->power_off_work,
+                          MPP_POWER_OFF_DELAY);
+}
+
+/*
+ * mpp_power_off_work - delayed-work handler that powers the device down
+ * when idle. Uses mutex_trylock so a busy device is never blocked on;
+ * the work simply re-queues itself and retries later.
+ */
+static void mpp_power_off_work(struct work_struct *work_s)
+{
+       struct delayed_work *dlwork = container_of(work_s,
+                                                  struct delayed_work, work);
+       struct rockchip_mpp_dev *mpp =
+                                      container_of(dlwork,
+                                                   struct rockchip_mpp_dev,
+                                                   power_off_work);
+
+       if (mutex_trylock(&mpp->srv->lock)) {
+               mpp_dev_power_off(mpp);
+               mutex_unlock(&mpp->srv->lock);
+       } else {
+               /* Come back later if the device is busy... */
+               mpp_queue_power_off_work(mpp);
+       }
+}
+
+/*
+ * mpp_dev_reset - hard-reset the codec via the variant hook, then cycle
+ * the iommu attachment (detach only if the device is actually powered,
+ * which is expected whenever MMU_ACTIVATED is set).
+ */
+static void mpp_dev_reset(struct rockchip_mpp_dev *mpp)
+{
+       mpp_debug_enter();
+
+       atomic_set(&mpp->reset_request, 0);
+
+       mpp->variant->reset(mpp);
+
+       if (!mpp->iommu_enable)
+               return;
+
+       if (test_bit(MMU_ACTIVATED, &mpp->state)) {
+               /* MMU active while powered off would be a state bug */
+               if (atomic_read(&mpp->enabled))
+                       vpu_iommu_detach(mpp->iommu_info);
+               else
+                       WARN_ON(!atomic_read(&mpp->enabled));
+
+               vpu_iommu_attach(mpp->iommu_info);
+       }
+
+       mpp_debug_leave();
+}
+
+/*
+ * mpp_dev_power_on - ensure clocks/iommu are up before running a task.
+ * The delayed power-off work is re-armed at most once per second.
+ * atomic_add_unless(&enabled, 1, 1) flips 0 -> 1 exactly once, so the
+ * actual power-up path below runs only on that transition.
+ */
+void mpp_dev_power_on(struct rockchip_mpp_dev *mpp)
+{
+       int ret;
+       ktime_t now = ktime_get();
+
+       if (ktime_to_ns(ktime_sub(now, mpp->last)) > NSEC_PER_SEC) {
+               cancel_delayed_work_sync(&mpp->power_off_work);
+               mpp_queue_power_off_work(mpp);
+               mpp->last = now;
+       }
+       ret = atomic_add_unless(&mpp->enabled, 1, 1);
+       if (!ret)
+               return;
+
+       pr_info("%s: power on\n", dev_name(mpp->dev));
+
+       mpp->variant->power_on(mpp);
+       if (mpp->iommu_enable) {
+               set_bit(MMU_ACTIVATED, &mpp->state);
+               vpu_iommu_attach(mpp->iommu_info);
+       }
+       atomic_add(1, &mpp->power_on_cnt);
+       wake_lock(&mpp->wake_lock);
+}
+
+/*
+ * mpp_dev_power_off - counterpart of mpp_dev_power_on(); flips the
+ * enabled flag 1 -> 0 and tears down iommu + clocks on that transition.
+ * If tasks are still running it delays 50 ms as a best-effort grace
+ * period before cutting power anyway.
+ *
+ * NOTE(review): pr_info("...power off...") lacks a trailing '\n' and the
+ * later pr_info("done\n") is a separate message, so the two will not
+ * join into one log line without KERN_CONT — confirm intent.
+ */
+void mpp_dev_power_off(struct rockchip_mpp_dev *mpp)
+{
+       int total_running;
+       int ret = atomic_add_unless(&mpp->enabled, -1, 0);
+
+       if (!ret)
+               return;
+
+       total_running = atomic_read(&mpp->total_running);
+       if (total_running) {
+               pr_alert("alert: power off when %d task running!!\n",
+                        total_running);
+               mdelay(50);
+               pr_alert("alert: delay 50 ms for running task\n");
+       }
+
+       pr_info("%s: power off...", dev_name(mpp->dev));
+
+       if (mpp->iommu_enable) {
+               clear_bit(MMU_ACTIVATED, &mpp->state);
+               vpu_iommu_detach(mpp->iommu_info);
+       }
+       mpp->variant->power_off(mpp);
+
+       atomic_add(1, &mpp->power_off_cnt);
+       wake_unlock(&mpp->wake_lock);
+       pr_info("done\n");
+}
+
+/* Return true when the device is currently powered (enabled != 0). */
+bool mpp_dev_is_power_on(struct rockchip_mpp_dev *mpp)
+{
+       return !!atomic_read(&mpp->enabled);
+}
+
+/*
+ * rockchip_mpp_run - move the head pending context to running state and
+ * kick the hardware: power on, honor a pending reset request, then call
+ * the hw driver's run hook. Caller holds the service lock.
+ */
+static void rockchip_mpp_run(struct rockchip_mpp_dev *mpp)
+{
+       struct mpp_ctx *ctx;
+
+       mpp_debug_enter();
+
+       mpp_srv_run(mpp->srv);
+
+       ctx = mpp_srv_get_last_running_ctx(mpp->srv);
+       mpp_time_record(ctx);
+
+       mpp_dev_power_on(mpp);
+
+       mpp_debug(DEBUG_TASK_INFO, "pid %d, start hw %s\n",
+                 ctx->session->pid, dev_name(mpp->dev));
+
+       if (atomic_read(&mpp->reset_request))
+               mpp_dev_reset(mpp);
+
+       if (unlikely(mpp_dev_debug & DEBUG_REGISTER))
+               mpp_dump_reg(mpp->reg_base, mpp->variant->reg_len);
+
+       atomic_add(1, &mpp->total_running);
+       if (mpp->ops->run)
+               mpp->ops->run(mpp);
+
+       mpp_debug_leave();
+}
+
+/*
+ * rockchip_mpp_try_run - start the next pending context if the device (as
+ * judged by the hw driver's prepare hook, or by the service's running
+ * state when no hook exists) is ready. Caller holds the service lock.
+ */
+static void rockchip_mpp_try_run(struct rockchip_mpp_dev *mpp)
+{
+       int ret = 0;
+       struct rockchip_mpp_dev *pending;
+       struct mpp_ctx *ctx;
+
+       mpp_debug_enter();
+
+       if (!mpp_srv_pending_is_empty(mpp->srv)) {
+               /*
+                * if prepare func in hw driver define, state will be determined
+                * by hw driver prepare func, or state will be determined by
+                * service. ret = 0, run ready ctx.
+                */
+               ctx = mpp_srv_get_pending_ctx(mpp->srv);
+               pending = ctx->mpp;
+               if (mpp->ops->prepare)
+                       ret = mpp->ops->prepare(pending);
+               else if (mpp_srv_is_running(mpp->srv))
+                       ret = -1;
+
+               if (ret == 0)
+                       rockchip_mpp_run(pending);
+       }
+
+       mpp_debug_leave();
+}
+
+/*
+ * rockchip_mpp_result - copy a finished context's result registers back to
+ * userspace via the hw driver's result hook, then free the context.
+ * Always returns 0.
+ */
+static int rockchip_mpp_result(struct rockchip_mpp_dev *mpp,
+                              struct mpp_ctx *ctx, u32 __user *dst)
+{
+       mpp_debug_enter();
+
+       if (mpp->ops->result)
+               mpp->ops->result(mpp, ctx, dst);
+
+       mpp_dev_common_ctx_deinit(mpp, ctx);
+
+       mpp_debug_leave();
+       return 0;
+}
+
+/*
+ * mpp_dev_wait_result - block until the session has a completed context
+ * (or MPP_TIMEOUT_DELAY expires), then hand the result back to userspace.
+ *
+ * Returns 0 on success, -ETIMEDOUT on timeout; on any failure the device
+ * is reset and the running counter decremented.
+ *
+ * NOTE(review): wait_event_timeout() returns 0 or remaining jiffies and
+ * never goes negative (only the _interruptible_ variant does), so the
+ * "ret < 0" branches below look unreachable — confirm intended variant.
+ */
+static int mpp_dev_wait_result(struct mpp_session *session,
+                              struct rockchip_mpp_dev *mpp,
+                              u32 __user *req)
+{
+       struct mpp_ctx *ctx;
+       int ret;
+
+       ret = wait_event_timeout(session->wait,
+                                !list_empty(&session->done),
+                                MPP_TIMEOUT_DELAY);
+
+       if (!list_empty(&session->done)) {
+               if (ret < 0)
+                       mpp_err("warning: pid %d wait task error ret %d\n",
+                               session->pid, ret);
+               ret = 0;
+       } else {
+               if (unlikely(ret < 0)) {
+                       mpp_err("error: pid %d wait task ret %d\n",
+                               session->pid, ret);
+               } else if (ret == 0) {
+                       mpp_err("error: pid %d wait %d task done timeout\n",
+                               session->pid,
+                               atomic_read(&session->task_running));
+                       ret = -ETIMEDOUT;
+
+                       mpp_dump_reg(mpp->reg_base, mpp->variant->reg_len);
+               }
+       }
+
+       if (ret < 0) {
+               mpp_srv_lock(mpp->srv);
+               atomic_sub(1, &mpp->total_running);
+
+               if (mpp->variant->reset)
+                       mpp->variant->reset(mpp);
+               mpp_srv_unlock(mpp->srv);
+               return ret;
+       }
+
+       mpp_srv_lock(mpp->srv);
+       ctx = mpp_srv_get_done_ctx(session);
+       rockchip_mpp_result(mpp, ctx, req);
+       mpp_srv_unlock(mpp->srv);
+
+       return 0;
+}
+
+/*
+ * mpp_dev_ioctl - 64-bit ioctl entry point.
+ *
+ * SET_REG: copy a register image descriptor from userspace, build and
+ * queue a context, and try to start the hardware.
+ * GET_REG: wait for a completed context and copy its result back.
+ * PROBE_IOMMU_STATUS: always reports iommu as enabled (1).
+ * Unknown commands fall through to the hw driver's ioctl hook.
+ *
+ * NOTE(review): on SET_REG the ctx queued by ctx_init() is not torn down
+ * if a later step fails here; collection happens via GET_REG or session
+ * release — confirm that is the intended lifecycle.
+ */
+static long mpp_dev_ioctl(struct file *filp, unsigned int cmd,
+                         unsigned long arg)
+{
+       struct rockchip_mpp_dev *mpp =
+                       container_of(filp->f_path.dentry->d_inode->i_cdev,
+                                    struct rockchip_mpp_dev,
+                                    cdev);
+       struct mpp_session *session = (struct mpp_session *)filp->private_data;
+
+       mpp_debug_enter();
+       if (!session)
+               return -EINVAL;
+
+       switch (cmd) {
+       case MPP_IOC_SET_CLIENT_TYPE:
+               break;
+       case MPP_IOC_SET_REG:
+               {
+                       struct mpp_request req;
+                       struct mpp_ctx *ctx;
+
+                       mpp_debug(DEBUG_IOCTL, "pid %d set reg\n",
+                                 session->pid);
+                       if (copy_from_user(&req, (void __user *)arg,
+                                          sizeof(struct mpp_request))) {
+                               mpp_err("error: set reg copy_from_user failed\n");
+                               return -EFAULT;
+                       }
+                       ctx = ctx_init(mpp, session, (void __user *)req.req,
+                                      req.size);
+                       if (!ctx)
+                               return -EFAULT;
+
+                       mpp_srv_lock(mpp->srv);
+                       rockchip_mpp_try_run(mpp);
+                       mpp_srv_unlock(mpp->srv);
+               }
+               break;
+       case MPP_IOC_GET_REG:
+               {
+                       struct mpp_request req;
+
+                       mpp_debug(DEBUG_IOCTL, "pid %d get reg\n",
+                                 session->pid);
+                       if (copy_from_user(&req, (void __user *)arg,
+                                          sizeof(struct mpp_request))) {
+                               mpp_err("error: get reg copy_from_user failed\n");
+                               return -EFAULT;
+                       }
+
+                       return mpp_dev_wait_result(session, mpp, req.req);
+               }
+               break;
+       case MPP_IOC_PROBE_IOMMU_STATUS:
+               {
+                       int iommu_enable = 1;
+
+                       mpp_debug(DEBUG_IOCTL, "VPU_IOC_PROBE_IOMMU_STATUS\n");
+
+                       if (copy_to_user((void __user *)arg,
+                                        &iommu_enable, sizeof(int))) {
+                               mpp_err("error: iommu status copy_to_user failed\n");
+                               return -EFAULT;
+                       }
+               }
+               break;
+       default:
+               {
+                       if (mpp->ops->ioctl)
+                               return mpp->ops->ioctl(session, cmd, arg);
+
+                       mpp_err("unknown mpp ioctl cmd %x\n", cmd);
+               }
+               break;
+       }
+
+       mpp_debug_leave();
+       return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * compat_mpp_dev_ioctl - 32-bit-userspace ioctl entry point; mirrors
+ * mpp_dev_ioctl() but unpacks struct compat_mpp_request and converts
+ * compat_uptr_t pointers with compat_ptr().
+ */
+static long compat_mpp_dev_ioctl(struct file *filp, unsigned int cmd,
+                                unsigned long arg)
+{
+       struct rockchip_mpp_dev *mpp =
+                       container_of(filp->f_path.dentry->d_inode->i_cdev,
+                                    struct rockchip_mpp_dev, cdev);
+       struct mpp_session *session = (struct mpp_session *)filp->private_data;
+
+       mpp_debug_enter();
+
+       if (!session)
+               return -EINVAL;
+
+       switch (cmd) {
+       case MPP_IOC_SET_CLIENT_TYPE:
+               break;
+       case MPP_IOC_SET_REG:
+               {
+                       struct compat_mpp_request req;
+                       struct mpp_ctx *ctx;
+
+                       mpp_debug(DEBUG_IOCTL, "compat set reg\n");
+                       if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
+                                          sizeof(struct compat_mpp_request))) {
+                               mpp_err("compat set_reg copy_from_user failed\n");
+                               return -EFAULT;
+                       }
+                       ctx = ctx_init(mpp, session,
+                                      compat_ptr((compat_uptr_t)req.req),
+                                      req.size);
+                       if (!ctx)
+                               return -EFAULT;
+
+                       mpp_srv_lock(mpp->srv);
+                       rockchip_mpp_try_run(mpp);
+                       mpp_srv_unlock(mpp->srv);
+               }
+               break;
+       case MPP_IOC_GET_REG:
+               {
+                       struct compat_mpp_request req;
+
+                       mpp_debug(DEBUG_IOCTL, "compat get reg\n");
+                       if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
+                                          sizeof(struct compat_mpp_request))) {
+                               mpp_err("compat get reg copy_from_user failed\n");
+                               return -EFAULT;
+                       }
+
+                       return mpp_dev_wait_result(session,
+                                                  mpp,
+                                                  compat_ptr((compat_uptr_t)req.req));
+               }
+               break;
+       case MPP_IOC_PROBE_IOMMU_STATUS:
+               {
+                       int iommu_enable = 1;
+
+                       mpp_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_PROBE_IOMMU_STATUS\n");
+
+                       if (copy_to_user(compat_ptr((compat_uptr_t)arg),
+                                        &iommu_enable, sizeof(int))) {
+                               mpp_err("error: VPU_IOC_PROBE_IOMMU_STATUS failed\n");
+                               return -EFAULT;
+                       }
+               }
+               break;
+       default:
+               {
+                       if (mpp->ops->ioctl)
+                               return mpp->ops->ioctl(session, cmd, arg);
+
+                       mpp_err("unknown mpp ioctl cmd %x\n", cmd);
+               }
+               break;
+       }
+       mpp_debug_leave();
+       return 0;
+}
+#endif
+
+/*
+ * mpp_dev_open - char-device open: allocate a session (via the hw
+ * driver's open hook when present), initialize it, and register it with
+ * the service under the service lock.
+ */
+static int mpp_dev_open(struct inode *inode, struct file *filp)
+{
+       struct rockchip_mpp_dev *mpp =
+                                      container_of(inode->i_cdev,
+                                                   struct rockchip_mpp_dev,
+                                                   cdev);
+       struct mpp_session *session;
+
+       mpp_debug_enter();
+
+       if (mpp->ops->open)
+               session = mpp->ops->open(mpp);
+       else
+               session = kzalloc(sizeof(*session), GFP_KERNEL);
+
+       if (!session)
+               return -ENOMEM;
+
+       session->pid = current->pid;
+       session->mpp = mpp;
+       INIT_LIST_HEAD(&session->done);
+       INIT_LIST_HEAD(&session->list_session);
+       init_waitqueue_head(&session->wait);
+       atomic_set(&session->task_running, 0);
+       mpp_srv_lock(mpp->srv);
+       list_add_tail(&session->list_session, &mpp->srv->session);
+       filp->private_data = (void *)session;
+       mpp_srv_unlock(mpp->srv);
+
+       mpp_debug_leave();
+
+       return nonseekable_open(inode, filp);
+}
+
+/*
+ * mpp_dev_release - char-device close: wait briefly (50 ms, best effort)
+ * for in-flight tasks, unregister the session, free its completed
+ * contexts, and release it via the hw driver's hook or kfree.
+ */
+static int mpp_dev_release(struct inode *inode, struct file *filp)
+{
+       struct rockchip_mpp_dev *mpp = container_of(
+                                                   inode->i_cdev,
+                                                   struct rockchip_mpp_dev,
+                                                   cdev);
+       int task_running;
+       struct mpp_session *session = filp->private_data;
+
+       mpp_debug_enter();
+       if (!session)
+               return -EINVAL;
+
+       task_running = atomic_read(&session->task_running);
+       if (task_running) {
+               pr_err("session %d still has %d task running when closing\n",
+                      session->pid, task_running);
+               msleep(50);
+       }
+       wake_up(&session->wait);
+
+       mpp_srv_lock(mpp->srv);
+       /* remove this filp from the asynchronusly notified filp's */
+       list_del_init(&session->list_session);
+       mpp_dev_session_clear(mpp, session);
+       filp->private_data = NULL;
+       mpp_srv_unlock(mpp->srv);
+       if (mpp->ops->release)
+               mpp->ops->release(session);
+       else
+               kfree(session);
+
+       pr_debug("dev closed\n");
+       mpp_debug_leave();
+       return 0;
+}
+
+/* Character-device file operations for the mpp device node. */
+static const struct file_operations mpp_dev_fops = {
+       .unlocked_ioctl = mpp_dev_ioctl,
+       .open           = mpp_dev_open,
+       .release        = mpp_dev_release,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = compat_mpp_dev_ioctl,
+#endif
+};
+
+/*
+ * mpp_irq - hard-irq handler: ask the hw driver whether the interrupt is
+ * ours; defer real work to the threaded handler (mpp_isr) when it is.
+ */
+static irqreturn_t mpp_irq(int irq, void *dev_id)
+{
+       struct rockchip_mpp_dev *mpp = dev_id;
+
+       int ret = -1;
+
+       if (mpp->ops->irq)
+               ret = mpp->ops->irq(mpp);
+
+       if (ret < 0)
+               return IRQ_NONE;
+       else
+               return IRQ_WAKE_THREAD;
+}
+
+/*
+ * mpp_isr - threaded irq handler: mark the current context done (via the
+ * hw driver's done hook), wake its waiter through mpp_srv_done(), and
+ * immediately try to launch the next pending context.
+ */
+static irqreturn_t mpp_isr(int irq, void *dev_id)
+{
+       struct rockchip_mpp_dev *mpp = dev_id;
+       struct mpp_ctx *ctx;
+       int ret = 0;
+
+       ctx = mpp_srv_get_current_ctx(mpp->srv);
+       if (IS_ERR_OR_NULL(ctx)) {
+               mpp_err("no current context present\n");
+               return IRQ_HANDLED;
+       }
+
+       mpp_time_diff(ctx);
+       mpp_srv_lock(mpp->srv);
+
+       if (mpp->ops->done)
+               ret = mpp->ops->done(mpp);
+
+       if (ret == 0)
+               mpp_srv_done(mpp->srv);
+
+       atomic_sub(1, &mpp->total_running);
+       rockchip_mpp_try_run(mpp);
+
+       mpp_srv_unlock(mpp->srv);
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_IOMMU_API
+/* Associate the sysmmu device with @dev's archdata; no-op without IOMMU. */
+static inline void platform_set_sysmmu(struct device *iommu,
+                                      struct device *dev)
+{
+       dev->archdata.iommu = iommu;
+}
+#else
+static inline void platform_set_sysmmu(struct device *iommu,
+                                      struct device *dev)
+{
+}
+#endif
+
+/*
+ * mpp_sysmmu_fault_hdl - iommu page-fault callback: log the faulting
+ * address, every memory region mapped for the current context, and the
+ * device registers, then reset the hardware. Always returns 0.
+ */
+static int mpp_sysmmu_fault_hdl(struct device *dev,
+                               enum rk_iommu_inttype itype,
+                               unsigned long pgtable_base,
+                               unsigned long fault_addr, unsigned int status)
+{
+       struct platform_device *pdev;
+       struct rockchip_mpp_dev *mpp;
+       struct mpp_ctx *ctx;
+
+       mpp_debug_enter();
+
+       if (!dev) {
+               mpp_err("invalid NULL dev\n");
+               return 0;
+       }
+
+       /* container_of never yields NULL for a non-NULL dev */
+       pdev = container_of(dev, struct platform_device, dev);
+
+       mpp = platform_get_drvdata(pdev);
+       if (!mpp || !mpp->srv) {
+               mpp_err("invalid mpp_dev or mpp_srv\n");
+               return 0;
+       }
+
+       ctx = mpp_srv_get_current_ctx(mpp->srv);
+       if (ctx) {
+               struct mpp_mem_region *mem, *n;
+               int i = 0;
+
+               mpp_err("mpp, fault addr 0x%08lx\n", fault_addr);
+               if (!list_empty(&ctx->mem_region_list)) {
+                       list_for_each_entry_safe(mem, n, &ctx->mem_region_list,
+                                                reg_lnk) {
+                               mpp_err("mpp, reg[%02u] mem[%02d] 0x%lx %lx\n",
+                                       mem->reg_idx, i, mem->iova, mem->len);
+                               i++;
+                       }
+               } else {
+                       mpp_err("no memory region mapped\n");
+               }
+
+               /* avoid shadowing the outer 'mpp' with a local copy */
+               if (ctx->mpp) {
+                       mpp_err("current error register set:\n");
+                       mpp_dump_reg(ctx->mpp->reg_base,
+                                    ctx->mpp->variant->reg_len);
+               }
+
+               if (mpp->variant->reset)
+                       mpp->variant->reset(mpp);
+       }
+
+       mpp_debug_leave();
+
+       return 0;
+}
+
+/*
+ * Look up the system MMU platform device matching @compt in the device
+ * tree.  Returns the IOMMU's struct device, or NULL when either the node
+ * or its platform device cannot be found.
+ */
+static struct device *rockchip_get_sysmmu_dev(const char *compt)
+{
+       struct device_node *dn;
+       struct platform_device *pd;
+
+       dn = of_find_compatible_node(NULL, NULL, compt);
+       if (!dn) {
+               pr_err("can't find device node %s\n", compt);
+               return NULL;
+       }
+
+       pd = of_find_device_by_node(dn);
+       /* of_find_compatible_node() took a reference on the node; drop it */
+       of_node_put(dn);
+       if (!pd) {
+               pr_err("can't find platform device in device node %s\n", compt);
+               return NULL;
+       }
+
+       return &pd->dev;
+}
+
+#if defined(CONFIG_OF)
+/*
+ * Device-tree match table: one entry per supported encoder IP, with the
+ * per-variant configuration attached through .data.
+ * NOTE(review): there is no MODULE_DEVICE_TABLE(of, ...) here, so the
+ * driver cannot auto-load as a module — confirm it is built-in only.
+ */
+static const struct of_device_id mpp_dev_dt_ids[] = {
+       { .compatible = "rockchip,rkvenc", .data = &rkvenc_variant, },
+       { .compatible = "rockchip,vepu", .data = &vepu_variant, },
+       { .compatible = "rockchip,h265e", .data = &h265e_variant, },
+       { },
+};
+#endif
+
+/*
+ * Probe one encoder device described by the device tree:
+ *  - look up the variant and the shared mpp service
+ *  - map registers (or reuse the service's mapping) and request the IRQ
+ *  - attach the IOMMU and create the allocator
+ *  - create the per-device global session and the character device
+ */
+static int mpp_dev_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       struct device *dev = &pdev->dev;
+       char *name = (char *)dev_name(dev);
+       struct device_node *np = pdev->dev.of_node;
+       struct rockchip_mpp_dev *mpp = NULL;
+       const struct of_device_id *match;
+       const struct rockchip_mpp_dev_variant *variant;
+       struct device_node *srv_np, *mmu_np;
+       struct platform_device *srv_pdev;
+       struct resource *res = NULL;
+       struct mpp_session *session;
+       int allocator_type;
+
+       pr_info("probe device %s\n", dev_name(dev));
+
+       match = of_match_node(mpp_dev_dt_ids, dev->of_node);
+       if (!match)
+               return -ENODEV;
+       variant = match->data;
+
+       /* variant->data_len covers the variant struct embedding this one */
+       mpp = devm_kzalloc(dev, variant->data_len, GFP_KERNEL);
+       if (!mpp)
+               return -ENOMEM;
+
+       /* Get service; defer until the service device has probed */
+       srv_np = of_parse_phandle(np, "rockchip,srv", 0);
+       srv_pdev = of_find_device_by_node(srv_np);
+       if (!srv_pdev)
+               return -EPROBE_DEFER;
+
+       mpp->srv = platform_get_drvdata(srv_pdev);
+       if (!mpp->srv)
+               return -EPROBE_DEFER;
+
+       mpp->dev = dev;
+       mpp->state = 0;
+       mpp->variant = variant;
+
+       wake_lock_init(&mpp->wake_lock, WAKE_LOCK_SUSPEND, "mpp");
+       atomic_set(&mpp->enabled, 0);
+       atomic_set(&mpp->power_on_cnt, 0);
+       atomic_set(&mpp->power_off_cnt, 0);
+       atomic_set(&mpp->total_running, 0);
+       atomic_set(&mpp->reset_request, 0);
+
+       INIT_DELAYED_WORK(&mpp->power_off_work, mpp_power_off_work);
+       mpp->last.tv64 = 0;
+
+       of_property_read_string(np, "name", (const char **)&name);
+       of_property_read_u32(np, "iommu_enabled", &mpp->iommu_enable);
+
+       /* A device sharing the service's register window skips the ioremap */
+       if (mpp->srv->reg_base == 0) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+               mpp->reg_base = devm_ioremap_resource(dev, res);
+               if (IS_ERR(mpp->reg_base)) {
+                       ret = PTR_ERR(mpp->reg_base);
+                       goto err;
+               }
+       } else {
+               mpp->reg_base = mpp->srv->reg_base;
+       }
+
+       mpp->irq = platform_get_irq(pdev, 0);
+       if (mpp->irq > 0) {
+               ret = devm_request_threaded_irq(dev, mpp->irq,
+                                               mpp_irq, mpp_isr,
+                                               IRQF_SHARED, dev_name(dev),
+                                               (void *)mpp);
+               if (ret) {
+                       dev_err(dev, "error: can't request vepu irq %d\n",
+                               mpp->irq);
+                       goto err;
+               }
+       } else {
+               dev_info(dev, "No interrupt resource found\n");
+       }
+
+       mmu_np = of_parse_phandle(np, "iommus", 0);
+       if (mmu_np) {
+               struct platform_device *pd = of_find_device_by_node(mmu_np);
+
+               /*
+                * Check 'pd' itself: taking &pd->dev of a NULL 'pd' (as the
+                * old code did) yields a bogus non-NULL pointer.
+                */
+               if (pd) {
+                       mpp->mmu_dev = &pd->dev;
+               } else {
+                       mpp->iommu_enable = false;
+                       dev_err(dev, "get iommu dev failed");
+               }
+       } else {
+               mpp->mmu_dev =
+                       rockchip_get_sysmmu_dev(mpp->variant->mmu_dev_dts_name);
+               if (mpp->mmu_dev) {
+                       platform_set_sysmmu(mpp->mmu_dev, dev);
+                       rockchip_iovmm_set_fault_handler(dev,
+                                                        mpp_sysmmu_fault_hdl);
+               } else {
+                       dev_err(dev,
+                               "get iommu dev %s failed, set iommu_enable to false\n",
+                               mpp->variant->mmu_dev_dts_name);
+                       mpp->iommu_enable = false;
+               }
+       }
+
+       dev_info(dev, "try to get iommu dev %p\n",
+                mpp->mmu_dev);
+
+       of_property_read_u32(np, "allocator", &allocator_type);
+       mpp->iommu_info = vpu_iommu_info_create(dev, mpp->mmu_dev,
+                                               allocator_type);
+       if (IS_ERR(mpp->iommu_info)) {
+               dev_err(dev, "failed to create ion client for mpp ret %ld\n",
+                       PTR_ERR(mpp->iommu_info));
+       }
+
+       /*
+        * this session is global session, each dev
+        * only has one global session, and will be
+        * release when dev remove
+        */
+       session = devm_kzalloc(dev, sizeof(*session), GFP_KERNEL);
+       if (!session) {
+               /* goto err (not a bare return) so the wake_lock is freed */
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       session->mpp = mpp;
+       INIT_LIST_HEAD(&session->done);
+       INIT_LIST_HEAD(&session->list_session);
+       init_waitqueue_head(&session->wait);
+       atomic_set(&session->task_running, 0);
+       /* this first session of each dev is global session */
+       list_add_tail(&session->list_session, &mpp->srv->session);
+
+       ret = mpp->variant->hw_probe(mpp);
+       if (ret)
+               goto err_session;
+
+       dev_info(dev, "resource ready, register device\n");
+       /* create device node */
+       ret = alloc_chrdev_region(&mpp->dev_t, 0, 1, name);
+       if (ret) {
+               dev_err(dev, "alloc dev_t failed\n");
+               goto err_session;
+       }
+
+       cdev_init(&mpp->cdev, &mpp_dev_fops);
+
+       mpp->cdev.owner = THIS_MODULE;
+       mpp->cdev.ops = &mpp_dev_fops;
+
+       ret = cdev_add(&mpp->cdev, mpp->dev_t, 1);
+       if (ret) {
+               unregister_chrdev_region(mpp->dev_t, 1);
+               dev_err(dev, "add dev_t failed\n");
+               goto err_session;
+       }
+
+       mpp->child_dev = device_create(mpp->srv->cls, dev,
+                                      mpp->dev_t, NULL, name);
+
+       mpp_srv_attach(mpp->srv, &mpp->lnk_service);
+
+       platform_set_drvdata(pdev, mpp);
+
+       return 0;
+err_session:
+       /* unlink the global session added above before bailing out */
+       list_del(&session->list_session);
+err:
+       wake_lock_destroy(&mpp->wake_lock);
+       return ret;
+}
+
+/*
+ * Tear down the device: release the hardware, the IOMMU allocator, the
+ * global session and the character device created in probe.
+ */
+static int mpp_dev_remove(struct platform_device *pdev)
+{
+       struct rockchip_mpp_dev *mpp = platform_get_drvdata(pdev);
+       struct mpp_session *session = list_first_entry(&mpp->srv->session,
+                                                      struct mpp_session,
+                                                      list_session);
+
+       mpp->variant->hw_remove(mpp);
+
+       vpu_iommu_clear(mpp->iommu_info, session);
+       vpu_iommu_destroy(mpp->iommu_info);
+       /*
+        * The global session was allocated with devm_kzalloc() in probe, so
+        * kfree()ing it here would make devres free it a second time.  Just
+        * unlink it from the service list; devres frees the memory.
+        */
+       list_del(&session->list_session);
+
+       mpp_srv_lock(mpp->srv);
+       cancel_delayed_work_sync(&mpp->power_off_work);
+       mpp_dev_power_off(mpp);
+       mpp_srv_detach(mpp->srv, &mpp->lnk_service);
+       mpp_srv_unlock(mpp->srv);
+
+       device_destroy(mpp->srv->cls, mpp->dev_t);
+       cdev_del(&mpp->cdev);
+       unregister_chrdev_region(mpp->dev_t, 1);
+
+       return 0;
+}
+
+/* Platform driver binding the DT match table above to probe/remove. */
+static struct platform_driver mpp_dev_driver = {
+       .probe = mpp_dev_probe,
+       .remove = mpp_dev_remove,
+       .driver = {
+               .name = "mpp_dev",
+               .owner = THIS_MODULE,
+#if defined(CONFIG_OF)
+               .of_match_table = of_match_ptr(mpp_dev_dt_ids),
+#endif
+       },
+};
+
+/* Register the platform driver at module load time. */
+static int __init mpp_dev_init(void)
+{
+       int ret = platform_driver_register(&mpp_dev_driver);
+
+       if (ret)
+               mpp_err("Platform device register failed (%d).\n", ret);
+
+       return ret;
+}
+
+/* Unregister the platform driver at module unload time. */
+static void __exit mpp_dev_exit(void)
+{
+       platform_driver_unregister(&mpp_dev_driver);
+}
+
+module_init(mpp_dev_init);
+module_exit(mpp_dev_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0.build.201610121711");
+MODULE_AUTHOR("Alpha Lin alpha.lin@rock-chips.com");
+MODULE_DESCRIPTION("Rockchip mpp device driver");
diff --git a/drivers/video/rockchip/vpu/mpp_dev_common.h b/drivers/video/rockchip/vpu/mpp_dev_common.h
new file mode 100644 (file)
index 0000000..466467c
--- /dev/null
@@ -0,0 +1,309 @@
+/**
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: chenhengming chm@rock-chips.com
+ *        Alpha Lin, alpha.lin@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ROCKCHIP_MPP_DEV_COMMON_H
+#define __ROCKCHIP_MPP_DEV_COMMON_H
+
+#include <linux/cdev.h>
+#include <linux/dma-buf.h>
+#include <linux/rockchip_ion.h>
+#include <linux/rockchip-iovmm.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/wakelock.h>
+
+extern int mpp_dev_debug;
+
+/*
+ * Ioctl definitions
+ */
+
+/* Use 'l' as magic number */
+#define MPP_IOC_MAGIC                  'l'
+
+#define MPP_IOC_SET_CLIENT_TYPE                _IOW(MPP_IOC_MAGIC, 1, u32)
+#define MPP_IOC_GET_HW_FUSE_STATUS     _IOW(MPP_IOC_MAGIC, 2, u32)
+
+#define MPP_IOC_SET_REG                        _IOW(MPP_IOC_MAGIC, 3, u32)
+#define MPP_IOC_GET_REG                        _IOW(MPP_IOC_MAGIC, 4, u32)
+
+#define MPP_IOC_PROBE_IOMMU_STATUS     _IOR(MPP_IOC_MAGIC, 5, u32)
+#define MPP_IOC_CUSTOM_BASE                    0x1000
+
+/*
+ * debug flag usage:
+ * +------+-------------------+
+ * | 8bit |      24bit        |
+ * +------+-------------------+
+ *  0~23 bit is for different information type
+ * 24~31 bit is for information print format
+ */
+
+#define DEBUG_POWER                            0x00000001
+#define DEBUG_CLOCK                            0x00000002
+#define DEBUG_IRQ_STATUS                       0x00000004
+#define DEBUG_IOMMU                            0x00000008
+#define DEBUG_IOCTL                            0x00000010
+#define DEBUG_FUNCTION                         0x00000020
+#define DEBUG_REGISTER                         0x00000040
+#define DEBUG_EXTRA_INFO                       0x00000080
+#define DEBUG_TIMING                           0x00000100
+#define DEBUG_TASK_INFO                                0x00000200
+#define DEBUG_DUMP_ERR_REG                     0x00000400
+
+#define DEBUG_SET_REG                          0x00001000
+#define DEBUG_GET_REG                          0x00002000
+#define DEBUG_PPS_FILL                         0x00004000
+#define DEBUG_IRQ_CHECK                                0x00008000
+#define DEBUG_CACHE_32B                                0x00010000
+
+#define DEBUG_RESET                            0x00020000
+
+#define PRINT_FUNCTION                         0x80000000
+#define PRINT_LINE                             0x40000000
+
+/*
+ * NOTE(review): DEBUG is defined unconditionally, so the empty fallback
+ * definitions in the #else branch below are dead code — confirm whether
+ * this should instead come from a Kconfig option.
+ */
+#define DEBUG
+#ifdef DEBUG
+/* print with caller context when the matching bit is set in mpp_dev_debug */
+#define mpp_debug_func(type, fmt, args...)                     \
+       do {                                                    \
+               if (unlikely(mpp_dev_debug & type)) {           \
+                       pr_info("%s:%d: " fmt,                  \
+                                __func__, __LINE__, ##args);   \
+               }                                               \
+       } while (0)
+/* print without caller context when the matching bit is set */
+#define mpp_debug(type, fmt, args...)                          \
+       do {                                                    \
+               if (unlikely(mpp_dev_debug & type)) {           \
+                       pr_info(fmt, ##args);                   \
+               }                                               \
+       } while (0)
+#else
+#define mpp_debug_func(level, fmt, args...)
+#define mpp_debug(level, fmt, args...)
+#endif
+
+/* function entry/exit tracing, gated by DEBUG_FUNCTION */
+#define mpp_debug_enter() mpp_debug_func(DEBUG_FUNCTION, "enter\n")
+#define mpp_debug_leave() mpp_debug_func(DEBUG_FUNCTION, "leave\n")
+
+/* error print that always includes function name and line */
+#define mpp_err(fmt, args...)                          \
+               pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
+
+/*
+ * Per-variant register translation description.
+ * NOTE(review): @table is a single string pointer while @count suggests
+ * multiple entries — confirm against the users of trans_info whether this
+ * should be an array type.
+ */
+struct mpp_trans_info {
+       const int count;
+       const char * const table;
+};
+
+/* Operating modes of the rkvenc hardware (one-frame vs. link-table). */
+enum RKVENC_MODE {
+       RKVENC_MODE_NONE,
+       RKVENC_MODE_ONEFRAME,
+       RKVENC_MODE_LINKTABLE_FIX,
+       RKVENC_MODE_LINKTABLE_UPDATE,
+       RKVENC_MODE_NUM
+};
+
+struct rockchip_mpp_dev;
+struct mpp_service;
+struct mpp_ctx;
+
+/*
+ * One memory buffer referenced by a register set, linked onto the
+ * service, the register context and the owning session.
+ */
+struct mpp_mem_region {
+       struct list_head srv_lnk;
+       struct list_head reg_lnk;
+       struct list_head session_lnk;
+       /* virtual address for iommu */
+       unsigned long iova;
+       /* length of the mapping in bytes */
+       unsigned long len;
+       /* index of the register carrying this buffer's address */
+       u32 reg_idx;
+       /* allocator handle (presumably from vpu_iommu_import — confirm) */
+       int hdl;
+};
+
+/**
+ * struct mpp_ctx - one register set submitted for processing
+ *
+ * Tracks a single hardware task together with the memory regions its
+ * registers reference.  Variant drivers embed this in their own context
+ * structure (e.g. h265e_ctx.ictx).
+ *
+ * @author ChenHengming (2011-5-4)
+ */
+struct mpp_ctx {
+       /* context belong to */
+       struct rockchip_mpp_dev *mpp;
+       struct mpp_session *session;
+
+       /* link to service session */
+       struct list_head session_link;
+       /* link to service list */
+       struct list_head status_link;
+
+       /* mpp_mem_region.reg_lnk entries attached to this context */
+       struct list_head mem_region_list;
+
+       /* record context running start time */
+       struct timeval start;
+};
+
+/* State bits — presumably kept in rockchip_mpp_dev.state (confirm). */
+enum vpu_ctx_state {
+       MMU_ACTIVATED   = BIT(0),
+};
+
+/* One patch entry: register @index gets an extra @offset applied. */
+struct extra_info_elem {
+       u32 index;
+       u32 offset;
+};
+
+#define EXTRA_INFO_MAGIC       0x4C4A46
+
+/*
+ * Trailer appended to the userspace register payload; only valid when
+ * @magic equals EXTRA_INFO_MAGIC.  Consumed by mpp_translate_extra_info().
+ */
+struct extra_info_for_iommu {
+       u32 magic;
+       u32 cnt;
+       struct extra_info_elem elem[20];
+};
+
+/*
+ * Static description of one encoder IP (rkvenc/vepu/h265e), selected via
+ * the device-tree match table.
+ */
+struct rockchip_mpp_dev_variant {
+       /* allocation size of the variant struct embedding rockchip_mpp_dev */
+       u32 data_len;
+       /* number of registers dumped on an IOMMU fault */
+       u32 reg_len;
+       struct mpp_trans_info *trans_info;
+       /* compatible string for the legacy sysmmu lookup fallback */
+       char *mmu_dev_dts_name;
+
+       int (*hw_probe)(struct rockchip_mpp_dev *mpp);
+       void (*hw_remove)(struct rockchip_mpp_dev *mpp);
+       void (*power_on)(struct rockchip_mpp_dev *mpp);
+       void (*power_off)(struct rockchip_mpp_dev *mpp);
+       int (*reset)(struct rockchip_mpp_dev *mpp);
+};
+
+/*
+ * Per-device state shared by all encoder variants.  Variant drivers embed
+ * this as a member of their own device struct and recover it with
+ * container_of() (see e.g. rockchip_h265e_dev in mpp_dev_h265e.c).
+ */
+struct rockchip_mpp_dev {
+       struct mpp_dev_ops *ops;
+
+       /* character device exposed under the service's class */
+       struct cdev cdev;
+       dev_t dev_t;
+       struct device *child_dev;
+
+       int irq;
+       /* shared mpp service this device is attached to */
+       struct mpp_service *srv;
+
+       void __iomem *reg_base;
+       struct list_head lnk_service;
+
+       struct device *dev;
+
+       unsigned long state;
+       struct vpu_iommu_info *iommu_info;
+
+       const struct rockchip_mpp_dev_variant *variant;
+
+       struct device *mmu_dev;
+       u32 iommu_enable;
+
+       struct wake_lock wake_lock;
+       struct delayed_work power_off_work;
+       /* record previous power-on time */
+       ktime_t last;
+       atomic_t power_on_cnt;
+       atomic_t power_off_cnt;
+       atomic_t total_running;
+       atomic_t enabled;
+       atomic_t reset_request;
+};
+
+/**
+ * struct mpp_dev_ops - context specific operations for mpp_device
+ *
+ * @init       Prepare for registers file for specific hardware, from the
+ *             userspace payload @src of @size bytes.
+ * @prepare    Check HW status for determining run next task or not.
+ * @run                Start a single {en,de}coding run. Set registers to hardware.
+ * @done       Read back processing results and additional data from hardware.
+ * @result     Read status to userspace.
+ * @deinit     Release the resource allocate during init.
+ * @ioctl      ioctl for special HW besides the common ioctl.
+ * @irq                interrupt service for specific hardware.
+ * @open       a specific instance open operation for hardware.
+ * @release    a specific instance release operation for hardware.
+ */
+struct mpp_dev_ops {
+       /* size: in bytes, data sent from userspace, length in bytes */
+       struct mpp_ctx *(*init)(struct rockchip_mpp_dev *mpp,
+                               struct mpp_session *session,
+                               void __user *src, u32 size);
+       int (*prepare)(struct rockchip_mpp_dev *mpp);
+       int (*run)(struct rockchip_mpp_dev *mpp);
+       int (*done)(struct rockchip_mpp_dev *mpp);
+       int (*irq)(struct rockchip_mpp_dev *mpp);
+       int (*result)(struct rockchip_mpp_dev *mpp, struct mpp_ctx *ctx,
+                     u32 __user *dst);
+       void (*deinit)(struct rockchip_mpp_dev *mpp);
+       long (*ioctl)(struct mpp_session *isession,
+                     unsigned int cmd, unsigned long arg);
+       struct mpp_session *(*open)(struct rockchip_mpp_dev *mpp);
+       void (*release)(struct mpp_session *session);
+};
+
+void mpp_dump_reg(void __iomem *regs, int count);
+void mpp_dump_reg_mem(u32 *regs, int count);
+int mpp_reg_address_translate(struct rockchip_mpp_dev *data,
+                             u32 *reg,
+                             struct mpp_ctx *ctx,
+                             int idx);
+void mpp_translate_extra_info(struct mpp_ctx *ctx,
+                             struct extra_info_for_iommu *ext_inf,
+                             u32 *reg);
+
+int mpp_dev_common_ctx_init(struct rockchip_mpp_dev *mpp, struct mpp_ctx *cfg);
+void mpp_dev_common_ctx_deinit(struct rockchip_mpp_dev *mpp,
+                              struct mpp_ctx *ctx);
+void mpp_dev_power_on(struct rockchip_mpp_dev *mpp);
+void mpp_dev_power_off(struct rockchip_mpp_dev *mpp);
+bool mpp_dev_is_power_on(struct rockchip_mpp_dev *mpp);
+
+/* Posted register write (no ordering guarantee), with optional tracing. */
+static inline void mpp_write_relaxed(struct rockchip_mpp_dev *mpp,
+                                    u32 val, u32 reg)
+{
+       void __iomem *dst = mpp->reg_base + reg;
+
+       mpp_debug(DEBUG_SET_REG, "MARK: set reg[%03d]: %08x\n", reg / 4, val);
+       writel_relaxed(val, dst);
+}
+
+/* Ordered register write, with optional tracing. */
+static inline void mpp_write(struct rockchip_mpp_dev *mpp,
+                            u32 val, u32 reg)
+{
+       void __iomem *dst = mpp->reg_base + reg;
+
+       mpp_debug(DEBUG_SET_REG, "MARK: set reg[%03d]: %08x\n", reg / 4, val);
+       writel(val, dst);
+}
+
+/* Register read, with optional tracing of offset and value. */
+static inline u32 mpp_read(struct rockchip_mpp_dev *mpp, u32 reg)
+{
+       u32 value = readl(mpp->reg_base + reg);
+
+       mpp_debug(DEBUG_GET_REG, "MARK: get reg[%03d] 0x%x: %08x\n", reg / 4,
+                 reg, value);
+       return value;
+}
+
+/* Stamp the context start time, only when timing debug is enabled. */
+static inline void mpp_time_record(struct mpp_ctx *ctx)
+{
+       if (!ctx || likely(!(mpp_dev_debug & DEBUG_TIMING)))
+               return;
+
+       do_gettimeofday(&ctx->start);
+}
+
+/* Print the microseconds elapsed since mpp_time_record(). */
+static inline void mpp_time_diff(struct mpp_ctx *ctx)
+{
+       struct timeval now;
+       long elapsed_us;
+
+       do_gettimeofday(&now);
+       elapsed_us = (now.tv_sec  - ctx->start.tv_sec)  * 1000000 +
+                    (now.tv_usec - ctx->start.tv_usec);
+       mpp_debug(DEBUG_TIMING, "consume: %ld us\n", elapsed_us);
+}
+
+extern const struct rockchip_mpp_dev_variant rkvenc_variant;
+extern const struct rockchip_mpp_dev_variant vepu_variant;
+extern const struct rockchip_mpp_dev_variant h265e_variant;
+
+#endif
diff --git a/drivers/video/rockchip/vpu/mpp_dev_h265e.c b/drivers/video/rockchip/vpu/mpp_dev_h265e.c
new file mode 100644 (file)
index 0000000..8666a22
--- /dev/null
@@ -0,0 +1,2142 @@
+/*
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: hehua,hh@rock-chips.com
+ * lixinhuang, buluess.li@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/rk_fb.h>
+#include <linux/rockchip/pmu.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "vpu_iommu_ops.h"
+#include "mpp_dev_common.h"
+#include "mpp_dev_h265e.h"
+#include "mpp_dev_h265e_define.h"
+#include "mpp_dev_h265e_reg.h"
+
+#define MPP_ALIGN_SIZE 0x1000
+
+#define H265E_FIRMWARE_NAME "monet.bin"
+#define PRINT_BS_DATA 0
+#if PRINT_BS_DATA
+#define H265E_BS_DATA_PATH "/h265e/bs_data"
+static char buff[1000000];
+static struct file *fp_bs[H265E_INSTANCE_NUM];
+static u32 w_bs_size;
+#endif
+
+#define H265E_WORK_BUFFER_SIZE                 (128 * 1024)
+#define H265E_TEMP_BUFFER_SIZE                 (1024 * 1024)
+#define H265E_CODE_BUFFER_SIZE                 (1024 * 1024)
+#define H265E_SEC_AXI_BUF_SIZE                 0x12800
+#define H265E_INT_CLOSE 0
+#define H265E_INT_OPEN  0x08
+#define H265E_REMAP_CODE_INDEX                 0
+
+#define H265E_BUSY_CHECK_TIMEOUT               5000
+
+#define DEBUG_H265E_INFO                               0x00100000
+#define DEBUG_H265E_ENCODE_ONE_FRAME   0x00200000
+#define H265E_POWER_SAVE 0
+#define H265E_CLK 1
+#ifdef CONFIG_MFD_SYSCON
+#define H265E_AXI_STATUS 1
+#endif
+static ktime_t h265e_now, h265e_last;
+
+static
+struct mpp_session *rockchip_mpp_h265e_open(struct rockchip_mpp_dev *mpp);
+static void rockchip_mpp_h265e_release(struct mpp_session *isession);
+static int rockchip_mpp_h265e_load_firmware(struct rockchip_mpp_dev *mpp);
+static int rockchip_mpp_h265e_encode_one_frame(struct rockchip_mpp_dev *mpp,
+                                              struct h265e_ctx *ctx,
+                                              int index);
+static int rockchip_mpp_h265e_get_encode_result(struct rockchip_mpp_dev *mpp,
+                                               struct h265e_ctx *result);
+static int rockchip_mpp_h265e_set_gop_parameter(struct rockchip_mpp_dev *mpp,
+                                               int index);
+static
+int rockchip_mpp_h265e_register_frame_buffer(struct rockchip_mpp_dev *mpp,
+                                            int index);
+static void rockchip_mpp_h265e_enable_clk(struct rockchip_mpp_dev *mpp);
+static void rockchip_mpp_h265e_disable_clk(struct rockchip_mpp_dev *mpp);
+
+/*
+ * Allocate a buffer through the iommu allocator and map it for device
+ * access.  On success returns the allocator handle (>= 0) and stores the
+ * device address in @addr; returns -1 on failure.
+ */
+static int rockchip_mpp_h265e_dma_alloc(struct rockchip_mpp_dev *mpp,
+                                       struct mpp_session *session,
+                                       size_t len,
+                                       size_t align,
+                                       unsigned long *addr)
+{
+       int ret = 0;
+       unsigned long tmp;
+       int tmp_hdl;
+
+       tmp_hdl = vpu_iommu_alloc(mpp->iommu_info, session, len, align);
+       if (tmp_hdl < 0) {
+               dev_err(mpp->dev, "error: ion_alloc fail\n");
+               return -1;
+       }
+
+       ret = vpu_iommu_map_iommu(mpp->iommu_info,
+                                 session, tmp_hdl, addr, &tmp);
+       if (ret < 0) {
+               dev_err(mpp->dev, "get link table dma_addr failed\n");
+               /*
+                * Handle 0 is valid (only < 0 means allocation failure), so
+                * free unconditionally; the old "if (tmp_hdl)" leaked it.
+                */
+               vpu_iommu_free(mpp->iommu_info, session, tmp_hdl);
+               return -1;
+       }
+       return tmp_hdl;
+}
+
+/*
+ * Allocate from the per-device global session (the first session on the
+ * service list, created at probe time).
+ */
+static int rockchip_mpp_h265e_global_dma_alloc(struct rockchip_mpp_dev *mpp,
+                                              size_t len,
+                                              size_t align,
+                                              unsigned long *addr)
+{
+       struct mpp_session *global_session;
+
+       global_session = list_first_entry(&mpp->srv->session,
+                                         struct mpp_session, list_session);
+
+       return rockchip_mpp_h265e_dma_alloc(mpp, global_session, len, align,
+                                           addr);
+}
+
+/*
+ * Release every buffer attached to @instance: motion vectors, FBC luma
+ * and chroma planes, the sub-sampled image, and all frame buffers.
+ */
+static void rockchip_mpp_h265e_free_frame_buffer(struct rockchip_mpp_dev *mpp,
+                                                struct rockchip_h265e_instance *instance)
+{
+       struct mpp_session *session = instance->session;
+       struct mpp_h265e_buffer *single[] = {
+               &instance->mv,
+               &instance->fbc_luma,
+               &instance->fbc_chroma,
+               &instance->sub_sample,
+       };
+       size_t i;
+
+       mpp_debug_enter();
+       for (i = 0; i < ARRAY_SIZE(single); i++) {
+               if (single[i]->hdl >= 0)
+                       vpu_iommu_free(mpp->iommu_info, session,
+                                      single[i]->hdl);
+       }
+       for (i = 0; i < ARRAY_SIZE(instance->frame_buffer); i++) {
+               struct mpp_h265e_frame_buffer *fb = &instance->frame_buffer[i];
+
+               if (fb->buffer.hdl >= 0)
+                       vpu_iommu_free(mpp->iommu_info, session,
+                                      fb->buffer.hdl);
+               fb->y = 0;
+               fb->cb = 0;
+               fb->cr = 0;
+       }
+       mpp_debug_leave();
+}
+
+static void rockchip_mpp_h265e_free_instance(struct rockchip_mpp_dev *mpp,
+                                            int index)
+{
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct rockchip_h265e_instance *instance = &enc->instance[index];
+       struct mpp_h265e_buffer *buf = &instance->work;
+       struct mpp_session *session = instance->session;
+
+       mpp_debug_enter();
+#if PRINT_BS_DATA
+       filp_close(fp_bs[index], NULL);
+#endif
+       if (!mpp || !instance)
+               return;
+       if (buf->hdl >= 0)
+               vpu_iommu_free(mpp->iommu_info, session, buf->hdl);
+       rockchip_mpp_h265e_free_frame_buffer(mpp, instance);
+       atomic_set(&enc->instance[index].is_used, 0);
+       mpp_debug_leave();
+}
+
+/*
+ * Poll H265E_VPU_BUSY_STATUS until the core reports idle.  Returns 0 when
+ * idle, -1 after H265E_BUSY_CHECK_TIMEOUT polls without success.
+ */
+static int rockchip_mpp_h265e_wait_busy(struct rockchip_mpp_dev *mpp)
+{
+       int busy = 0xFFFFFFFF;
+       int polls = 0;
+
+       do {
+               busy = mpp_read(mpp, H265E_VPU_BUSY_STATUS);
+               if (polls++ > H265E_BUSY_CHECK_TIMEOUT)
+                       return -1;
+       } while (busy != 0x0);
+       return 0;
+}
+
+/*
+ * Kick one firmware command: mark the core busy, select the target
+ * instance (bit 16 flags the index as valid) and write the command code.
+ * Every command except INIT also raises the host interrupt request.
+ */
+static void rockchip_mpp_h265e_issue_command(struct rockchip_mpp_dev *mpp,
+                                            u32 index, u32 cmd)
+{
+       u32 inst = (index & 0xffff) | (1 << 16);
+
+       mpp_write(mpp, 1, H265E_VPU_BUSY_STATUS);
+       mpp_write(mpp, inst, H265E_INST_INDEX);
+       mpp_write(mpp, cmd, H265E_COMMAND);
+       if (cmd != H265E_CMD_INIT_VPU)
+               mpp_write(mpp, 1, H265E_VPU_HOST_INT_REQ);
+}
+
+#if PRINT_BS_DATA
+/*
+ * Debug-only helper (PRINT_BS_DATA): dump the encoded bitstream of the
+ * current context as hex text to <H265E_BS_DATA_PATH>_<instance>.bin.
+ * The shared 'buff' scratch buffer is serialized by enc->lock.
+ *
+ * NOTE(review): uses set_fs()/vfs_write() kernel file I/O — acceptable
+ * only for bring-up debugging, never in production builds.
+ */
+static int rockchip_mpp_h265e_write_encoder_file(struct rockchip_mpp_dev *mpp)
+{
+       struct h265e_ctx *ctx = container_of(mpp_srv_get_current_ctx(mpp->srv),
+                                            struct h265e_ctx, ictx);
+       struct h265e_session *session =
+                                       container_of(ctx->ictx.session,
+                                                    struct h265e_session,
+                                                    isession);
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       int index = session->instance_index;
+       int nread = 0;
+       loff_t pos = 0;
+       mm_segment_t old_fs;
+       u32 value = 0;
+       u32 i = 0;
+       char file_name[30];
+
+       mutex_lock(&enc->lock);
+       mpp_debug_enter();
+       value = w_bs_size;
+       old_fs = get_fs();
+       set_fs(KERNEL_DS);
+       sprintf(file_name, "%s_%d.bin", H265E_BS_DATA_PATH, index);
+       fp_bs[index] = filp_open(file_name, O_RDWR | O_CREAT | O_APPEND, 0x777);
+       if (IS_ERR(fp_bs[index])) {
+               mpp_err("error: open yuv failed in load_yuv\n");
+               set_fs(old_fs);
+               mutex_unlock(&enc->lock);
+               return -1;
+       }
+       /* each bitstream byte becomes three hex-text characters in 'buff' */
+       for (i = 0; i < (value * 3); i++) {
+               if (ctx->bs_data[i] < 0x10) {
+                       sprintf(&buff[3 * i], "0");
+                       sprintf(&buff[3 * i + 1], "%-2x", ctx->bs_data[i]);
+               } else {
+                       sprintf(&buff[3 * i], "%-3x", ctx->bs_data[i]);
+               }
+       }
+       nread = (int)vfs_write(fp_bs[index], buff, value * 3, &pos);
+       set_fs(old_fs);
+       mutex_unlock(&enc->lock);
+       mpp_debug_leave();
+       return 0;
+}
+#endif
+
+/*
+ * Load the Monet firmware image into a DMA buffer shared with the VPU.
+ * The buffer is allocated from the global session and stays mapped into
+ * the kernel at enc->firmware_cpu_addr.  Returns 0 on success, -1 on
+ * failure.
+ */
+static int rockchip_mpp_h265e_load_firmware(struct rockchip_mpp_dev *mpp)
+{
+       const struct firmware *firmware;
+       u32 size = 0;
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct mpp_session *session = list_first_entry(&mpp->srv->session,
+                                                      struct mpp_session,
+                                                      list_session);
+
+       if (request_firmware(&firmware, H265E_FIRMWARE_NAME, mpp->dev) < 0) {
+               /* nothing to release: request_firmware() failed outright */
+               mpp_err("firmware request failed\n");
+               return -1;
+       }
+       mpp_debug(DEBUG_H265E_INFO, "h265e firmware data %p size %zu\n",
+                 firmware->data, firmware->size);
+       size = ALIGN(firmware->size, H265E_CODE_BUFFER_SIZE);
+       enc->firmware.hdl =
+               rockchip_mpp_h265e_global_dma_alloc(mpp,
+                                                   size,
+                                                   MPP_ALIGN_SIZE,
+                                                   &enc->firmware.dma_addr);
+       if (enc->firmware.hdl < 0) {
+               mpp_err("error: alloc firmware buffer error\n");
+               goto FAIL;
+       }
+       enc->firmware.size = ALIGN(firmware->size, MPP_ALIGN_SIZE);
+       enc->firmware_cpu_addr = vpu_iommu_map_kernel(mpp->iommu_info,
+                                                     session,
+                                                     enc->firmware.hdl);
+       /* the old code memcpy'd without checking the kernel mapping */
+       if (IS_ERR_OR_NULL(enc->firmware_cpu_addr)) {
+               mpp_err("error: map firmware buffer to kernel failed\n");
+               vpu_iommu_free(mpp->iommu_info, session, enc->firmware.hdl);
+               enc->firmware.hdl = -1;
+               goto FAIL;
+       }
+       mpp_debug(DEBUG_H265E_INFO,
+                 "firmware_buffer_size = %d,firmware size = %zd,code_base = %x\n",
+                 size, firmware->size, (u32)enc->firmware.dma_addr);
+       memcpy(enc->firmware_cpu_addr, firmware->data, firmware->size);
+       release_firmware(firmware);
+       return 0;
+FAIL:
+       release_firmware(firmware);
+       return -1;
+}
+
+static struct mpp_ctx *rockchip_mpp_h265e_ctx_init(struct rockchip_mpp_dev *mpp,
+                                                  struct mpp_session *session,
+                                                  void __user *src, u32 dwsize)
+{
+       struct h265e_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct mpp_mem_region *mem_bs_region = NULL;
+       struct mpp_mem_region *mem_src_region = NULL;
+       unsigned long size = 0;
+       int ret = 0;
+
+       mutex_lock(&enc->lock);
+       mpp_debug_enter();
+       if (!ctx) {
+               mutex_unlock(&enc->lock);
+               return NULL;
+       }
+       mpp_dev_common_ctx_init(mpp, &ctx->ictx);
+       ctx->ictx.session = session;
+       if (copy_from_user(&ctx->cfg, src, dwsize)) {
+               mpp_err("error: copy_from_user failed in reg_init\n");
+               kfree(ctx);
+               mutex_unlock(&enc->lock);
+               return NULL;
+       }
+#if H265E_POWER_SAVE
+       rockchip_mpp_h265e_enable_clk(mpp);
+#endif
+       ctx->bs.hdl = vpu_iommu_import(mpp->iommu_info, session,
+                                      ctx->cfg.bs_fd);
+       if (ctx->bs.hdl < 0) {
+               mpp_err("import dma-buf from fd %d failed\n", ctx->cfg.bs_fd);
+               mutex_unlock(&enc->lock);
+               return NULL;
+       }
+
+       ret = vpu_iommu_map_iommu(mpp->iommu_info, session,
+                                 ctx->bs.hdl, &ctx->bs.dma_addr, &size);
+       ctx->bs.size = (u32)size;
+#if PRINT_BS_DATA
+       ctx->bs_data = vpu_iommu_map_kernel(mpp->iommu_info, session,
+                                           ctx->bs.hdl);
+#endif
+
+       if (ret < 0) {
+               mpp_err("bs fd %d ion map iommu failed\n", ctx->cfg.bs_fd);
+               goto FAIL;
+       }
+
+       ctx->src.hdl = vpu_iommu_import(mpp->iommu_info, session,
+                                       ctx->cfg.src_fd);
+       if (ctx->src.hdl < 0) {
+               mpp_err("import dma-buf from fd %d failed\n", ctx->cfg.src_fd);
+               goto FAIL;
+       }
+       ret = vpu_iommu_map_iommu(mpp->iommu_info, session,
+                                 ctx->src.hdl, &ctx->src.dma_addr, &size);
+       ctx->src.size = (u32)size;
+
+       if (ret < 0) {
+               mpp_err("source fd %d ion map iommu failed\n", ctx->cfg.src_fd);
+               goto FAIL;
+       }
+
+       mem_bs_region = kzalloc(sizeof(*mem_bs_region), GFP_KERNEL);
+       if (!mem_bs_region)
+               goto FAIL;
+       mem_src_region = kzalloc(sizeof(*mem_src_region), GFP_KERNEL);
+       if (!mem_src_region)
+               goto FAIL;
+       mem_bs_region->hdl = ctx->bs.hdl;
+       INIT_LIST_HEAD(&mem_bs_region->reg_lnk);
+       list_add_tail(&mem_bs_region->reg_lnk, &ctx->ictx.mem_region_list);
+
+       mem_src_region->hdl = ctx->src.hdl;
+       INIT_LIST_HEAD(&mem_src_region->reg_lnk);
+       list_add_tail(&mem_src_region->reg_lnk, &ctx->ictx.mem_region_list);
+
+       ctx->mode = H265E_MODE_ONEFRAME;
+       mpp_debug_leave();
+       mutex_unlock(&enc->lock);
+
+       return &ctx->ictx;
+
+FAIL:
+       if (ctx->bs.hdl >= 0) {
+               vpu_iommu_unmap_kernel(mpp->iommu_info, session, ctx->bs.hdl);
+               vpu_iommu_free(mpp->iommu_info, session, ctx->bs.hdl);
+       }
+
+       if (ctx->src.hdl >= 0) {
+               vpu_iommu_unmap_kernel(mpp->iommu_info, session, ctx->src.hdl);
+               vpu_iommu_free(mpp->iommu_info, session, ctx->src.hdl);
+       }
+
+       if (!IS_ERR_OR_NULL(mem_bs_region)) {
+               kfree(mem_bs_region);
+               mem_bs_region = NULL;
+       }
+
+       if (!IS_ERR_OR_NULL(mem_src_region)) {
+               kfree(mem_src_region);
+               mem_src_region = NULL;
+       }
+
+       if (!IS_ERR_OR_NULL(ctx)) {
+               kfree(ctx);
+               ctx = NULL;
+       }
+       mutex_unlock(&enc->lock);
+       return NULL;
+}
+
+/*
+ * Kick off encoding for the context currently at the head of the mpp
+ * service queue.  Resolves the owning session to get the hardware
+ * instance index, optionally switches the codec mode bit in the GRF,
+ * then starts a one-frame encode.  Always returns 0.
+ */
+static int rockchip_mpp_h265e_run(struct rockchip_mpp_dev *mpp)
+{
+       struct h265e_ctx *ctx = container_of(mpp_srv_get_current_ctx(mpp->srv),
+                                            struct h265e_ctx, ictx);
+       struct h265e_session *session =
+                                       container_of(ctx->ictx.session,
+                                                    struct h265e_session,
+                                                    isession);
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       int index = session->instance_index;
+
+       mpp_debug_enter();
+#ifdef CONFIG_MFD_SYSCON
+       if (enc->grf) {
+               u32 raw;
+               u32 bits = BIT(enc->mode_bit);
+
+               /*
+                * Set the mode bit; the high 16 bits presumably act as the
+                * Rockchip GRF write-enable mask — confirm against the SoC TRM.
+                */
+               regmap_read(enc->grf, enc->mode_ctrl, &raw);
+               regmap_write(enc->grf, enc->mode_ctrl,
+                            (raw | bits) | (bits << 16));
+       }
+#endif
+       rockchip_mpp_h265e_encode_one_frame(mpp, ctx, index);
+       mpp_debug_leave();
+       return 0;
+}
+
+static int rockchip_mpp_h265e_done(struct rockchip_mpp_dev *mpp)
+{
+       struct mpp_ctx *ictx = mpp_srv_get_current_ctx(mpp->srv);
+       struct h265e_ctx *ctx = container_of(ictx, struct h265e_ctx, ictx);
+       int ret = 0;
+
+       mpp_debug_enter();
+       if (IS_ERR_OR_NULL(ictx)) {
+               mpp_err("Invaidate context to save result\n");
+               return -1;
+       }
+       ret = rockchip_mpp_h265e_get_encode_result(mpp, ctx);
+#if PRINT_BS_DATA
+       rockchip_mpp_h265e_write_encoder_file(mpp);
+#endif
+       mpp_debug_leave();
+
+       return ret;
+}
+
+/*
+ * Interrupt handling: read the VPU interrupt reason and acknowledge it.
+ * Returns 0 for a normal completion, -1 when the bitstream buffer is
+ * full or the reason reads back as 0x02 (treated as an error).
+ */
+static int rockchip_mpp_h265e_irq(struct rockchip_mpp_dev *mpp)
+{
+       int reason = -1;
+
+       reason = mpp_read(mpp, H265E_VPU_VINT_REASON_USR);
+       /* ack: clear the latched reason, the interrupt line, then the
+        * user-visible reason register
+        */
+       mpp_write(mpp, reason, H265E_VPU_VINT_REASON_CLR);
+       mpp_write(mpp, 1, H265E_VPU_VINT_CLEAR);
+       mpp_write(mpp, 0, H265E_VPU_VINT_REASON_USR);
+       if (reason & (1 << INT_BIT_BIT_BUF_FULL))
+               return -1;
+       else if (reason == 0x02)
+               /* NOTE(review): meaning of reason 0x02 not visible here —
+                * confirm which firmware event this encodes
+                */
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Copy the encode result of a finished context back to user space.
+ * Only the one-frame mode is supported; any other mode is rejected.
+ * Returns 0 on success, -1 on an unknown mode or a failed copy.
+ */
+static int rockchip_mpp_h265e_result(struct rockchip_mpp_dev *mpp,
+                                    struct mpp_ctx *ictx, u32 __user *dst)
+{
+       struct h265e_ctx *ctx = container_of(ictx, struct h265e_ctx, ictx);
+
+       /* guard clause: the only supported mode is one-frame encoding */
+       if (ctx->mode != H265E_MODE_ONEFRAME) {
+               mpp_err("invalid context mode %d\n", (int)ctx->mode);
+               return -1;
+       }
+
+       if (copy_to_user(dst, &ctx->result,
+                        sizeof(struct h265e_result))) {
+               mpp_err("copy result to user failed\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Encode the stream headers for one instance into a user-supplied
+ * dma-buf and report the produced byte count in head->size.
+ *
+ * @index: encoder instance index, [0, H265E_INSTANCE_NUM)
+ * @head:  head->buf carries the dma-buf fd of the destination buffer;
+ *         on success head->size holds the header size in bytes.
+ * Returns 0 on success, -1 on failure.
+ */
+int rockchip_mpp_h265e_get_stream_header(struct rockchip_mpp_dev *mpp,
+                                        int index,
+                                        struct hal_h265e_header *head)
+{
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+
+       struct rockchip_h265e_instance *instance = NULL;
+       struct mpp_h265e_cfg *cfg = NULL;
+       u32 value = 0;
+       u32 address = 0;
+       int bs_hd;
+       unsigned long bs_address;
+       unsigned long bs_size;
+       int ret = 0;
+
+       if (index < 0 || index >= H265E_INSTANCE_NUM || !head) {
+               mpp_err("index = %d is invalid", index);
+               return -1;
+       }
+       mutex_lock(&enc->lock);
+#if    H265E_POWER_SAVE
+       rockchip_mpp_h265e_enable_clk(mpp);
+#endif
+       mpp_debug_enter();
+       head->size = 0;
+       instance = &enc->instance[index];
+       cfg = &instance->cfg;
+       /* despite the name, head->buf / address is a dma-buf fd */
+       address = head->buf;
+
+       bs_hd = vpu_iommu_import(mpp->iommu_info, instance->session, address);
+       if (bs_hd < 0) {
+               mpp_err("import dma-buf from fd %d failed\n", address);
+               mutex_unlock(&enc->lock);
+               return -1;
+       }
+       ret = vpu_iommu_map_iommu(mpp->iommu_info,
+                                 instance->session, bs_hd, &bs_address,
+                                 &bs_size);
+       if (ret < 0) {
+               mpp_err("bs fd %d ion map iommu failed\n", address);
+               goto FAIL;
+       }
+       mpp_write(mpp, (u32)bs_address, H265E_BS_START_ADDR);
+       /*
+        * NOTE(review): H265E_BS_SIZE is written with the buffer ADDRESS,
+        * not bs_size — looks like a copy/paste slip; confirm against the
+        * vendor register reference before changing.
+        */
+       mpp_write(mpp, (u32)bs_address, H265E_BS_SIZE);
+
+       /* empty ring buffer: read and write pointers both at the start */
+       mpp_write(mpp, (u32)bs_address, H265E_BS_RD_PTR);
+       mpp_write(mpp, (u32)bs_address, H265E_BS_WR_PTR);
+
+       value = (cfg->line_buf_int_en << 6) |
+               (cfg->slice_int_enable << 5) |
+               (cfg->ring_buffer_enable << 4) |
+               cfg->bs_endian;
+       mpp_write(mpp, value, H265E_BS_PARAM);
+       mpp_write(mpp, 0, H265E_BS_OPTION);
+
+       /* Set up work-buffer */
+       mpp_write(mpp, instance->work.dma_addr, H265E_ADDR_WORK_BASE);
+       mpp_write(mpp, instance->work.size, H265E_WORK_SIZE);
+       mpp_write(mpp, 0, H265E_WORK_PARAM);
+
+       /* Set up temp-buffer */
+       mpp_write(mpp, enc->temp.dma_addr, H265E_ADDR_TEMP_BASE);
+       mpp_write(mpp, enc->temp.size, H265E_TEMP_SIZE);
+       mpp_write(mpp, 0, H265E_TEMP_PARAM);
+
+       mpp_write(mpp, 0, H265E_ENC_SRC_PIC_IDX);
+       /* build the header/VCL selection bits for ENC_CODE_OPTION */
+       if (cfg->code_option.implicit_header_encode == 1) {
+               value = CODEOPT_ENC_HEADER_IMPLICIT |
+                       CODEOPT_ENC_VCL |
+                       (cfg->code_option.encode_aud << 5) |
+                       (cfg->code_option.encode_eos << 6) |
+                       (cfg->code_option.encode_eob << 7);
+       } else {
+               value = (cfg->code_option.implicit_header_encode << 0) |
+                       (cfg->code_option.encode_vcl << 1) |
+                       (cfg->code_option.encode_vps << 2) |
+                       (cfg->code_option.encode_sps << 3) |
+                       (cfg->code_option.encode_pps << 4) |
+                       (cfg->code_option.encode_aud << 5) |
+                       (cfg->code_option.encode_eos << 6) |
+                       (cfg->code_option.encode_eob << 7) |
+                       (cfg->code_option.encode_vui << 9);
+       }
+       mpp_write(mpp, value, H265E_CMD_ENC_CODE_OPTION);
+       rockchip_mpp_h265e_issue_command(mpp, index, H265E_CMD_ENC_PIC);
+       /*
+        * NOTE(review): the SET_PARAM helpers in this file call
+        * rockchip_mpp_h265e_wait_busy() before reading RET_SUCCESS;
+        * confirm whether ENC_PIC needs the same wait here.
+        */
+       if (mpp_read(mpp, H265E_RET_SUCCESS) == 0) {
+               mpp_err("read return register fail\n");
+               goto FAIL;
+       }
+       head->size = mpp_read(mpp, H265E_RET_ENC_PIC_BYTE);
+       mpp_debug(DEBUG_H265E_INFO, "%s %d head->size=%d\n",
+                 __func__, __LINE__, head->size);
+       /*
+        * NOTE(review): the iommu mapping is never explicitly unmapped —
+        * presumably vpu_iommu_free tears it down; verify.
+        */
+       if (bs_hd >= 0)
+               vpu_iommu_free(mpp->iommu_info, instance->session, bs_hd);
+#if    H265E_POWER_SAVE
+       rockchip_mpp_h265e_disable_clk(mpp);
+#endif
+       mutex_unlock(&enc->lock);
+       mpp_debug_leave();
+
+       return 0;
+FAIL:
+       if (bs_hd >= 0)
+               vpu_iommu_free(mpp->iommu_info, instance->session, bs_hd);
+       mutex_unlock(&enc->lock);
+       mpp_err("fail, index = %d\n", index);
+       return -1;
+}
+
+/*
+ * set/change common parameter
+ * when first run this function ,the cfg_mask is 0xffffffff,
+ * and to configure all parameter.
+ * when dynamically changed in the encoding process,
+ * the configure register according to cfg_mask
+ */
+/*
+ * Program the COMMON group of SET_PARAM registers for one instance.
+ * Each H265E_CFG_* bit in cfg->cfg_mask selects one register group; on
+ * the first call the mask is H265E_CFG_CHANGE_SET_PARAM_ALL so every
+ * group below is written.
+ *
+ * @index: encoder instance index, [0, H265E_INSTANCE_NUM)
+ * Returns 0 on success, -1 on busy-wait timeout or firmware failure.
+ */
+static int rockchip_mpp_h265e_set_common_parameter(struct rockchip_mpp_dev *mpp,
+                                                  int index)
+{
+       u32 value = 0;
+       struct mpp_h265e_cfg *cfg = NULL;
+       /* NOTE(review): enc is derived from mpp before the !mpp check below;
+        * container_of does not dereference, so this is safe but fragile.
+        */
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct rockchip_h265e_instance *instance = NULL;
+
+       mpp_debug_enter();
+       if (!mpp || index < 0 || index >= H265E_INSTANCE_NUM) {
+               mpp_err("param is invalid,index = %d", index);
+               return -1;
+       }
+
+       instance = &enc->instance[index];
+       cfg = &instance->cfg;
+
+       /* 0x00010000 presumably marks the index field valid — confirm
+        * against the vendor firmware interface description
+        */
+       mpp_write(mpp, 0x00010000 | index, H265E_INST_INDEX);
+       mpp_write(mpp, (cfg->line_buf_int_en << 6) |
+                 (cfg->slice_int_enable << 5) |
+                 (cfg->ring_buffer_enable << 4) |
+                 cfg->bs_endian, H265E_BS_PARAM);
+       mpp_debug(DEBUG_H265E_INFO, "%s %d W=%d,H=%d,index=%d\n",
+                 __func__, __LINE__,
+                 cfg->width, cfg->height, index);
+
+       /* Set up work-buffer */
+       mpp_write(mpp, (u32)instance->work.dma_addr, H265E_ADDR_WORK_BASE);
+       mpp_write(mpp, instance->work.size, H265E_WORK_SIZE);
+       mpp_write(mpp, 0, H265E_WORK_PARAM);
+
+       /* Set up temp-buffer */
+       mpp_write(mpp, (u32)enc->temp.dma_addr, H265E_ADDR_TEMP_BASE);
+       mpp_write(mpp, enc->temp.size, H265E_TEMP_SIZE);
+       mpp_write(mpp, 0, H265E_TEMP_PARAM);
+       /* Secondary AXI */
+#if    H265E_AXI_STATUS
+       mpp_write(mpp, 0x0, H265E_ADDR_SEC_AXI_BASE);
+       mpp_write(mpp, H265E_SEC_AXI_BUF_SIZE, H265E_SEC_AXI_SIZE);
+       mpp_write(mpp, 0xffff, H265E_USE_SEC_AXI);
+#else
+       mpp_write(mpp, 0, H265E_USE_SEC_AXI);
+#endif
+
+       /* Set up BitstreamBuffer */
+       mpp_write(mpp, 0, H265E_BS_START_ADDR);
+       mpp_write(mpp, 0, H265E_BS_SIZE);
+       mpp_write(mpp, 0, H265E_BS_RD_PTR);
+       mpp_write(mpp, 0, H265E_BS_WR_PTR);
+
+       /* SET_PARAM + COMMON */
+       mpp_write(mpp, H265E_OPT_COMMON, H265E_ENC_SET_PARAM_OPTION);
+       mpp_write(mpp, (u32)cfg->cfg_mask, H265E_ENC_SET_PARAM_ENABLE);
+
+       if (cfg->cfg_mask & H265E_CFG_SEQ_SRC_SIZE_CHANGE) {
+               value = ((cfg->height << 16) | cfg->width);
+               mpp_write(mpp, value, H265E_ENC_SEQ_SRC_SIZE);
+       }
+
+       if (cfg->cfg_mask & H265E_CFG_SEQ_PARAM_CHANGE) {
+               /* set seq parameter*/
+               value = (cfg->profile << 0) |
+                       (cfg->level << 3) |
+                       (cfg->tier << 12) |
+                       (cfg->bit_depth << 14) |
+                       (cfg->chroma_idc << 18) |
+                       (cfg->lossless_enable << 20) |
+                       (cfg->const_intra_pred_flag << 21) |
+                       ((cfg->chroma_cb_qp_offset & 0x1f) << 22) |
+                       ((cfg->chroma_cr_qp_offset & 0x1f) << 27);
+               mpp_write(mpp, value, H265E_ENC_SEQ_PARAM);
+       }
+
+       if (cfg->cfg_mask & H265E_CFG_GOP_PARAM_CHANGE)
+               mpp_write(mpp, cfg->gop_idx, H265E_ENC_SEQ_GOP_PARAM);
+       if (cfg->cfg_mask & H265E_CFG_INTRA_PARAM_CHANGE) {
+               value = (cfg->decoding_refresh_type << 0) |
+                       (cfg->intra_qp << 3) |
+                       (cfg->intra_period << 16);
+               mpp_write(mpp, value, H265E_ENC_SEQ_INTRA_PARAM);
+       }
+
+       /* conformance-window cropping offsets */
+       if (cfg->cfg_mask & H265E_CFG_CONF_WIN_TOP_BOT_CHANGE) {
+               value = (cfg->conf_win_bot << 16) | cfg->conf_win_top;
+               mpp_write(mpp, value, H265E_ENC_SEQ_CONF_WIN_TOP_BOT);
+       }
+
+       if (cfg->cfg_mask & H265E_CFG_CONF_WIN_LEFT_RIGHT_CHANGE) {
+               value = (cfg->conf_win_right << 16) | cfg->conf_win_left;
+               mpp_write(mpp, value, H265E_ENC_SEQ_CONF_WIN_LEFT_RIGHT);
+       }
+       if (cfg->cfg_mask & H265E_CFG_FRAME_RATE_CHANGE)
+               mpp_write(mpp, cfg->frame_rate, H265E_ENC_SEQ_FRAME_RATE);
+
+       if (cfg->cfg_mask & H265E_CFG_INDEPENDENT_SLICE_CHANGE) {
+               value = (cfg->independ_slice_mode_arg << 16) |
+                       cfg->independ_slice_mode;
+               mpp_write(mpp, value, H265E_ENC_SEQ_INDEPENDENT_SLICE);
+       }
+
+       if (cfg->cfg_mask & H265E_CFG_DEPENDENT_SLICE_CHANGE) {
+               value = (cfg->depend_slice_mode_arg << 16) |
+                       cfg->depend_slice_mode;
+               mpp_write(mpp, value, H265E_ENC_SEQ_DEPENDENT_SLICE);
+       }
+
+       if (cfg->cfg_mask & H265E_CFG_INTRA_REFRESH_CHANGE) {
+               value = (cfg->intra_refresh_arg << 16) |
+                       cfg->intra_refresh_mode;
+               mpp_write(mpp, value, H265E_ENC_SEQ_INTRA_REFRESH);
+       }
+
+       /* coding-tool selection bits */
+       if (cfg->cfg_mask & H265E_CFG_PARAM_CHANGE) {
+               value = (cfg->use_recommend_param) |
+                       (cfg->ctu.ctu_qp_enable << 2) |
+                       (cfg->scaling_list_enable << 3) |
+                       (cfg->cu_size_mode << 4) |
+                       (cfg->tmvp_enable << 7) |
+                       (cfg->wpp_enable << 8) |
+                       (cfg->max_num_merge << 9) |
+                       (cfg->dynamic_merge_8x8_enable << 12) |
+                       (cfg->dynamic_merge_16x16_enable << 13) |
+                       (cfg->dynamic_merge_32x32_enable << 14) |
+                       (cfg->disable_deblk << 15) |
+                       (cfg->lf_cross_slice_boundary_enable << 16) |
+                       ((cfg->beta_offset_div2 & 0xF) << 17) |
+                       ((cfg->tc_offset_div2 & 0xF) << 21) |
+                       (cfg->skip_intra_trans << 25) |
+                       (cfg->sao_enable << 26) |
+                       (cfg->intra_in_inter_slice_enable << 27) |
+                       (cfg->intra_nxn_enable << 28);
+
+               mpp_write(mpp, value, H265E_ENC_PARAM);
+       }
+
+       /* rate-control configuration */
+       if (cfg->cfg_mask & H265E_CFG_RC_PARAM_CHANGE) {
+               value = (cfg->rc_enable << 0) |
+                       (cfg->cu_level_rc_enable << 1) |
+                       (cfg->hvs_qp_enable << 2) |
+                       (cfg->hvs_qp_scale_enable << 3) |
+                       (cfg->hvs_qp_scale << 4) |
+                       (cfg->bit_alloc_mode << 7) |
+                       (cfg->init_buf_levelx8 << 9) |
+                       (cfg->ctu.roi_enable << 13) |
+                       (cfg->initial_rc_qp << 14) |
+                       (cfg->initial_delay << 20);
+               mpp_write(mpp, value, H265E_ENC_RC_PARAM);
+       }
+
+       if (cfg->cfg_mask & H265E_CFG_RC_MIN_MAX_QP_CHANGE) {
+               value = (cfg->min_qp << 0) |
+                       (cfg->max_qp << 6) |
+                       (cfg->max_delta_qp << 12) |
+                       ((cfg->intra_qp_offset & 0xFFFF) << 18);
+               mpp_write(mpp, value, H265E_ENC_RC_MIN_MAX_QP);
+       }
+
+       /* per-temporal-layer fixed bit ratios, four layers per register */
+       if (cfg->cfg_mask & H265E_CFG_RC_TARGET_RATE_LAYER_0_3_CHANGE) {
+               value = (cfg->fixed_bit_ratio[0] << 0) |
+                       (cfg->fixed_bit_ratio[1] << 8) |
+                       (cfg->fixed_bit_ratio[2] << 16) |
+                       (cfg->fixed_bit_ratio[3] << 24);
+               mpp_write(mpp, value, H265E_ENC_RC_BIT_RATIO_LAYER_0_3);
+       }
+
+       if (cfg->cfg_mask & H265E_CFG_RC_TARGET_RATE_LAYER_4_7_CHANGE) {
+               value = (cfg->fixed_bit_ratio[4] << 0) |
+                       (cfg->fixed_bit_ratio[5] << 8) |
+                       (cfg->fixed_bit_ratio[6] << 16) |
+                       (cfg->fixed_bit_ratio[7] << 24);
+               mpp_write(mpp, value, H265E_ENC_RC_BIT_RATIO_LAYER_4_7);
+       }
+
+       /* timing information (VUI timing_info fields) */
+       if (cfg->cfg_mask & H265E_CFG_SET_NUM_UNITS_IN_TICK) {
+               mpp_write(mpp, cfg->num_units_in_tick,
+                         H265E_ENC_NUM_UNITS_IN_TICK);
+       }
+
+       if (cfg->cfg_mask & H265E_CFG_SET_TIME_SCALE) {
+               mpp_write(mpp, cfg->time_scale,
+                         H265E_ENC_TIME_SCALE);
+       }
+
+       if (cfg->cfg_mask & H265E_CFG_SET_NUM_TICKS_POC_DIFF_ONE) {
+               mpp_write(mpp, cfg->num_ticks_poc_diff_one,
+                         H265E_ENC_NUM_TICKS_POC_DIFF_ONE);
+       }
+
+       /* noise-reduction filter parameters */
+       if (cfg->cfg_mask & H265E_CFG_NR_PARAM_CHANGE) {
+               value = (cfg->nr_y_enable << 0) |
+                       (cfg->nr_cb_enable << 1) |
+                       (cfg->nr_cr_enable << 2) |
+                       (cfg->nr_noise_est_enable << 3) |
+                       (cfg->nr_noise_sigma_y << 4) |
+                       (cfg->nr_noise_sigma_cb << 12) |
+                       (cfg->nr_noise_sigma_cr << 20);
+               mpp_write(mpp, value, H265E_ENC_NR_PARAM);
+       }
+
+       if (cfg->cfg_mask & H265E_CFG_NR_WEIGHT_CHANGE) {
+               value = (cfg->nr_intra_weight_y << 0) |
+                       (cfg->nr_intra_weight_cb << 5) |
+                       (cfg->nr_intra_weight_cr << 10) |
+                       (cfg->nr_inter_weight_y << 15) |
+                       (cfg->nr_inter_weight_cb << 20) |
+                       (cfg->nr_inter_weight_cr << 25);
+               mpp_write(mpp, value, H265E_ENC_NR_WEIGHT);
+       }
+       if (cfg->cfg_mask & H265E_CFG_RC_TARGET_RATE_CHANGE)
+               mpp_write(mpp, cfg->bit_rate, H265E_ENC_RC_TARGET_RATE);
+       if (cfg->cfg_mask & H265E_CFG_RC_TRANS_RATE_CHANGE)
+               mpp_write(mpp, cfg->trans_rate, H265E_ENC_RC_TRANS_RATE);
+       if (cfg->cfg_mask & H265E_CFG_ROT_PARAM_CHANGE)
+               mpp_write(mpp, 0, H265E_ENC_ROT_PARAM);
+       /* intra min/max QP is only written on the initial full configure */
+       if (cfg->cfg_mask == H265E_CFG_CHANGE_SET_PARAM_ALL) {
+               value = (cfg->intra_max_qp << 6) | cfg->intra_min_qp;
+               mpp_write(mpp, value, H265E_ENC_RC_INTRA_MIN_MAX_QP);
+       }
+       /* fire SET_PARAM and wait for the firmware to acknowledge */
+       rockchip_mpp_h265e_issue_command(mpp, index, H265E_CMD_SET_PARAM);
+       if (rockchip_mpp_h265e_wait_busy(mpp) == -1) {
+               mpp_err("h265e_wait_busy timeout, index=%d\n", index);
+               goto FAIL;
+       }
+       if (mpp_read(mpp, H265E_RET_SUCCESS) == 0) {
+               mpp_err("h265e set common parameter ret fail\n");
+               goto FAIL;
+       }
+       mpp_debug_leave();
+       return 0;
+FAIL:
+       mpp_err("fail,index = %d\n", index);
+       return -1;
+}
+
+/*
+ * Program the CUSTOM_GOP group of SET_PARAM (only when the instance
+ * selected PRESET_IDX_CUSTOM_GOP), then refresh the minimum frame- and
+ * source-buffer counts the firmware reports back.
+ *
+ * @index: encoder instance index, [0, H265E_INSTANCE_NUM)
+ * Returns 0 on success, -1 on timeout or firmware failure.
+ */
+static int rockchip_mpp_h265e_set_gop_parameter(struct rockchip_mpp_dev *mpp,
+                                               int index)
+{
+       u32 value = 0;
+       int int_reason = 0;
+       int i = 0, j = 0;
+       struct mpp_h265e_cfg *cfg = NULL;
+       /* NOTE(review): derived from mpp before the !mpp check; container_of
+        * does not dereference, so safe but fragile
+        */
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct rockchip_h265e_instance *instance = NULL;
+
+       mpp_debug_enter();
+
+       if (!mpp || index < 0 || index >= H265E_INSTANCE_NUM) {
+               mpp_err("param is invalid,index = %d", index);
+               return -1;
+       }
+
+       instance = &enc->instance[index];
+       cfg = &instance->cfg;
+
+       /* Set up work-buffer */
+       mpp_write(mpp, (u32)instance->work.dma_addr, H265E_ADDR_WORK_BASE);
+       mpp_write(mpp, instance->work.size, H265E_WORK_SIZE);
+       mpp_write(mpp, 0, H265E_WORK_PARAM);
+
+       /* Set up temp-buffer */
+       mpp_write(mpp, (u32)enc->temp.dma_addr, H265E_ADDR_TEMP_BASE);
+       mpp_write(mpp, enc->temp.size, H265E_TEMP_SIZE);
+       mpp_write(mpp, 0, H265E_TEMP_PARAM);
+       /* Secondary AXI */
+#if    H265E_AXI_STATUS
+       mpp_write(mpp, 0x0, H265E_ADDR_SEC_AXI_BASE);
+       mpp_write(mpp, H265E_SEC_AXI_BUF_SIZE, H265E_SEC_AXI_SIZE);
+       mpp_write(mpp, 0xffff, H265E_USE_SEC_AXI);
+#else
+       mpp_write(mpp, 0, H265E_USE_SEC_AXI);
+#endif
+
+       /*
+        * SET_PARAM + CUSTOM_GOP
+        * only when gop_size == custom_gop,
+        * custom_gop related registers should be set
+        */
+       mpp_write(mpp, 0x00010000 | index, H265E_INST_INDEX);
+       /* int_reason is written but never read afterwards in this view */
+       int_reason = 0;
+       if (cfg->gop_idx == PRESET_IDX_CUSTOM_GOP) {
+               mpp_write(mpp, H265E_OPT_CUSTOM_GOP,
+                         H265E_ENC_SET_PARAM_OPTION);
+               mpp_write(mpp, (u32)H265E_CFG_CHANGE_SET_PARAM_ALL,
+                         H265E_ENC_SET_CUSTOM_GOP_ENABLE);
+
+               value = (cfg->gop.custom_gop_size << 0) |
+                       (cfg->gop.use_derive_lambda_weight << 4);
+               mpp_write(mpp, value, H265E_ENC_CUSTOM_GOP_PARAM);
+
+               /* one param/lambda register pair per GOP picture */
+               for (i = 0; i < cfg->gop.custom_gop_size; i++) {
+                       value = (cfg->gop.pic[i].type << 0) |
+                               (cfg->gop.pic[i].offset << 2) |
+                               (cfg->gop.pic[i].qp << 6) |
+                               ((cfg->gop.pic[i].ref_poc_l0 &
+                                 0x1F) << 14) |
+                               ((cfg->gop.pic[i].ref_poc_l1 &
+                                 0x1F) << 19) |
+                               (cfg->gop.pic[i].temporal_id << 24);
+
+                       mpp_write(mpp, value,
+                                 H265E_ENC_CUSTOM_GOP_PIC_PARAM_0 + (i * 4));
+                       mpp_write(mpp, cfg->gop.gop_pic_lambda[i],
+                                 H265E_ENC_CUSTOM_GOP_PIC_LAMBDA_0 + (i * 4));
+               }
+               /* zero the remaining, unused GOP slots */
+               for (j = i; j < H265E_MAX_GOP_NUM; j++) {
+                       mpp_write(mpp, 0,
+                                 H265E_ENC_CUSTOM_GOP_PIC_PARAM_0 + (j * 4));
+                       mpp_write(mpp, 0,
+                                 H265E_ENC_CUSTOM_GOP_PIC_LAMBDA_0 + (j * 4));
+               }
+               rockchip_mpp_h265e_issue_command(mpp,
+                                                index,
+                                                H265E_CMD_SET_PARAM);
+               if (rockchip_mpp_h265e_wait_busy(mpp) == -1) {
+                       mpp_err("h265e_wait_busy timeout, index=%d\n", index);
+                       goto FAIL;
+               }
+               if (mpp_read(mpp, H265E_RET_SUCCESS) == 0) {
+                       mpp_err("h265e set gop ret fail\n");
+                       goto FAIL;
+               }
+       }
+
+       /* take the firmware's reported minimum buffer counts if larger */
+       value = mpp_read(mpp, H265E_RET_ENC_MIN_FB_NUM);
+       if (value > instance->min_frame_buffer_count)
+               instance->min_frame_buffer_count = value;
+
+       value = mpp_read(mpp, H265E_RET_ENC_MIN_SRC_BUF_NUM);
+       if (value > instance->min_src_frame_count)
+               instance->min_src_frame_count = value;
+       mpp_debug(DEBUG_H265E_INFO,
+                 "%s %d,min_frame_buffer_count = %d,min_src_frame_count=%d\n",
+                 __func__, __LINE__, instance->min_frame_buffer_count,
+                 instance->min_src_frame_count);
+       mpp_debug_leave();
+       return 0;
+
+FAIL:
+       mpp_err("fail,index = %d\n", index);
+       return -1;
+}
+
+/*
+ * Program the VUI group of SET_PARAM: either individual VUI fields via
+ * host registers (cfg->vui.flags) and/or pre-built VUI/HRD RBSP blobs
+ * supplied by address/size.  Skipped entirely when neither source of
+ * VUI data is configured.
+ *
+ * @index: encoder instance index, [0, H265E_INSTANCE_NUM)
+ * Returns 0 on success, -1 on timeout or firmware failure.
+ */
+static int rockchip_mpp_h265e_set_vui_parameter(struct rockchip_mpp_dev *mpp,
+                                               int index)
+{
+       struct mpp_h265e_cfg *cfg = NULL;
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct rockchip_h265e_instance *instance = NULL;
+       u32 value = 0;
+
+       mpp_debug_enter();
+       if (!mpp || index < 0 || index >= H265E_INSTANCE_NUM) {
+               mpp_err("param is invalid,index = %d", index);
+               return -1;
+       }
+
+       instance = &enc->instance[index];
+       cfg = &instance->cfg;
+       mpp_write(mpp, 0x00010000 | index, H265E_INST_INDEX);
+
+       /* Set up work-buffer */
+       mpp_write(mpp, (u32)instance->work.dma_addr, H265E_ADDR_WORK_BASE);
+       mpp_write(mpp, instance->work.size, H265E_WORK_SIZE);
+       mpp_write(mpp, 0, H265E_WORK_PARAM);
+
+       if (cfg->vui.flags || cfg->vui_rbsp ||
+           cfg->hrd_rbsp_in_vps || cfg->hrd_rbsp_in_vui) {
+               /*** VUI encoding by host registers ***/
+               if (cfg->vui.flags) {
+                       mpp_write(mpp, H265E_OPT_VUI,
+                                 H265E_ENC_SET_PARAM_OPTION);
+                       mpp_write(mpp, cfg->vui.flags,
+                                 H265E_ENC_VUI_PARAM_FLAGS);
+                       mpp_write(mpp, cfg->vui.aspect_ratio_idc,
+                                 H265E_ENC_VUI_ASPECT_RATIO_IDC);
+                       mpp_write(mpp, cfg->vui.sar_size,
+                                 H265E_ENC_VUI_SAR_SIZE);
+                       mpp_write(mpp, cfg->vui.over_scan_appropriate,
+                                 H265E_ENC_VUI_OVERSCAN_APPROPRIATE);
+                       mpp_write(mpp, cfg->vui.signal,
+                                 H265E_ENC_VUI_VIDEO_SIGNAL);
+                       mpp_write(mpp, cfg->vui.chroma_sample_loc,
+                                 H265E_ENC_VUI_CHROMA_SAMPLE_LOC);
+                       mpp_write(mpp, cfg->vui.disp_win_left_right,
+                                 H265E_ENC_VUI_DISP_WIN_LEFT_RIGHT);
+                       mpp_write(mpp, cfg->vui.disp_win_top_bottom,
+                                 H265E_ENC_VUI_DISP_WIN_TOP_BOT);
+               } else {
+                       mpp_write(mpp, 0, H265E_ENC_VUI_PARAM_FLAGS);
+               }
+               if (cfg->vui_rbsp ||
+                   cfg->hrd_rbsp_in_vps ||
+                   cfg->hrd_rbsp_in_vui) {
+                       /*** VUI encoding by given rbsp data ***/
+                       mpp_write(mpp, H265E_OPT_VUI,
+                                 H265E_ENC_SET_PARAM_OPTION);
+                       value = (cfg->hrd_rbsp_in_vps << 2) |
+                               (cfg->hrd_rbsp_in_vui << 1) |
+                               (cfg->vui_rbsp);
+                       mpp_write(mpp, value,
+                                 H265E_ENC_VUI_HRD_RBSP_PARAM_FLAG);
+                       /* NOTE(review): rbsp addresses come straight from
+                        * cfg — presumably device addresses already mapped
+                        * by user space; verify the origin
+                        */
+                       mpp_write(mpp, cfg->vui_rbsp_data_addr,
+                                 H265E_ENC_VUI_RBSP_ADDR);
+                       mpp_write(mpp, cfg->vui_rbsp_data_size,
+                                 H265E_ENC_VUI_RBSP_SIZE);
+                       mpp_write(mpp, cfg->hrd_rbsp_data_addr,
+                                 H265E_ENC_HRD_RBSP_ADDR);
+                       mpp_write(mpp, cfg->hrd_rbsp_data_size,
+                                 H265E_ENC_HRD_RBSP_SIZE);
+               } else {
+                       mpp_write(mpp, 0, H265E_ENC_VUI_HRD_RBSP_PARAM_FLAG);
+               }
+               rockchip_mpp_h265e_issue_command(mpp,
+                                                index,
+                                                H265E_CMD_SET_PARAM);
+               if (rockchip_mpp_h265e_wait_busy(mpp) == -1) {
+                       mpp_err("h265e_wait_busy timeout, index=%d\n", index);
+                       goto FAIL;
+               }
+               if (mpp_read(mpp, H265E_RET_SUCCESS) == 0) {
+                       mpp_err("h265e set vui ret fail\n");
+                       goto FAIL;
+               }
+       }
+
+       mpp_debug_leave();
+       return 0;
+FAIL:
+       mpp_err("fail,index = %d\n", index);
+       return -1;
+}
+
+static int rockchip_mpp_h265e_set_parameter(struct rockchip_mpp_dev *mpp,
+                                           int index)
+{
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct rockchip_h265e_instance *instance = &enc->instance[index];
+
+       mpp_debug_enter();
+       mutex_lock(&enc->lock);
+#if    H265E_POWER_SAVE
+       rockchip_mpp_h265e_enable_clk(mpp);
+#endif
+       mpp_dev_power_on(mpp);
+       if (instance->status != H265E_INSTANCE_STATUS_OPENED) {
+               mpp_err("error:status = %d\n", instance->status);
+               goto FAIL;
+       }
+       instance->cfg.cfg_mask = H265E_CFG_CHANGE_SET_PARAM_ALL;
+       if (rockchip_mpp_h265e_set_common_parameter(mpp, index) != 0)
+               goto FAIL;
+       if (rockchip_mpp_h265e_set_gop_parameter(mpp, index) != 0)
+               goto FAIL;
+       if (rockchip_mpp_h265e_set_vui_parameter(mpp, index) != 0)
+               goto FAIL;
+       if (rockchip_mpp_h265e_register_frame_buffer(mpp, index) != 0)
+               goto FAIL;
+       instance->status = H265E_INSTANCE_STATUS_SET_PARAMETER;
+       instance->cfg.cfg_mask = 0;
+       instance->cfg.cfg_option = 0;
+#if    H265E_POWER_SAVE
+       rockchip_mpp_h265e_disable_clk(mpp);
+#endif
+       mutex_unlock(&enc->lock);
+       mpp_debug_leave();
+       return 0;
+FAIL:
+       instance->status = H265E_INSTANCE_STATUS_ERROR;
+       mutex_unlock(&enc->lock);
+       mpp_err("fail,index = %d\n", index);
+       return -1;
+}
+
+static int rockchip_mpp_h265e_change_parameter(struct rockchip_mpp_dev *mpp,
+                                              int index)
+{
+       struct rockchip_h265e_dev *enc =
+                                       container_of(mpp,
+                                                    struct rockchip_h265e_dev,
+                                                    dev);
+       struct rockchip_h265e_instance *instance = &enc->instance[index];
+       u32 enable = instance->cfg.cfg_option;
+
+       mpp_debug_enter();
+       mutex_lock(&enc->lock);
+#if    H265E_POWER_SAVE
+       rockchip_mpp_h265e_enable_clk(mpp);
+#endif
+       mpp_dev_power_on(mpp);
+       if (instance->status == H265E_INSTANCE_STATUS_ERROR ||
+           instance->status == H265E_INSTANCE_STATUS_CLOSE) {
+               mpp_err("error:status = %d\n", instance->status);
+               goto FAIL;
+       }
+
+       instance->status = H265E_INSTANCE_STATUS_OPENED;
+       if (enable & H265E_PARAM_CHANEGED_COMMON) {
+               if (rockchip_mpp_h265e_set_common_parameter(mpp, index) != 0)
+                       goto FAIL;
+       }
+       if (enable & H265E_PARAM_CHANEGED_CUSTOM_GOP) {
+               if (rockchip_mpp_h265e_set_gop_parameter(mpp, index) != 0)
+                       goto FAIL;
+       }
+       if (enable & H265E_PARAM_CHANEGED_REGISTER_BUFFER) {
+               rockchip_mpp_h265e_free_frame_buffer(mpp, instance);
+               if (rockchip_mpp_h265e_register_frame_buffer(mpp, index) != 0)
+                       goto FAIL;
+       }
+       instance->status = H265E_INSTANCE_STATUS_SET_PARAMETER;
+       instance->cfg.cfg_mask = 0;
+       instance->cfg.cfg_option = 0;
+#if    H265E_POWER_SAVE
+       rockchip_mpp_h265e_disable_clk(mpp);
+#endif
+       mutex_unlock(&enc->lock);
+       mpp_debug_leave();
+       return 0;
+FAIL:
+       instance->status = H265E_INSTANCE_STATUS_ERROR;
+       mutex_unlock(&enc->lock);
+       mpp_err("fail,index = %d\n", index);
+       return -1;
+}
+
+/*
+ * Compute the size in bytes of the luma plane of one reconstructed
+ * frame buffer for the given mapping mode.
+ *
+ * Linear and compressed maps use stride * height; the tiled sub-CTU
+ * map rounds both dimensions up to 32.  An unknown map_type logs an
+ * error and falls back to stride * height (original behavior).
+ */
+static u32 rockchip_mpp_h265e_get_fb_luma_size(u32 map_type,
+                                              u32 stride, u32 height)
+{
+       u32 size = stride * height;
+
+       switch (map_type) {
+       case LINEAR_FRAME_MAP:
+       case COMPRESSED_FRAME_MAP:
+               /* default size already correct for these modes */
+               break;
+       case TILED_SUB_CTU_MAP:
+               size = ALIGN(stride, 32) * ALIGN(height, 32);
+               break;
+       default:
+               /* fix: the original message said "may_type" */
+               mpp_err("unsupport map_type = %d\n", map_type);
+               break;
+       }
+
+       return size;
+}
+
+/*
+ * Compute the size in bytes of one chroma plane of a reconstructed
+ * frame buffer for the given mapping mode.
+ *
+ * Chroma is 4:2:0, so each plane is half the luma width and height;
+ * compressed and tiled modes additionally align the chroma width to
+ * 16, and the tiled sub-CTU map halves the result.  An unknown
+ * map_type logs an error and returns 0 (original behavior).
+ */
+static u32 rockchip_mpp_h265e_get_fb_chroma_size(u32 map_type,
+                                                u32 stride, u32 height)
+{
+       u32 size = 0;
+       u32 chroma_width = stride >> 1;
+       u32 chroma_height = height >> 1;
+
+       switch (map_type) {
+       case LINEAR_FRAME_MAP:
+               size = chroma_width * chroma_height;
+               break;
+       case COMPRESSED_FRAME_MAP:
+               chroma_width = ALIGN(chroma_width, 16);
+               size = chroma_width * chroma_height;
+               break;
+       case TILED_SUB_CTU_MAP:
+               chroma_width = ALIGN(chroma_width, 16);
+               size = chroma_width * chroma_height / 2;
+               break;
+       default:
+               /* fix: the original message said "may_type" */
+               mpp_err("unsupport map_type = %d\n", map_type);
+               break;
+       }
+
+       return size;
+}
+
+/*
+ * Allocate and register the reconstructed/reference frame buffers of
+ * one instance with the encoder firmware (H265E_CMD_SET_FRAMEBUF).
+ *
+ * For COMPRESSED_FRAME_MAP an MV-colocated buffer and the FBC
+ * luma/chroma offset tables are allocated as well; a sub-sampled
+ * buffer is always allocated.  Buffers are programmed in batches of
+ * up to eight per firmware command.  All buffers are freed again on
+ * any failure.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static
+int rockchip_mpp_h265e_register_frame_buffer(struct rockchip_mpp_dev *mpp,
+                                            int index)
+{
+       struct mpp_h265e_cfg *cfg = NULL;
+       struct rockchip_h265e_dev *enc =
+                               container_of(mpp,
+                                            struct rockchip_h265e_dev,
+                                            dev);
+       struct rockchip_h265e_instance *instance = NULL;
+       int buf_width = 0;
+       int buf_height = 0;
+       int luma_stride = 0;
+       int chroma_stride = 0;
+       int count = 0;
+       u32 value, mv_col_size;
+       u32 fbc_y_table_size = 0, fbc_c_table_size = 0, sub_sampled_size = 0;
+       int q, j, i, remain, idx;
+       int start_no, end_no;
+       u32 addr_y, addr_cb, addr_cr;
+       int stride;
+       u32 axi_id = 0;
+       int size_rec_luma, size_rec_chroma;
+       struct mpp_h265e_buffer *buffer = NULL;
+       int interlace = 0;
+       struct mpp_h265e_frame_buffer *frame_buffer = NULL;
+
+       mpp_debug_enter();
+       if (!mpp || index < 0 || index >= H265E_INSTANCE_NUM) {
+               mpp_err("parameter is invalid, index = %d\n", index);
+               return -1;
+       }
+       instance = &enc->instance[index];
+       cfg = &instance->cfg;
+       /* NV12/NV21 sources carry interleaved chroma */
+       interlace = (cfg->src_format == H265E_SRC_YUV_420_NV12) ||
+                   (cfg->src_format == H265E_SRC_YUV_420_NV21);
+
+       stride = ALIGN(cfg->width, 32);
+
+       buf_width = ALIGN(cfg->width, 8);
+       buf_height = ALIGN(cfg->height, 8);
+
+       size_rec_luma =
+                       rockchip_mpp_h265e_get_fb_luma_size(cfg->map_type,
+                                                           stride, buf_height);
+       size_rec_chroma =
+                         rockchip_mpp_h265e_get_fb_chroma_size(cfg->map_type,
+                                                               stride,
+                                                               buf_height);
+
+       count = instance->min_frame_buffer_count;
+       memset(&instance->mv, 0, sizeof(struct mpp_h265e_buffer));
+       memset(&instance->fbc_luma, 0, sizeof(struct mpp_h265e_buffer));
+       memset(&instance->fbc_chroma, 0, sizeof(struct mpp_h265e_buffer));
+       if (cfg->map_type == COMPRESSED_FRAME_MAP) {
+               /* per-frame MV-colocated buffer, one slot per frame buffer */
+               mv_col_size = H265E_MVCOL_BUF_SIZE(buf_width, buf_height);
+               mv_col_size = ALIGN(mv_col_size, 16);
+
+               instance->mv.size = ALIGN(mv_col_size * count, 4096) + 4096;
+               instance->mv.hdl =
+                       rockchip_mpp_h265e_dma_alloc(mpp,
+                                                    instance->session,
+                                                    instance->mv.size,
+                                                    MPP_ALIGN_SIZE,
+                                                    &instance->mv.dma_addr);
+               if (instance->mv.hdl < 0) {
+                       mpp_err("alloc mv buffer fail,index = %d\n", index);
+                       goto FAIL;
+               }
+
+               /* FBC luma offset table, one slot per frame buffer */
+               fbc_y_table_size =
+                       H265E_FBC_LUMA_TABLE_SIZE(buf_width,
+                                                 buf_height);
+               fbc_y_table_size =
+                       ALIGN(fbc_y_table_size, 16);
+               instance->fbc_luma.size =
+                       ALIGN(fbc_y_table_size * count, 4096) + 4096;
+               instance->fbc_luma.hdl =
+                       rockchip_mpp_h265e_dma_alloc(mpp,
+                                                    instance->session,
+                                                    instance->fbc_luma.size,
+                                                    MPP_ALIGN_SIZE,
+                                                    &instance->fbc_luma.dma_addr);
+               if (instance->fbc_luma.hdl < 0) {
+                       mpp_err("alloc fbc y buffer fail,index = %d\n", index);
+                       goto FAIL;
+               }
+
+               /* FBC chroma offset table, one slot per frame buffer */
+               fbc_c_table_size =
+                               H265E_FBC_CHROMA_TABLE_SIZE(buf_width,
+                                                           buf_height);
+               fbc_c_table_size = ALIGN(fbc_c_table_size, 16);
+               instance->fbc_chroma.size = ALIGN(fbc_c_table_size * count,
+                                                 4096) + 4096;
+               instance->fbc_chroma.hdl =
+                       rockchip_mpp_h265e_dma_alloc(mpp,
+                                                    instance->session,
+                                                    instance->fbc_chroma.size,
+                                                    MPP_ALIGN_SIZE,
+                                                    &instance->fbc_chroma.dma_addr);
+               if (instance->fbc_chroma.hdl < 0) {
+                       mpp_err("alloc fbc c buffer fail,index = %d\n", index);
+                       goto FAIL;
+               }
+       }
+
+       sub_sampled_size = H265E_SUBSAMPLED_ONE_SIZE(buf_width, buf_height);
+       memset(&instance->sub_sample, 0, sizeof(struct mpp_h265e_buffer));
+       instance->sub_sample.size =
+               ALIGN(sub_sampled_size * count, 4096) + 4096;
+       instance->sub_sample.hdl =
+               rockchip_mpp_h265e_dma_alloc(mpp,
+                                            instance->session,
+                                            instance->sub_sample.size,
+                                            MPP_ALIGN_SIZE,
+                                            &instance->sub_sample.dma_addr);
+       if (instance->sub_sample.hdl < 0) {
+               /* fix: message previously said "fbc c buffer" */
+               mpp_err("alloc sub sampled buffer fail,index = %d\n", index);
+               goto FAIL;
+       }
+       mpp_write(mpp, (u32)instance->sub_sample.dma_addr,
+                 H265E_ADDR_SUB_SAMPLED_FB_BASE);
+       mpp_write(mpp, sub_sampled_size, H265E_SUB_SAMPLED_ONE_FB_SIZE);
+
+       value = (buf_width << 16) | buf_height;
+       mpp_write(mpp, value, H265E_PIC_SIZE);
+
+       luma_stride = ALIGN(cfg->width, 16) * 4;
+       luma_stride = ALIGN(luma_stride, 32);
+       chroma_stride = ALIGN(cfg->width / 2, 16) * 4;
+       chroma_stride = ALIGN(chroma_stride, 32);
+       value = (luma_stride << 16) | chroma_stride;
+       mpp_write(mpp, value, H265E_FBC_STRIDE);
+
+       value = ((cfg->src_format == H265E_SRC_YUV_420_NV21) << 29) |
+               ((cfg->map_type == LINEAR_FRAME_MAP) << 28) |
+               (axi_id << 24) |
+               (interlace << 16) |
+               stride;
+       mpp_write(mpp, value, H265E_COMMON_PIC_INFO);
+
+       memset(&instance->frame_buffer, 0, sizeof(instance->frame_buffer));
+       /* allocate one contiguous Y/Cb/Cr buffer per reference frame */
+       for (i = 0; i < count; i++) {
+               frame_buffer = &instance->frame_buffer[i];
+               buffer = &frame_buffer->buffer;
+               buffer->size = size_rec_luma + 2 * size_rec_chroma;
+               buffer->hdl = rockchip_mpp_h265e_dma_alloc(mpp,
+                                                          instance->session,
+                                                          buffer->size,
+                                                          MPP_ALIGN_SIZE,
+                                                          &buffer->dma_addr);
+               if (buffer->hdl < 0) {
+                       /* fix: message previously said "fbc y buffer" */
+                       mpp_err("alloc frame buffer fail,index = %d\n", index);
+                       goto FAIL;
+               }
+
+               frame_buffer->y = (u32)buffer->dma_addr;
+               frame_buffer->cb = frame_buffer->y + size_rec_luma;
+               frame_buffer->cr = frame_buffer->cb + size_rec_chroma;
+       }
+
+       /* program the firmware in batches of at most eight buffers */
+       remain = count;
+       q      = (remain + 7) / 8;
+       idx    = 0;
+       for (j = 0; j < q; j++) {
+               value = (cfg->fb_endian << 16) |
+                       ((j == q - 1) << 4) |
+                       ((j == 0) << 3);
+               mpp_write(mpp, value, H265E_SFB_OPTION);
+               start_no = j * 8;
+               end_no   = start_no + (remain >= 8 ? 8 : remain) - 1;
+               value = (start_no << 8) | end_no;
+               mpp_write(mpp, value, H265E_SET_FB_NUM);
+               for (i = 0; i < 8 && i < remain; i++) {
+                       /*
+                        * fix: use the global buffer number idx here; the
+                        * original indexed frame_buffer[i], which would
+                        * re-program the first eight buffers for every
+                        * batch whenever count > 8 (the FBC/MV offsets
+                        * below already used idx).
+                        */
+                       frame_buffer = &instance->frame_buffer[idx];
+                       addr_y  = frame_buffer->y;
+                       addr_cb = frame_buffer->cb;
+                       addr_cr = frame_buffer->cr;
+                       mpp_write(mpp, addr_y,
+                                 H265E_ADDR_LUMA_BASE0 + (i << 4));
+                       mpp_write(mpp, addr_cb,
+                                 H265E_ADDR_CB_BASE0 + (i << 4));
+                       if (cfg->map_type == COMPRESSED_FRAME_MAP) {
+                               mpp_write(mpp,
+                                         ((u32)instance->fbc_luma.dma_addr) +
+                                         idx * fbc_y_table_size,
+                                         H265E_ADDR_FBC_Y_OFFSET0 + (i << 4));
+                               mpp_write(mpp,
+                                         ((u32)instance->fbc_chroma.dma_addr) +
+                                         idx * fbc_c_table_size,
+                                         H265E_ADDR_FBC_C_OFFSET0 + (i << 4));
+                               mpp_write(mpp, ((u32)instance->mv.dma_addr) +
+                                         idx * mv_col_size,
+                                         H265E_ADDR_MV_COL0 + (i << 2));
+                       } else {
+                               mpp_write(mpp, addr_cr,
+                                         H265E_ADDR_CR_BASE0 + (i << 4));
+                               mpp_write(mpp, 0,
+                                         H265E_ADDR_FBC_C_OFFSET0 + (i << 4));
+                               mpp_write(mpp, 0,
+                                         H265E_ADDR_MV_COL0 + (i << 2));
+                       }
+                       idx++;
+               }
+               remain -= i;
+               mpp_write(mpp, (u32)instance->work.dma_addr,
+                         H265E_ADDR_WORK_BASE);
+               mpp_write(mpp, (u32)instance->work.size, H265E_WORK_SIZE);
+               mpp_write(mpp, 0, H265E_WORK_PARAM);
+               rockchip_mpp_h265e_issue_command(mpp,
+                                                index,
+                                                H265E_CMD_SET_FRAMEBUF);
+               if (rockchip_mpp_h265e_wait_busy(mpp) == -1) {
+                       mpp_err("rockchip_mpp_h265e_wait_busy timeout\n");
+                       goto FAIL;
+               }
+       }
+       if (mpp_read(mpp, H265E_RET_SUCCESS) == 0) {
+               mpp_err("h265e register frame buffer ret fail\n");
+               goto FAIL;
+       }
+       mpp_debug_leave();
+       return 0;
+FAIL:
+       rockchip_mpp_h265e_free_frame_buffer(mpp, instance);
+       mpp_err("fail,index = %d\n", index);
+       return -1;
+}
+
+/*
+ * Program and kick one H265E_CMD_ENC_PIC for the given instance.
+ *
+ * Writes the bitstream, work/temp, source picture, SEI, ROI and CTU
+ * registers, then issues the encode command.
+ *
+ * NOTE(review): enc->lock is taken here and is NOT released on the
+ * success path — it appears to be released later in
+ * rockchip_mpp_h265e_get_encode_result() after completion.  Confirm
+ * every completion/error path eventually unlocks.
+ *
+ * Returns 0 when the command was issued, -1 on invalid arguments or
+ * wrong instance state.
+ */
+static int rockchip_mpp_h265e_encode_one_frame(struct rockchip_mpp_dev *mpp,
+                                              struct h265e_ctx *ctx,
+                                              int index)
+{
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct mpp_h265e_encode_info *en_info = &ctx->cfg;
+       struct rockchip_h265e_instance *instance = &enc->instance[index];
+       struct mpp_h265e_cfg *cfg = &instance->cfg;
+       int luma_stride = 0;
+       int chroma_stride = 0;
+       int src_format = 0;
+       u32 value, src_y, src_cb, src_cr;
+       int interlace = 0;
+       u32 roi_enable = 0;
+       u32 ctu_qp_enable = 0;
+
+       mpp_debug_enter();
+       /* NOTE(review): cfg = &instance->cfg can never be NULL here */
+       if (!cfg || !ctx)
+               return -1;
+
+       mutex_lock(&enc->lock);
+       /* timestamp for the per-frame encode-time debug print */
+       h265e_last = ktime_get();
+#if H265E_POWER_SAVE
+       rockchip_mpp_h265e_enable_clk(mpp);
+#endif
+       mpp_dev_power_on(mpp);
+       /* encoding requires a fully configured instance */
+       if (instance->status != H265E_INSTANCE_STATUS_SET_PARAMETER) {
+               mutex_unlock(&enc->lock);
+               mpp_err("fail,status = %d,index = %d\n",
+                       instance->status, index);
+               return -1;
+       }
+
+       luma_stride = cfg->width_stride;
+       /* NV12/NV21 sources carry interleaved chroma */
+       interlace = (cfg->src_format == H265E_SRC_YUV_420_NV12) ||
+                   (cfg->src_format == H265E_SRC_YUV_420_NV21);
+       /* source-format codes written to the firmware register */
+       if (cfg->src_format == H265E_SRC_YUV_420_NV12)
+               src_format = 0x02;
+       else if (cfg->src_format == H265E_SRC_YUV_420_NV21)
+               src_format = 0x03;
+       if (cfg->map_type == TILED_SUB_CTU_MAP)
+               src_format = 0x1;
+       mpp_write(mpp, 0xfffffff2, H265E_PERF_LATENCY_CTRL0);
+       /* bitstream buffer: start address, size, read/write pointers */
+       mpp_write(mpp, (u32)ctx->bs.dma_addr, H265E_BS_START_ADDR);
+       mpp_write(mpp, ctx->bs.size, H265E_BS_SIZE);
+       mpp_write(mpp, (u32)ctx->bs.dma_addr, H265E_BS_RD_PTR);
+       mpp_write(mpp, (u32)ctx->bs.dma_addr, H265E_BS_WR_PTR);
+
+       value = (cfg->line_buf_int_en << 6) |
+               (cfg->slice_int_enable << 5) |
+               (cfg->ring_buffer_enable << 4) |
+               cfg->bs_endian;
+       mpp_write(mpp, value, H265E_BS_PARAM);
+       mpp_write(mpp, 0, H265E_BS_OPTION);
+
+       /* per-instance work buffer and shared temp buffer */
+       mpp_write(mpp, instance->work.dma_addr, H265E_ADDR_WORK_BASE);
+       mpp_write(mpp, instance->work.size, H265E_WORK_SIZE);
+       mpp_write(mpp, 0, H265E_WORK_PARAM);
+
+       mpp_write(mpp, enc->temp.dma_addr, H265E_ADDR_TEMP_BASE);
+       mpp_write(mpp, enc->temp.size, H265E_TEMP_SIZE);
+       mpp_write(mpp, 0, H265E_TEMP_PARAM);
+
+#if    H265E_AXI_STATUS
+       mpp_write(mpp, 0x0, H265E_ADDR_SEC_AXI_BASE);
+       mpp_write(mpp, H265E_SEC_AXI_BUF_SIZE, H265E_SEC_AXI_SIZE);
+       mpp_write(mpp, 0xffff, H265E_USE_SEC_AXI);
+#else
+       mpp_write(mpp, 0, H265E_USE_SEC_AXI);
+#endif
+       /* header emission: implicit (VPS/SPS/PPS handled by fw) or explicit */
+       if (cfg->code_option.implicit_header_encode == 1) {
+               value = CODEOPT_ENC_HEADER_IMPLICIT     |
+                       CODEOPT_ENC_VCL |
+                       (cfg->code_option.encode_aud << 5) |
+                       (cfg->code_option.encode_eos << 6) |
+                       (cfg->code_option.encode_eob << 7);
+       } else {
+               value = (cfg->code_option.implicit_header_encode << 0) |
+                       (cfg->code_option.encode_vcl << 1) |
+                       (cfg->code_option.encode_vps << 2) |
+                       (cfg->code_option.encode_sps << 3) |
+                       (cfg->code_option.encode_pps << 4) |
+                       (cfg->code_option.encode_aud << 5) |
+                       (cfg->code_option.encode_eos << 6) |
+                       (cfg->code_option.encode_eob << 7) |
+                       (cfg->code_option.encode_vui << 9);
+       }
+
+       mpp_write(mpp, value, H265E_CMD_ENC_CODE_OPTION);
+
+       value = (en_info->skip_pic << 0) |
+               (en_info->force_qp_enable << 1) |
+               (en_info->force_qp_i << 2) |
+               (en_info->force_qp_p << 8) |
+               (0 << 14) |
+               (en_info->force_frame_type_enable << 20) |
+               (en_info->force_frame_type << 21);
+       mpp_write(mpp, value, H265E_CMD_ENC_PIC_PARAM);
+       /* 0xFFFFFFFE presumably signals end-of-stream to the firmware */
+       if (en_info->stream_end == 1) {
+               mpp_debug(DEBUG_H265E_INFO,
+                         "%s %d instance %d en_info->stream_end\n",
+                         __func__, __LINE__, index);
+               mpp_write(mpp, 0xFFFFFFFE, H265E_CMD_ENC_SRC_PIC_IDX);
+       } else {
+               mpp_write(mpp, instance->src_idx, H265E_CMD_ENC_SRC_PIC_IDX);
+       }
+       /* advance the round-robin source picture slot */
+       instance->src_idx++;
+       instance->src_idx = instance->src_idx % instance->min_src_frame_count;
+       /* source planes are laid out Y, then chroma, in one buffer */
+       src_y = (u32)ctx->src.dma_addr;
+       src_cb = src_y + luma_stride * cfg->height_stride;
+       src_cr = src_cb + luma_stride * cfg->height_stride / 4;
+       mpp_write(mpp, src_y, H265E_CMD_ENC_SRC_ADDR_Y);
+       /* YV12 stores V before U, so swap the chroma registers */
+       if (cfg->src_format == H265E_SRC_YUV_420_YV12) {
+               mpp_write(mpp, src_cb, H265E_CMD_ENC_SRC_ADDR_V);
+               mpp_write(mpp, src_cr, H265E_CMD_ENC_SRC_ADDR_U);
+       } else {
+               mpp_write(mpp, src_cb, H265E_CMD_ENC_SRC_ADDR_U);
+               mpp_write(mpp, src_cr, H265E_CMD_ENC_SRC_ADDR_V);
+       }
+       /* interleaved chroma uses the full stride, planar uses half */
+       chroma_stride = (interlace == 1) ? luma_stride : (luma_stride >> 1);
+       if (cfg->map_type == TILED_SUB_CTU_MAP)
+               chroma_stride = luma_stride;
+       mpp_write(mpp, (luma_stride << 16) | chroma_stride,
+                 H265E_CMD_ENC_SRC_STRIDE);
+       value = (src_format << 0) | (cfg->src_endian << 6);
+       mpp_write(mpp, value, H265E_CMD_ENC_SRC_FORMAT);
+       /* optional prefix SEI NAL */
+       value = 0;
+       if (cfg->sei.prefix_sei_nal_enable) {
+               mpp_write(mpp, cfg->sei.prefix_sei_nal_addr,
+                         H265E_CMD_ENC_PREFIX_SEI_NAL_ADDR);
+               value = cfg->sei.prefix_sei_data_size << 16 |
+                       cfg->sei.prefix_sei_data_order << 1 |
+                       cfg->sei.prefix_sei_nal_enable;
+       }
+       mpp_write(mpp, value, H265E_CMD_ENC_PREFIX_SEI_INFO);
+
+       /* optional suffix SEI NAL */
+       value = 0;
+       if (cfg->sei.suffix_sei_nal_enable) {
+               mpp_write(mpp, cfg->sei.suffix_sei_nal_addr,
+                         H265E_CMD_ENC_SUFFIX_SEI_NAL_ADDR);
+               value = (cfg->sei.suffix_sei_data_size << 16) |
+                       (cfg->sei.suffix_sei_data_enc_order << 1) |
+                       cfg->sei.suffix_sei_nal_enable;
+       }
+       mpp_write(mpp, value, H265E_CMD_ENC_SUFFIX_SEI_INFO);
+
+       mpp_write(mpp, (u32)ctx->roi.dma_addr,
+                 H265E_CMD_ENC_ROI_ADDR_CTU_MAP);
+       mpp_write(mpp, (u32)ctx->ctu.dma_addr,
+                 H265E_CMD_ENC_CTU_QP_MAP_ADDR);
+
+       /* ROI / CTU-QP maps are only honored when their buffers exist */
+       if (ctx->roi.dma_addr == 0 || ctx->roi.hdl < 0)
+               roi_enable = 0;
+       else
+               roi_enable = cfg->ctu.roi_enable;
+
+       if (ctx->ctu.dma_addr == 0 || ctx->ctu.hdl < 0)
+               ctu_qp_enable = 0;
+       else
+               ctu_qp_enable = cfg->ctu.ctu_qp_enable;
+       value = ((roi_enable) << 0) |
+                       (cfg->ctu.roi_delta_qp << 1) |
+                       (ctu_qp_enable << 9) |
+                       (cfg->ctu.map_endian << 12) |
+                       (cfg->ctu.map_stride << 16);
+
+       mpp_debug(DEBUG_H265E_INFO,
+                 "roi_enable = %d,roi_delta_qp = %d,ctu_qp_enable = %d\n",
+                 cfg->ctu.roi_enable, cfg->ctu.roi_delta_qp, ctu_qp_enable);
+       mpp_write(mpp, value,
+                 H265E_CMD_ENC_CTU_OPT_PARAM);
+
+       mpp_write(mpp, 0, H265E_CMD_ENC_SRC_TIMESTAMP_LOW);
+       mpp_write(mpp, 0, H265E_CMD_ENC_SRC_TIMESTAMP_HIGH);
+
+       value = (cfg->use_cur_as_longterm_pic << 0) |
+               (cfg->use_longterm_ref << 1);
+       mpp_write(mpp, value, H265E_CMD_ENC_LONGTERM_PIC);
+
+       mpp_write(mpp, 0, H265E_CMD_ENC_SUB_FRAME_SYNC_CONFIG);
+       /* kick the encoder; completion is handled asynchronously */
+       rockchip_mpp_h265e_issue_command(mpp, index, H265E_CMD_ENC_PIC);
+       mpp_debug_leave();
+       return 0;
+}
+
+/*
+ * Collect the result of a finished H265E_CMD_ENC_PIC command.
+ *
+ * Reads the firmware return registers into ctx->result and releases
+ * enc->lock that was taken in rockchip_mpp_h265e_encode_one_frame().
+ *
+ * Returns 0 on success; -1 on invalid parameters or when the firmware
+ * reported failure (result->fail_reason is then filled in).
+ */
+static int rockchip_mpp_h265e_get_encode_result(struct rockchip_mpp_dev *mpp,
+                                               struct h265e_ctx *ctx)
+{
+       u32 value, rd, wt;
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct h265e_result *result = NULL;
+       struct h265e_session *session = NULL;
+       int index;
+
+       mpp_debug_enter();
+       /*
+        * NOTE(review): this early return does not unlock enc->lock,
+        * which encode_one_frame left held — confirm callers never hit
+        * this path with the lock taken.
+        */
+       if (!mpp || !ctx) {
+               mpp_err("param is invalid");
+               return -1;
+       }
+       session = container_of(ctx->ictx.session,
+                              struct h265e_session,
+                              isession);
+       index = session->instance_index;
+       result = &ctx->result;
+       value = mpp_read(mpp, H265E_RET_SUCCESS);
+       if (value == 0) {
+               /* firmware rejected the command; report why */
+               result->fail_reason = mpp_read(mpp, H265E_RET_FAIL_REASON);
+               mpp_err("fail reason = 0x%x", result->fail_reason);
+               mutex_unlock(&enc->lock);
+               mpp_debug_leave();
+               return -1;
+       }
+       result->fail_reason = 0;
+       /* copy the per-picture statistics out of the return registers */
+       result->enc_pic_cnt = mpp_read(mpp, H265E_RET_ENC_PIC_NUM);
+       value = mpp_read(mpp, H265E_RET_ENC_PIC_TYPE);
+       result->pic_type         = value & 0xFFFF;
+       result->recon_frame_index = mpp_read(mpp, H265E_RET_ENC_PIC_IDX);
+       result->num_of_slice     = mpp_read(mpp, H265E_RET_ENC_PIC_SLICE_NUM);
+       result->pick_skipped     = mpp_read(mpp, H265E_RET_ENC_PIC_SKIP);
+       result->num_intra        = mpp_read(mpp, H265E_RET_ENC_PIC_NUM_INTRA);
+       result->num_merge        = mpp_read(mpp, H265E_RET_ENC_PIC_NUM_MERGE);
+       result->num_skip_block   = mpp_read(mpp, H265E_RET_ENC_PIC_NUM_SKIP);
+       result->avg_ctu_qp       = mpp_read(mpp, H265E_RET_ENC_PIC_AVG_CU_QP);
+       result->bs_size          = mpp_read(mpp, H265E_RET_ENC_PIC_BYTE);
+       result->gop_idx    = mpp_read(mpp, H265E_RET_ENC_GOP_PIC_IDX);
+       result->poc       = mpp_read(mpp, H265E_RET_ENC_PIC_POC);
+       result->src_idx       = mpp_read(mpp, H265E_RET_ENC_USED_SRC_IDX);
+       rd = mpp_read(mpp, H265E_BS_RD_PTR);
+       wt = mpp_read(mpp, H265E_BS_WR_PTR);
+#if PRINT_BS_DATA
+       w_bs_size = result->bs_size;
+#endif
+       /* measure wall time since the matching encode_one_frame() */
+       h265e_now = ktime_get();
+       mpp_debug(DEBUG_H265E_ENCODE_ONE_FRAME,
+                 "h265e encode time is:%d us\n",
+                 (int)ktime_to_us(ktime_sub(h265e_now, h265e_last)));
+       mpp_debug(DEBUG_H265E_ENCODE_ONE_FRAME,
+                 "RD_AXI_BYTE=%d,WR_AXI_BYTE=%d,WORK_CNT=%d\n",
+                 mpp_read(mpp, H265E_PERF_RD_AXI_TOTAL_BYTE),
+                 mpp_read(mpp, H265E_PERF_WR_AXI_TOTAL_BYTE),
+                 mpp_read(mpp, H265E_PERF_WORKING_CNT));
+       mpp_debug(DEBUG_H265E_ENCODE_ONE_FRAME,
+                 "index = %d, bs_size = %d,size = %d\n",
+                 index, result->bs_size, wt - rd);
+       /* a negative recon index means no picture was produced */
+       if (result->recon_frame_index < 0)
+               result->bs_size   = 0;
+#if H265E_POWER_SAVE
+       rockchip_mpp_h265e_disable_clk(mpp);
+#endif
+       mutex_unlock(&enc->lock);
+       mpp_debug_leave();
+       return 0;
+}
+
+static long rockchip_mpp_h265e_ioctl(struct mpp_session *isession,
+                                    unsigned int cmd,
+                                    unsigned long arg)
+{
+       struct rockchip_mpp_dev *mpp = isession->mpp;
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+
+       struct rockchip_h265e_instance *instance = NULL;
+       struct h265e_session *session =
+                                       container_of(isession,
+                                                    struct h265e_session,
+                                                    isession);
+
+       int ret = 0;
+       int index = session->instance_index;
+
+       if (index < 0 || index >= H265E_INSTANCE_NUM) {
+               mpp_err("error: index = %d is invalid\n", index);
+               return -1;
+       }
+
+       instance = &enc->instance[index];
+       switch (cmd) {
+       case MPP_DEV_H265E_SET_COLOR_PALETTE:
+               break;
+       case MPP_DEV_H265E_SET_PARAMETER:
+               if (copy_from_user(&instance->cfg, (void __user *)arg,
+                                  sizeof(struct mpp_h265e_cfg))) {
+                       mpp_err("error: set reg copy_from_user failed\n");
+                       return -EFAULT;
+               }
+               if (instance->status == H265E_INSTANCE_STATUS_OPENED)
+                       ret = rockchip_mpp_h265e_set_parameter(mpp, index);
+               else
+                       ret = rockchip_mpp_h265e_change_parameter(mpp, index);
+               break;
+       case MPP_DEV_H265E_GET_HEAD_PARAMETER:
+               {
+                       struct hal_h265e_header head;
+
+                       if (copy_from_user(&head,
+                                          (void __user *)arg, sizeof(head))) {
+                               mpp_err("error: set reg copy_from_user failed\n");
+                               return -EFAULT;
+                       }
+                       head.size = 0;
+#ifdef H265E_STREAM_HEADER
+                       if (rockchip_mpp_h265e_get_stream_header(mpp,
+                                                                index, &head))
+                               head.size = 0;
+#endif
+                       if (copy_to_user((void __user *)arg,
+                                        &head, sizeof(head))) {
+                               mpp_err("copy result to user failed\n");
+                               return -1;
+                       }
+               }
+               break;
+       default:
+               break;
+       }
+       return ret;
+}
+
+/* mpp_dev_common dispatch table wiring the h265e core into the
+ * generic mpp service (prepare is unused for this encoder).
+ */
+struct mpp_dev_ops h265e_ops = {
+       .init = rockchip_mpp_h265e_ctx_init,
+       .prepare = NULL,
+       .run = rockchip_mpp_h265e_run,
+       .done = rockchip_mpp_h265e_done,
+       .irq = rockchip_mpp_h265e_irq,
+       .result = rockchip_mpp_h265e_result,
+       .ioctl = rockchip_mpp_h265e_ioctl,
+       .open = rockchip_mpp_h265e_open,
+       .release = rockchip_mpp_h265e_release,
+};
+
+/*
+ * Enable every clock the encoder core needs.  Each clock may be NULL
+ * when the DT does not provide it, hence the checks.
+ *
+ * NOTE(review): clk_prepare_enable() can fail; its return value is
+ * ignored here — consider propagating errors.
+ */
+static void rockchip_mpp_h265e_enable_clk(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+
+       if (enc->aclk)
+               clk_prepare_enable(enc->aclk);
+       if (enc->pclk)
+               clk_prepare_enable(enc->pclk);
+       if (enc->core)
+               clk_prepare_enable(enc->core);
+       if (enc->dsp)
+               clk_prepare_enable(enc->dsp);
+#if H265E_AXI_STATUS
+       if (enc->aclk_axi2sram)
+               clk_prepare_enable(enc->aclk_axi2sram);
+#endif
+}
+
+/*
+ * Disable the encoder clocks, roughly in reverse enable order.
+ *
+ * NOTE(review): aclk_axi2sram is disabled after aclk even though it
+ * was enabled last — confirm the ordering is intentional.
+ */
+static void rockchip_mpp_h265e_disable_clk(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+
+       if (enc->dsp)
+               clk_disable_unprepare(enc->dsp);
+       if (enc->core)
+               clk_disable_unprepare(enc->core);
+       if (enc->pclk)
+               clk_disable_unprepare(enc->pclk);
+       if (enc->aclk)
+               clk_disable_unprepare(enc->aclk);
+#if H265E_AXI_STATUS
+       if (enc->aclk_axi2sram)
+               clk_disable_unprepare(enc->aclk_axi2sram);
+#endif
+}
+
+/* Power-on hook for the mpp core: currently just enables the clocks. */
+static void rockchip_mpp_h265e_power_on(struct rockchip_mpp_dev *mpp)
+{
+       rockchip_mpp_h265e_enable_clk(mpp);
+}
+
+/* Power-off hook for the mpp core: currently just disables the clocks. */
+static void rockchip_mpp_h265e_power_off(struct rockchip_mpp_dev *mpp)
+{
+       rockchip_mpp_h265e_disable_clk(mpp);
+}
+
+static struct mpp_session *rockchip_mpp_h265e_open(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct h265e_session *session = kzalloc(sizeof(*session), GFP_KERNEL);
+       u32 code_base;
+       u32     i, reg_val = 0, remap_size = 0, ret;
+       struct rockchip_h265e_instance *instance = NULL;
+       int index = 0;
+
+       mpp_debug_enter();
+       mutex_lock(&enc->lock);
+       if (!session) {
+               mpp_err("failed to allocate h265e_session data");
+               goto NFREE_FAIL;
+       }
+#if    H265E_POWER_SAVE
+       rockchip_mpp_h265e_enable_clk(mpp);
+#endif
+       mpp_dev_power_on(mpp);
+
+       if (!atomic_read(&enc->load_firmware)) {
+               ret = rockchip_mpp_h265e_load_firmware(mpp);
+               if (ret)
+                       goto NFREE_FAIL;
+               atomic_inc(&enc->load_firmware);
+               enc->temp.size = H265E_TEMP_BUFFER_SIZE;
+               enc->temp.hdl =
+                       rockchip_mpp_h265e_global_dma_alloc(mpp,
+                                                           enc->temp.size,
+                                                           MPP_ALIGN_SIZE,
+                                                           &enc->temp.dma_addr);
+               if (enc->temp.hdl < 0) {
+                       mpp_err("error: alloc temp buffer error\n");
+                       goto NFREE_FAIL;
+               }
+       }
+       for (i = 0; i < H265E_INSTANCE_NUM; i++) {
+               instance = &enc->instance[i];
+               if (!atomic_read(&instance->is_used)) {
+                       instance->work.size = H265E_WORK_BUFFER_SIZE;
+                       instance->work.hdl =
+                               rockchip_mpp_h265e_global_dma_alloc(mpp,
+                                                                   instance->work.size,
+                                                                   MPP_ALIGN_SIZE,
+                                                                   &instance->work.dma_addr);
+                       instance->index = i;
+                       atomic_set(&instance->is_used, 1);
+                       break;
+               }
+       }
+       if (i == H265E_INSTANCE_NUM) {
+               mpp_err("error: the num of instance up to H265E_INSTANCE_NUM\n");
+               goto NFREE_FAIL;
+       }
+       index = instance->index;
+       instance->status = H265E_INSTANCE_STATUS_ERROR;
+       mpp_debug(DEBUG_H265E_INFO,
+                 "%s = %d\n", __func__, index);
+       instance->session = &session->isession;
+       session->instance_index = index;
+       code_base = (u32)enc->firmware.dma_addr;
+       mpp_debug(DEBUG_H265E_INFO, "h265e code_base = %x\n", code_base);
+       if (!atomic_read(&enc->is_init)) {
+               mpp_write(mpp, 0x0, H265E_PO_CONF);
+               mpp_write(mpp, 0x7ffffff, H265E_VPU_RESET_REQ);
+
+               if (rockchip_mpp_h265e_wait_busy(mpp) == -1) {
+                       mpp_err("rockchip_mpp_h265e_wait_busy timeout\n");
+                       mpp_write(mpp, 0, H265E_VPU_RESET_REQ);
+                       goto FAIL;
+               }
+               mpp_write(mpp, 0, H265E_VPU_RESET_REQ);
+               for (i = H265E_COMMAND; i < H265E_CMD_REG_END; i += 4)
+                       mpp_write(mpp, 0x00, i);
+               remap_size = 0x100;
+               reg_val = 0x80000000 | (0 << 16) |
+                         (H265E_REMAP_CODE_INDEX << 12) |
+                         (1 << 11) | remap_size;
+               mpp_write(mpp, reg_val, H265E_VPU_REMAP_CTRL);
+               mpp_write(mpp, 0x00000000, H265E_VPU_REMAP_VADDR);
+               mpp_write(mpp, code_base, H265E_VPU_REMAP_PADDR);
+               mpp_write(mpp, code_base, H265E_ADDR_CODE_BASE);
+               mpp_write(mpp, H265E_CODE_BUFFER_SIZE, H265E_CODE_SIZE);
+               mpp_write(mpp, 0, H265E_CODE_PARAM);
+               mpp_write(mpp, 0, H265E_HW_OPTION);
+               mpp_write(mpp, H265E_INT_OPEN, H265E_VPU_VINT_ENABLE);
+               mpp_write(mpp, 0xfffffff2, H265E_PERF_LATENCY_CTRL0);
+               mpp_write(mpp, 0x0, H265E_PERF_LATENCY_CTRL1);
+               mpp_write(mpp, 0x1, H265E_PERF_AXI_CTRL);
+               mpp_write(mpp, 0x01, H265E_VPU_BUSY_STATUS);
+               mpp_write(mpp, H265E_CMD_INIT_VPU, H265E_COMMAND);
+               mpp_write(mpp, 0x01, H265E_VPU_REMAP_CORE_START);
+               if (rockchip_mpp_h265e_wait_busy(mpp) == -1) {
+                       mpp_err("rockchip_mpp_h265e_wait_busy timeout\n");
+                       goto FAIL;
+               }
+               if (mpp_read(mpp, H265E_RET_SUCCESS) == 0) {
+                       mpp_err("h265e init ret fail\n");
+                       goto FAIL;
+               }
+               /* start Init command*/
+               rockchip_mpp_h265e_issue_command(mpp,
+                                                index,
+                                                H265E_CMD_GET_FW_VERSION);
+               if (rockchip_mpp_h265e_wait_busy(mpp) == -1) {
+                       mpp_err("rockchip_mpp_h265e_wait_busy timeout\n");
+                       goto FAIL;
+               }
+               if (mpp_read(mpp, H265E_RET_SUCCESS) == 0) {
+                       mpp_err("h265e creat instance ret fail\n");
+                       goto FAIL;
+               }
+               reg_val = mpp_read(mpp, H265E_RET_FW_VERSION);
+               mpp_debug(DEBUG_H265E_INFO,
+                         "get_firmware_version:VERSION=%d\n", reg_val);
+               atomic_inc(&enc->is_init);
+       }
+       mpp_write(mpp, 0x0, H265E_CORE_INDEX);
+       mpp_write(mpp, 0x00010000 | index, H265E_INST_INDEX);
+       mpp_write(mpp, (u32)instance->work.dma_addr, H265E_ADDR_WORK_BASE);
+       mpp_write(mpp, H265E_WORK_BUFFER_SIZE, H265E_WORK_SIZE);
+       mpp_write(mpp, 0, H265E_WORK_PARAM);
+       mpp_debug(DEBUG_H265E_INFO,
+                 "open instance=%d work addr=%x\n",
+                 index,
+                 (u32)instance->work.dma_addr);
+       /* create instance*/
+       rockchip_mpp_h265e_issue_command(mpp, index, H265E_CMD_CREATE_INSTANCE);
+       if (rockchip_mpp_h265e_wait_busy(mpp) == -1) {
+               mpp_err("rockchip_mpp_h265e_wait_busy timeout\n");
+               goto FAIL;
+       }
+       if (mpp_read(mpp, H265E_RET_SUCCESS) == 0) {
+               mpp_err("h265e creat instance ret fail\n");
+               goto FAIL;
+       }
+       /* set default buffer counter*/
+       instance->min_frame_buffer_count = 2;
+       instance->min_src_frame_count = 2;
+       instance->src_idx = 0;
+       instance->status = H265E_INSTANCE_STATUS_OPENED;
+#if H265E_POWER_SAVE
+       rockchip_mpp_h265e_disable_clk(mpp);
+#endif
+       mutex_unlock(&enc->lock);
+       mpp_debug_leave();
+       return &session->isession;
+FAIL:
+       rockchip_mpp_h265e_free_instance(mpp, index);
+NFREE_FAIL:
+       kfree(session);
+       session = NULL;
+       mutex_unlock(&enc->lock);
+       mpp_err("h265e open fail\n");
+       return NULL;
+}
+
+static void rockchip_mpp_h265e_release(struct mpp_session *isession)
+{
+       struct h265e_session *session =
+                                       container_of(isession,
+                                                    struct h265e_session,
+                                                    isession);
+       struct rockchip_mpp_dev *mpp = session->isession.mpp;
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       int index = 0;
+
+       mpp_debug_enter();
+       mutex_lock(&enc->lock);
+#if    H265E_POWER_SAVE
+       rockchip_mpp_h265e_enable_clk(mpp);
+#endif
+       mpp_dev_power_on(mpp);
+       index = session->instance_index;
+       rockchip_mpp_h265e_issue_command(mpp, index, H265E_CMD_FINI_SEQ);
+       if (rockchip_mpp_h265e_wait_busy(mpp) == -1)
+               mpp_err("h265e_wait_busy timeout,index=%d\n", index);
+       if (mpp_read(mpp, H265E_RET_SUCCESS) == 0)
+               mpp_err("h265e close instance %d ret fail\n", index);
+       rockchip_mpp_h265e_free_instance(mpp, index);
+       kfree(session);
+#if    H265E_POWER_SAVE
+       rockchip_mpp_h265e_disable_clk(mpp);
+#endif
+       mutex_unlock(&enc->lock);
+       mpp_debug_leave();
+}
+
+static int rockchip_mpp_h265e_probe(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct device_node *np = mpp->dev->of_node;
+       int i;
+
+       enc->dev.ops = &h265e_ops;
+       for (i = 0; i < H265E_INSTANCE_NUM; i++)
+               atomic_set(&enc->instance[i].is_used, 0);
+       atomic_set(&enc->load_firmware, 0);
+       atomic_set(&enc->is_init, 0);
+       mutex_init(&enc->lock);
+#if H265E_CLK
+       enc->aclk = devm_clk_get(mpp->dev, "aclk_h265");
+       if (IS_ERR_OR_NULL(enc->aclk)) {
+               dev_err(mpp->dev, "failed on clk_get aclk\n");
+               enc->aclk = NULL;
+               goto fail;
+       }
+       enc->pclk = devm_clk_get(mpp->dev, "pclk_h265");
+       if (IS_ERR_OR_NULL(enc->pclk)) {
+               dev_err(mpp->dev, "failed on clk_get pclk\n");
+               enc->pclk = NULL;
+               goto fail;
+       }
+       enc->core = devm_clk_get(mpp->dev, "clk_core");
+       if (IS_ERR_OR_NULL(enc->core)) {
+               dev_err(mpp->dev, "failed on clk_get core\n");
+               enc->core = NULL;
+               goto fail;
+       }
+       enc->dsp = devm_clk_get(mpp->dev, "clk_dsp");
+       if (IS_ERR_OR_NULL(enc->dsp)) {
+               dev_err(mpp->dev, "failed on clk_get dsp\n");
+               enc->dsp = NULL;
+               goto fail;
+       }
+#if H265E_AXI_STATUS
+       enc->aclk_axi2sram = devm_clk_get(mpp->dev, "aclk_axi2sram");
+       if (IS_ERR_OR_NULL(enc->aclk_axi2sram)) {
+               dev_err(mpp->dev, "failed on clk_get aclk_axi2sram\n");
+               enc->aclk_axi2sram = NULL;
+               goto fail;
+       }
+#endif
+#endif
+       if (of_property_read_bool(np, "mode_ctrl")) {
+               of_property_read_u32(np, "mode_bit", &enc->mode_bit);
+               of_property_read_u32(np, "mode_ctrl", &enc->mode_ctrl);
+#ifdef CONFIG_MFD_SYSCON
+               enc->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+               if (IS_ERR_OR_NULL(enc->grf)) {
+                       enc->grf = NULL;
+                       mpp_err("can't find vpu grf property\n");
+                       return -1;
+               }
+#endif
+       }
+
+       return 0;
+#if H265E_CLK
+fail:
+       return -1;
+#endif
+}
+
+/*
+ * Device teardown: free every per-instance DMA buffer (work buffer and
+ * frame buffers), the shared temp buffer and the firmware buffer, and
+ * reset the init/firmware state flags.
+ */
+static void rockchip_mpp_h265e_remove(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_h265e_dev *enc =
+                                        container_of(mpp,
+                                                     struct rockchip_h265e_dev,
+                                                     dev);
+       struct rockchip_h265e_instance *instance = NULL;
+       struct mpp_h265e_buffer *buf = NULL;
+       /*
+        * NOTE(review): list_first_entry() on mpp->srv->session returns a
+        * bogus pointer if the session list is empty; presumably the mpp
+        * service guarantees at least one session exists at remove time —
+        * TODO confirm against mpp_service.c.
+        */
+       struct mpp_session *session = list_first_entry(&mpp->srv->session,
+                                                      struct mpp_session,
+                                                      list_session);
+       int i = 0;
+
+       mpp_debug_enter();
+       mutex_lock(&enc->lock);
+       for (i = 0; i < H265E_INSTANCE_NUM; i++) {
+               instance = &enc->instance[i];
+               if (atomic_read(&instance->is_used) == 1) {
+                       buf = &instance->work;
+                       /* hdl < 0 means the buffer was never allocated. */
+                       if (buf->hdl >= 0) {
+                               vpu_iommu_unmap_kernel(mpp->iommu_info,
+                                                      session, buf->hdl);
+                               vpu_iommu_free(mpp->iommu_info, session,
+                                              buf->hdl);
+                       }
+                       rockchip_mpp_h265e_free_frame_buffer(mpp, instance);
+                       atomic_set(&instance->is_used, 0);
+               }
+       }
+       /* Force firmware reload and core re-init on the next open. */
+       atomic_set(&enc->is_init, 0);
+       atomic_set(&enc->load_firmware, 0);
+       buf = &enc->temp;
+       if (buf->hdl >= 0) {
+               vpu_iommu_unmap_kernel(mpp->iommu_info, session, buf->hdl);
+               vpu_iommu_free(mpp->iommu_info, session, buf->hdl);
+       }
+
+       if (enc->firmware.hdl >= 0) {
+               vpu_iommu_unmap_kernel(mpp->iommu_info, session,
+                                      enc->firmware.hdl);
+               vpu_iommu_free(mpp->iommu_info, session, enc->firmware.hdl);
+       }
+       mutex_unlock(&enc->lock);
+       mpp_debug_leave();
+}
+
+/* Variant description handed to the common mpp driver core. */
+const struct rockchip_mpp_dev_variant h265e_variant = {
+       .data_len = sizeof(struct rockchip_h265e_dev),
+       .hw_probe = rockchip_mpp_h265e_probe,
+       .hw_remove = rockchip_mpp_h265e_remove,
+       .power_on = rockchip_mpp_h265e_power_on,
+       .power_off = rockchip_mpp_h265e_power_off,
+       /* No translation table, extra IOMMU DT node or reset hook. */
+       .trans_info = NULL,
+       .mmu_dev_dts_name = NULL,
+       .reset = NULL,
+};
+EXPORT_SYMBOL(h265e_variant);
diff --git a/drivers/video/rockchip/vpu/mpp_dev_h265e.h b/drivers/video/rockchip/vpu/mpp_dev_h265e.h
new file mode 100644 (file)
index 0000000..a4a40c6
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: hehua,hh@rock-chips.com
+ * lixinhuang, buluess.li@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ROCKCHIP_MPP_DEV_H265E_H
+#define __ROCKCHIP_MPP_DEV_H265E_H
+
+#include "mpp_dev_h265e_define.h"
+#include "mpp_service.h"
+#include <linux/ioctl.h>
+#include <linux/wakelock.h>
+
+/* Custom ioctls of the h265e device, layered on the common mpp magic. */
+#define MPP_DEV_H265E_SET_COLOR_PALETTE        \
+               _IOW(MPP_IOC_MAGIC, MPP_IOC_CUSTOM_BASE + 1, u32)
+
+#define MPP_DEV_H265E_SET_PARAMETER    \
+               _IOW(MPP_IOC_MAGIC, \
+               MPP_IOC_CUSTOM_BASE + 6, struct mpp_h265e_cfg)
+
+/*
+ * NOTE(review): a "GET" ioctl encoded with _IOW (userspace writes the
+ * struct, kernel reads it) — looks like it should be _IOWR; kept as is
+ * because the number is userspace ABI. TODO confirm intended direction.
+ */
+#define MPP_DEV_H265E_GET_HEAD_PARAMETER       \
+               _IOW(MPP_IOC_MAGIC, \
+               MPP_IOC_CUSTOM_BASE + 7, struct hal_h265e_header)
+
+/* Maximum number of concurrently open encoder instances. */
+#define H265E_INSTANCE_NUM 4
+
+/* Encoding mode; only ONEFRAME is exercised by this driver (see .c). */
+enum H265E_MODE {
+       H265E_MODE_NONE,
+       H265E_MODE_ONEFRAME,
+       H265E_MODE_LINKTABLE_FIX,
+       H265E_MODE_LINKTABLE_UPDATE,
+       H265E_MODE_NUM
+};
+
+struct regmap;
+
+/* Per-frame status read back from the hardware after an encode run. */
+struct h265e_result {
+       u32 bs_size;            /* produced bitstream size in bytes */
+       u32 enc_pic_cnt;
+       u32 pic_type;           /* H265E_PIC_TYPE_I / H265E_PIC_TYPE_P */
+       u32 num_of_slice;
+       u32 pick_skipped;
+       u32 num_intra;
+       u32 num_merge;
+       u32 num_skip_block;
+       u32 avg_ctu_qp;
+       int recon_frame_index;
+       u32 gop_idx;
+       u32 poc;
+       u32 src_idx;
+       u32 fail_reason;
+};
+
+/* A DMA buffer tracked by its iommu handle (hdl < 0 = not allocated). */
+struct mpp_h265e_buffer {
+       unsigned long dma_addr;
+       u32 size;
+       int hdl;
+};
+
+/* A reconstruction frame buffer with its per-plane offsets. */
+struct mpp_h265e_frame_buffer {
+       struct mpp_h265e_buffer buffer;
+       u32 y;
+       u32 cb;
+       u32 cr;
+};
+
+/* Per-job context wrapping the generic mpp_ctx. */
+struct h265e_ctx {
+       struct mpp_ctx ictx;
+       enum H265E_MODE mode;
+       struct mpp_h265e_buffer bs;     /* output bitstream buffer */
+       char __iomem *bs_data;/*for debug read data*/
+       struct mpp_h265e_buffer src;    /* input picture buffer */
+       struct mpp_h265e_buffer roi;
+       struct mpp_h265e_buffer ctu;
+
+       struct mpp_h265e_encode_info cfg;
+
+       /* store status read from hw, oneframe mode used only */
+       struct h265e_result result;
+};
+
+/* Lifecycle of a hardware instance slot. */
+enum H265E_INSTANCE_STATUS {
+       H265E_INSTANCE_STATUS_ERROR,
+       H265E_INSTANCE_STATUS_OPENED,
+       H265E_INSTANCE_STATUS_SET_PARAMETER,
+       H265E_INSTANCE_STATUS_ENCODE,
+       H265E_INSTANCE_STATUS_CLOSE
+};
+
+/* One hardware encoder instance slot (up to H265E_INSTANCE_NUM). */
+struct rockchip_h265e_instance {
+       int index;              /* slot number reported to the core */
+       atomic_t is_used;       /* 1 while claimed by an open session */
+       struct mpp_h265e_buffer work;
+       struct mpp_h265e_buffer temp;
+       struct mpp_h265e_buffer mv;
+       struct mpp_h265e_buffer fbc_luma;
+       struct mpp_h265e_buffer fbc_chroma;
+       struct mpp_h265e_buffer sub_sample;
+       /*
+        * for recon frames
+        */
+       struct mpp_h265e_frame_buffer frame_buffer[16];
+
+       int min_frame_buffer_count;
+       int min_src_frame_count;
+       int src_idx;
+       int status;             /* enum H265E_INSTANCE_STATUS */
+       struct mpp_h265e_cfg cfg;
+       struct mpp_session *session;
+};
+
+/* Device state: embeds the generic mpp device plus all instance slots. */
+struct rockchip_h265e_dev {
+       struct rockchip_mpp_dev dev;
+       struct rockchip_h265e_instance instance[H265E_INSTANCE_NUM];
+       struct mpp_h265e_buffer temp;   /* shared scratch buffer */
+
+       u32 lkt_index;
+       u32 irq_status;
+       atomic_t is_init;       /* VPU core booted (INIT_VPU done) */
+       atomic_t load_firmware; /* firmware image loaded into DMA memory */
+
+       struct delayed_work work_list;
+       struct mutex lock;      /* serializes open/release/remove */
+
+       char __iomem *firmware_cpu_addr;
+       struct mpp_h265e_buffer firmware;
+
+       /* Optional clocks; NULL when not provided (see hw_probe). */
+       struct clk *aclk;
+       struct clk *aclk_axi2sram;
+       struct clk *pclk;
+       struct clk *core;
+       struct clk *dsp;
+       void __iomem *grf_base;
+       u32 mode_bit;           /* DT "mode_bit" */
+       u32 mode_ctrl;          /* DT "mode_ctrl" GRF offset */
+       struct regmap *grf;
+};
+
+/* Per-open-file session: just the generic session plus its slot index. */
+struct h265e_session {
+       struct mpp_session isession;
+       int instance_index;
+};
+
+#endif
+
diff --git a/drivers/video/rockchip/vpu/mpp_dev_h265e_define.h b/drivers/video/rockchip/vpu/mpp_dev_h265e_define.h
new file mode 100644 (file)
index 0000000..fd5da7c
--- /dev/null
@@ -0,0 +1,759 @@
+ /*
+  * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+  * author: hehua,hh@rock-chips.com
+  * lixinhuang, buluess.li@rock-chips.com
+  *
+  * This software is licensed under the terms of the GNU General Public
+  * License version 2, as published by the Free Software Foundation, and
+  * may be copied, distributed, and modified under those terms.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  */
+#ifndef __MPP_DEV_H265E_DEFINE_H__
+#define __MPP_DEV_H265E_DEFINE_H__
+
+#include <linux/bitops.h>
+
+/* Auxiliary buffer sizes derived from the picture dimensions (w, h). */
+#define H265E_MVCOL_BUF_SIZE(w, h) \
+       ((((w) + 63) / 64) * (((h) + 63) / 64) * 128)
+#define H265E_FBC_LUMA_TABLE_SIZE(w, h) \
+       ((((h) + 15) / 16) * (((w) + 255) / 256) * 128)
+#define H265E_FBC_CHROMA_TABLE_SIZE(w, h) \
+       ((((h) + 15) / 16) * (((w) / 2 + 255) / 256) * 128)
+#define H265E_SUBSAMPLED_ONE_SIZE(w, h) \
+       (((((w) / 4) + 15) & ~15) * ((((h) / 4) + 7) & ~7))
+
+/* Values of h265e_result.pic_type. */
+#define H265E_PIC_TYPE_I 0
+#define H265E_PIC_TYPE_P 1
+
+/* Command codes written to the H265E_COMMAND register (hardware ABI). */
+enum H265E_VPU_COMMAND {
+       H265E_CMD_INIT_VPU        = 0x0001,
+       H265E_CMD_SET_PARAM       = 0x0002,
+       H265E_CMD_FINI_SEQ        = 0x0004,
+       H265E_CMD_ENC_PIC         = 0x0008,
+       H265E_CMD_SET_FRAMEBUF    = 0x0010,
+       H265E_CMD_FLUSH_DECODER   = 0x0020,
+       H265E_CMD_GET_FW_VERSION  = 0x0100,
+       H265E_CMD_QUERY_DECODER   = 0x0200,
+       H265E_CMD_SLEEP_VPU       = 0x0400,
+       H265E_CMD_WAKEUP_VPU      = 0x0800,
+       H265E_CMD_CREATE_INSTANCE = 0x4000,
+       /* NOTE(review): RESET_VPU and MAX_VPU_COMD share 0x10000 —
+        * presumably intentional (RESET is the last command); confirm.
+        */
+       H265E_CMD_RESET_VPU       = 0x10000,
+       H265E_CMD_MAX_VPU_COMD    = 0x10000,
+};
+
+/* Bit flags selecting which NAL units to emit for a picture. */
+enum H265E_PIC_CODE_OPTION {
+       CODEOPT_ENC_HEADER_IMPLICIT = BIT(0),
+       CODEOPT_ENC_VCL             = BIT(1),
+       CODEOPT_ENC_VPS             = BIT(2),
+       CODEOPT_ENC_SPS             = BIT(3),
+       CODEOPT_ENC_PPS             = BIT(4),
+       CODEOPT_ENC_AUD             = BIT(5),
+       CODEOPT_ENC_EOS             = BIT(6),
+       CODEOPT_ENC_EOB             = BIT(7),
+       CODEOPT_ENC_RESERVED        = BIT(8),
+       CODEOPT_ENC_VUI             = BIT(9),
+};
+
+/* Frame-buffer tiling layouts (hardware ABI; values must not change).
+ * CODA_TILED_MAP_TYPE_MAX and COMPRESSED_FRAME_MAP deliberately alias 10.
+ */
+enum H265E_TILED_MAP_TYPE {
+       LINEAR_FRAME_MAP            = 0,
+       TILED_FRAME_V_MAP           = 1,
+       TILED_FRAME_H_MAP           = 2,
+       TILED_FIELD_V_MAP           = 3,
+       TILED_MIXED_V_MAP           = 4,
+       TILED_FRAME_MB_RASTER_MAP   = 5,
+       TILED_FIELD_MB_RASTER_MAP   = 6,
+       TILED_FRAME_NO_BANK_MAP     = 7,
+       TILED_FIELD_NO_BANK_MAP     = 8,
+       LINEAR_FIELD_MAP            = 9,
+       CODA_TILED_MAP_TYPE_MAX     = 10,
+       COMPRESSED_FRAME_MAP        = 10,
+       TILED_SUB_CTU_MAP           = 11,
+       ARM_COMPRESSED_FRAME_MAP      = 12,
+};
+
+/* Encoder limits: temporal layers, GOP length and picture dimensions. */
+#define H265E_MAX_NUM_TEMPORAL_LAYER          7
+#define H265E_MAX_GOP_NUM                     8
+#define H265E_MIN_PIC_WIDTH            256
+#define H265E_MIN_PIC_HEIGHT           128
+#define H265E_MAX_PIC_WIDTH            1920
+#define H265E_MAX_PIC_HEIGHT           1080
+#define MAX_ROI_NUMBER  64
+
+/* Predefined GOP structures (see mpp_h265e_cfg.gop_idx). */
+enum H265E_GOP_PRESET_IDX {
+       PRESET_IDX_CUSTOM_GOP       = 0,
+       PRESET_IDX_ALL_I            = 1,
+       PRESET_IDX_IPP              = 2,
+       PRESET_IDX_IPPPP            = 6,
+};
+
+/* Parameter groups selectable with H265E_CMD_SET_PARAM. */
+enum H265E_SET_PARAM_OPTION {
+       H265E_OPT_COMMON          = 0,
+       H265E_OPT_CUSTOM_GOP      = 1,
+       H265E_OPT_CUSTOM_HEADER   = 2,
+       H265E_OPT_VUI             = 3,
+       H265E_OPT_ALL_PARAM       = 0xffffffff
+};
+
+/* Dirty flags for pending parameter updates.
+ * NOTE(review): "CHANEGED" is a typo for "CHANGED", kept because the
+ * identifiers are referenced elsewhere.
+ */
+enum H265E_PARAM_CHANEGED {
+       H265E_PARAM_CHANEGED_COMMON          = 1,
+       H265E_PARAM_CHANEGED_CUSTOM_GOP      = 2,
+       H265E_PARAM_CHANEGED_VUI             = 4,
+       H265E_PARAM_CHANEGED_REGISTER_BUFFER = 8,
+};
+
+/* Bitmask telling the firmware which common parameters changed.
+ * Bits 16-17 and 26 are unused here; ALL selects every field.
+ */
+enum H265E_COMON_CFG_MASK {
+       /* COMMON parameters*/
+       H265E_CFG_SEQ_SRC_SIZE_CHANGE             = BIT(0),
+       H265E_CFG_SEQ_PARAM_CHANGE                = BIT(1),
+       H265E_CFG_GOP_PARAM_CHANGE                = BIT(2),
+       H265E_CFG_INTRA_PARAM_CHANGE              = BIT(3),
+       H265E_CFG_CONF_WIN_TOP_BOT_CHANGE         = BIT(4),
+       H265E_CFG_CONF_WIN_LEFT_RIGHT_CHANGE      = BIT(5),
+       H265E_CFG_FRAME_RATE_CHANGE               = BIT(6),
+       H265E_CFG_INDEPENDENT_SLICE_CHANGE        = BIT(7),
+       H265E_CFG_DEPENDENT_SLICE_CHANGE          = BIT(8),
+       H265E_CFG_INTRA_REFRESH_CHANGE            = BIT(9),
+       H265E_CFG_PARAM_CHANGE                    = BIT(10),
+       H265E_CFG_CHANGE_RESERVED                 = BIT(11),
+       H265E_CFG_RC_PARAM_CHANGE                 = BIT(12),
+       H265E_CFG_RC_MIN_MAX_QP_CHANGE            = BIT(13),
+       H265E_CFG_RC_TARGET_RATE_LAYER_0_3_CHANGE = BIT(14),
+       H265E_CFG_RC_TARGET_RATE_LAYER_4_7_CHANGE = BIT(15),
+
+       H265E_CFG_SET_NUM_UNITS_IN_TICK           = BIT(18),
+       H265E_CFG_SET_TIME_SCALE                  = BIT(19),
+       H265E_CFG_SET_NUM_TICKS_POC_DIFF_ONE      = BIT(20),
+       H265E_CFG_RC_TRANS_RATE_CHANGE            = BIT(21),
+       H265E_CFG_RC_TARGET_RATE_CHANGE           = BIT(22),
+       H265E_CFG_ROT_PARAM_CHANGE                = BIT(23),
+       H265E_CFG_NR_PARAM_CHANGE                 = BIT(24),
+       H265E_CFG_NR_WEIGHT_CHANGE                = BIT(25),
+
+       H265E_CFG_SET_VCORE_LIMIT                 = BIT(27),
+       H265E_CFG_CHANGE_SET_PARAM_ALL            = (0xFFFFFFFF),
+};
+
+/**
+ * @brief    This is a data structure for setting
+ * CTU level options (ROI, CTU mode, CTU QP) in HEVC encoder.
+ */
+struct h265e_ctu {
+       u32 roi_enable;         /* enable region-of-interest QP map */
+       u32 roi_delta_qp;
+       u32 map_endian;         /* endianness of the CTU map buffer */
+
+       /*
+        * Stride of CTU-level ROI/mode/QP map
+        * Set this with (Width  + CTB_SIZE - 1) / CTB_SIZE
+        */
+       u32 map_stride;
+       /*
+        * It enables CTU QP map that allows
+        * CTUs to be encoded with the given QPs.
+        * NOTE: rcEnable should be turned off for this,
+        * encoding with the given CTU QPs.
+        */
+       u32 ctu_qp_enable;
+};
+
+/* Host-supplied prefix/suffix SEI NAL units and their ordering. */
+struct h265e_sei {
+       /* enables encoding of the prefix SEI NAL supplied by the host */
+       u8 prefix_sei_nal_enable;
+
+       /*
+        * A flag whether to encode PREFIX_SEI_DATA
+        * with a picture of this command or with a source
+        * picture of the buffer at the moment
+        * 0 : encode PREFIX_SEI_DATA when a source picture is encoded.
+        * 1 : encode PREFIX_SEI_DATA at this command.
+        */
+       u8 prefix_sei_data_order;
+
+       /*
+        * enables encoding of the suffix SEI NAL which is given by host.
+        */
+       u8 suffix_sei_nal_enable;
+
+       /*
+        * A flag whether to encode SUFFIX_SEI_DATA
+        * with a picture of this command or with a source
+        * picture of the buffer at the moment
+        * 0 : encode SUFFIX_SEI_DATA when a source picture is encoded.
+        * 1 : encode SUFFIX_SEI_DATA at this command.
+        */
+       u8 suffix_sei_data_enc_order;
+
+       /*
+        * The total byte size of the prefix SEI
+        */
+       u32 prefix_sei_data_size;
+
+       /*
+        * The start address of the total prefix SEI NALs to be encoded
+        */
+       u32 prefix_sei_nal_addr;
+
+       /*
+        * The total byte size of the suffix SEI
+        */
+       u32 suffix_sei_data_size;
+
+       /*
+        * The start address of the total suffix SEI NALs to be encoded
+        */
+       u32 suffix_sei_nal_addr;
+};
+
+/**
+ * @brief    This is a data structure for setting
+ * VUI parameters in HEVC encoder.
+ */
+struct h265e_vui {
+       /*
+        * VUI parameter flag
+        */
+       u32 flags;
+       /**< aspect_ratio_idc */
+       u32 aspect_ratio_idc;
+       /**< sar_width, sar_height
+        * (only valid when aspect_ratio_idc is equal to 255)
+        */
+       u32 sar_size;
+       /**< overscan_appropriate_flag */
+       u32 over_scan_appropriate;
+       /**< video_signal_type fields packed into one word */
+       u32 signal;
+       /**< chroma_sample_loc_type_top_field,
+        *chroma_sample_loc_type_bottom_field
+        */
+       u32 chroma_sample_loc;
+       /**< def_disp_win_left_offset, def_disp_win_right_offset */
+       u32 disp_win_left_right;
+       /**< def_disp_win_top_offset, def_disp_win_bottom_offset */
+       u32 disp_win_top_bottom;
+};
+
+/**
+ * @brief    This is a data structure for
+ *custom GOP parameters of the given picture.
+ */
+struct h265e_custom_gop_pic {
+       /**< A picture type of #th picture in the custom GOP */
+       u32 type;
+       /**< A POC offset of #th picture in the custom GOP */
+       u32 offset;
+       /**< A quantization parameter of #th picture in the custom GOP */
+       u32 qp;
+       /**< POC offset of reference L0 of #th picture in the custom GOP */
+       u32 ref_poc_l0;
+       /**< POC offset of reference L1 of #th picture in the custom GOP */
+       u32 ref_poc_l1;
+       /**< A temporal ID of #th picture in the custom GOP */
+       u32 temporal_id;
+};
+
+/**
+ * @brief    This is a data structure for custom GOP parameters.
+ */
+struct h265e_custom_gop {
+       /**< Size of the custom GOP (0~8) */
+       u32 custom_gop_size;
+       /**< It derives a lambda weight internally
+        * instead of using the lambda weight specified.
+        */
+       u32 use_derive_lambda_weight;
+       /**< picture parameters of #th picture in the custom gop */
+       struct h265e_custom_gop_pic pic[H265E_MAX_GOP_NUM];
+       /**< a lambda weight of #th picture in the custom gop */
+       u32 gop_pic_lambda[H265E_MAX_GOP_NUM];
+};
+
+/* Per-NAL-unit emission control (mirrors H265E_PIC_CODE_OPTION bits). */
+struct enc_code_opt {
+       /**< whether host encode a header implicitly or not.
+        * if this value is 1, below encode options will be ignored
+        */
+       int implicit_header_encode;
+       int encode_vcl;/**< a flag to encode vcl nal unit explicitly*/
+       int encode_vps;/**< a flag to encode vps nal unit explicitly*/
+       int encode_sps;/**< a flag to encode sps nal unit explicitly*/
+       int encode_pps;/**< a flag to encode pps nal unit explicitly*/
+       int encode_aud;/**< a flag to encode aud nal unit explicitly*/
+       int encode_eos;/**< a flag to encode eos nal unit explicitly*/
+       int encode_eob;/**< a flag to encode eob nal unit explicitly*/
+       int encode_vui;/**< a flag to encode vui nal unit explicitly*/
+};
+
+/* Supported YUV420 source pixel arrangements (YU12 aliases value 0). */
+enum H265E_SRC_FORMAT {
+       H265E_SRC_YUV_420 = 0,
+       H265E_SRC_YUV_420_YU12 = 0, /*  3Plane 1.Y, 2.U, 3.V*/
+       H265E_SRC_YUV_420_YV12, /*  3Plane 1.Y, 2.V, 3.U*/
+       H265E_SRC_YUV_420_NV12, /* 2 Plane 1.Y 2. UV*/
+       H265E_SRC_YUV_420_NV21, /* 2 Plane 1.Y 2. VU*/
+       H265E_SRC_YUV_420_MAX,
+};
+
+/* Header-stream descriptor exchanged with userspace (buffer + size). */
+struct hal_h265e_header {
+       u32         buf;
+       u32         size;
+};
+
+struct mpp_h265e_cfg {
+       /*
+        * A profile indicator
+        * 1 : main
+        * 2 : main10
+        */
+       u8 profile;
+
+       /*
+        * only support to level 4.1
+        */
+       u8 level; /**< A level indicator (level * 10) */
+
+       /*
+        * A tier indicator
+        * 0 : main
+        * 1 : high
+        */
+       u8 tier;
+
+       /*
+        * A chroma format indecator, only support YUV420
+        */
+       u8 chroma_idc;
+
+       /*
+        * the source's width and height
+        */
+       u16 width;
+       u16 height;
+       u16 width_stride;
+       u16 height_stride;
+
+       /*
+        * bitdepth,only support 8 bits(only support 8 bits)
+        */
+       u8 bit_depth;
+
+       /*
+        * source yuv's format. The value is defined
+        * in H265E_FrameBufferFormat(only support YUV420)
+        * the value could be YU12,YV12,NV12,NV21
+        */
+       u8 src_format;
+
+       u8 src_endian;
+       u8 bs_endian;
+       u8 fb_endian;
+       u8 frame_rate;
+       u8 frame_skip;
+       u32 bit_rate;
+
+       u32 map_type;
+       u32 line_buf_int_en;
+       u32 slice_int_enable;
+       u32 ring_buffer_enable;
+
+       struct enc_code_opt code_option;
+       /*
+        * A chroma format indecator, only support YUV420
+        */
+       int lossless_enable;/**< It enables lossless coding */
+       /**< It enables constrained intra prediction */
+       int const_intra_pred_flag;
+       /**< The value of chroma(cb) qp offset (only for WAVE420L) */
+       int chroma_cb_qp_offset;
+       /**< The value of chroma(cr) qp offset  (only for WAVE420L) */
+       int chroma_cr_qp_offset;
+       /**
+        * A GOP structure option
+        * 0: Custom GOP
+        * 1 : I-I-I-I,..I (all intra, gop_size=1)
+        * 2 : I-P-P-P,... P (consecutive P, gop_size=1)
+        * 6 : I-P-P-P-P (consecutive P, gop_size=4)
+        */
+       u32 gop_idx;
+
+       /**
+        * An intra picture refresh mode
+        * 0 : Non-IRAP
+        * 1 : CRA
+        * 2 : IDR
+        */
+       u32 decoding_refresh_type;
+
+       /*
+        * A quantization parameter of intra picture
+        */
+       u32 intra_qp;
+
+       /*
+        * A period of intra picture in GOP size
+        */
+       u32 intra_period;
+
+       /** A conformance window size of TOP,BUTTOM,LEFT,RIGHT */
+       u16 conf_win_top;
+       u16 conf_win_bot;
+       u16 conf_win_left;
+       u16 conf_win_right;
+
+       /*
+        * A slice mode for independent slice
+        * 0 : no multi-slice
+        * 1 : Slice in CTU number
+        * 2 : Slice in number of byte
+        */
+       u32 independ_slice_mode;
+
+       /*
+        * The number of CTU or bytes for a slice
+        * when independ_slice_mode is set with 1 or 2.
+        */
+       u32 independ_slice_mode_arg;
+
+       /**
+        *A slice mode for dependent slice
+        * 0 : no multi-slice
+        * 1 : Slice in CTU number
+        * 2 : Slice in number of byte
+        */
+       u32 depend_slice_mode;
+
+       /*
+        * The number of CTU or bytes for a slice
+        * when depend_slice_mode is set with 1 or 2.
+        */
+       u32 depend_slice_mode_arg;
+
+       /*
+        * An intra refresh mode
+        * 0 : No intra refresh
+        * 1 : Row
+        * 2 : Column
+        * 3 : Step size in CTU
+        */
+       u32 intra_refresh_mode;
+
+       /*
+        * The number of CTU (only valid when intraRefreshMode is 3.)
+        */
+       u32 intra_refresh_arg;
+
+       /*
+        * It uses one of the recommended encoder parameter presets.
+        * 0 : Custom
+        * 1 : Recommend enc params
+        * (slow encoding speed, highest picture quality)
+        * 2 : Boost mode (normal encoding speed, normal picture quality)
+        * 3 : Fast mode (high encoding speed, low picture quality)
+        */
+       u8 use_recommend_param;
+       u8 scaling_list_enable; /**< It enables a scaling list */
+
+       /*
+        * It specifies CU size.
+        * 3'b001: 8x8
+        * 3'b010: 16x16
+        * 3'b100 : 32x32
+        */
+       u8 cu_size_mode;
+       u8 tmvp_enable;
+       u8 wpp_enable; /**< It enables wave-front parallel processing. */
+       u8 max_num_merge; /**< Maximum number of merge candidates (0~2) */
+       u8 dynamic_merge_8x8_enable;
+       u8 dynamic_merge_16x16_enable;
+       u8 dynamic_merge_32x32_enable;
+       u8 disable_deblk; /**< It disables in-loop deblocking filtering. */
+       /**< it enables filtering across slice
+        * boundaries for in-loop deblocking.
+        */
+       u8 lf_cross_slice_boundary_enable;
+       /**< BetaOffsetDiv2 for deblocking filter */
+       u8 beta_offset_div2;
+       /**< TcOffsetDiv3 for deblocking filter */
+       u8 tc_offset_div2;
+       /**< It enables transform skip for an intra CU. */
+       u8 skip_intra_trans;
+       /**< It enables SAO (sample adaptive offset). */
+       u8 sao_enable;
+       /**< It enables to make intra CUs in an inter slice. */
+       u8 intra_in_inter_slice_enable;
+       /**< It enables intra NxN PUs. */
+       u8 intra_nxn_enable;
+
+       /*
+        * specifies intra QP offset relative
+        * to inter QP (Only available when rc_enable is enabled)
+        */
+       s8 intra_qp_offset;
+
+       /*
+        * It specifies encoder initial delay,
+        * Only available when RateControl is enabled
+        * (encoder initial delay = initial_delay * init_buf_levelx8 / 8)
+        */
+       int init_buf_levelx8;
+
+       /*
+        * specifies picture bits allocation mode.
+        * Only available when RateControl is enabled
+        * and GOP size is larger than 1
+        * 0: More referenced pictures have
+        * better quality than less referenced pictures
+        * 1: All pictures in a GOP have similar image quality
+        * 2: Each picture bits in a GOP is allocated according to FixedRatioN
+        */
+       u8 bit_alloc_mode;
+
+       /*
+        * A fixed bit ratio (1 ~ 255) for each picture of GOP's bitallocation
+        * N = 0 ~ (MAX_GOP_SIZE - 1)
+        * MAX_GOP_SIZE = 8
+        * For instance when MAX_GOP_SIZE is 3, FixedBitRaio0
+        * to FixedBitRaio2 can be set as 2, 1, and 1 respectively for
+        * the fixed bit ratio 2:1:1. This is only valid when BitAllocMode is 2.
+        */
+       u8 fixed_bit_ratio[H265E_MAX_GOP_NUM];
+
+       /*
+        * enable rate control
+        */
+       u32 rc_enable;
+
+       /*
+        * enable CU level rate control
+        */
+       u8 cu_level_rc_enable;
+
+       /*
+        * enable CU QP adjustment for subjective quality enhancement
+        */
+       u8 hvs_qp_enable;
+
+       /*
+        * enable QP scaling factor for CU QP adjustment when hvs_qp_enable = 1
+        */
+       u8 hvs_qp_scale_enable;
+
+       /*
+        * A QP scaling factor for CU QP adjustment when hvs_qp_enable = 1
+        */
+       s8 hvs_qp_scale;
+
+       /*
+        * A minimum QP for rate control
+        */
+       u8 min_qp;
+
+       /*
+        * A maximum QP for rate control
+        */
+       u8 max_qp;
+
+       /*
+        * A maximum delta QP for rate control
+        */
+       u8 max_delta_qp;
+
+       /*
+        * A peak transmission bitrate in bps
+        */
+       u32 trans_rate;
+       /*< It specifies the number of time units of
+        * a clock operating at the frequency time_scale Hz
+        */
+       u32 num_units_in_tick;
+       /**< It specifies the number of time units that pass in one second */
+       u32 time_scale;
+       /**< It specifies the number of clock ticks corresponding to a
+        * difference of picture order count values equal to 1
+        */
+       u32 num_ticks_poc_diff_one;
+
+       /*< The value of initial QP by host.
+        * This value is meaningless if INITIAL_RC_QP == 63
+        */
+       int initial_rc_qp;
+
+       /*
+        * enables noise reduction algorithm to Y/Cb/Cr component.
+        */
+       u8 nr_y_enable;
+       u8 nr_cb_enable;
+       u8 nr_cr_enable;
+
+       /*
+        * enables noise estimation for reduction. When this is disabled,
+        * noise estimation is carried out outside the VPU.
+        */
+       u8 nr_noise_est_enable;
+       /*
+        * It specifies Y/Cb/Cr noise standard deviation
+        * if no use of noise estimation (nr_noise_est_enable=0)
+        */
+       u8 nr_noise_sigma_y;
+       u8 nr_noise_sigma_cb;
+       u8 nr_noise_sigma_cr;
+       /* ENC_NR_WEIGHT*/
+       /*< A weight to Y noise level for intra picture (0 ~ 31).
+        * nr_intra_weight_y/4 is multiplied to the noise
+        * level that has been estimated.
+        * This weight is put for intra frame to be filtered more strongly or
+        * more weakly than just with the estimated noise level.
+        */
+       u8 nr_intra_weight_y;
+       /**< A weight to Cb noise level for intra picture (0 ~ 31). */
+       u8 nr_intra_weight_cb;
+       /**< A weight to Cr noise level for intra picture (0 ~ 31). */
+       u8 nr_intra_weight_cr;
+       /*< A weight to Y noise level for inter picture (0 ~ 31).
+        * nr_inter_weight_y/4 is multiplied to the noise
+        * level that has been estimated.
+        * This weight is put for inter frame to be filtered more strongly or
+        * more weakly than just with the estimated noise level.
+        */
+       u8 nr_inter_weight_y;
+       /**< A weight to Cb noise level for inter picture (0 ~ 31). */
+       u8 nr_inter_weight_cb;
+       /**< A weight to Cr noise level for inter picture (0 ~ 31). */
+       u8 nr_inter_weight_cr;
+       /*
+        * a minimum QP for intra picture (0 ~ 51).
+        * It is only available when rc_enable is 1.
+        */
+       u8 intra_min_qp;
+
+       /*
+        * a maximum QP for intra picture (0 ~ 51).
+        * It is only available when rc_enable is 1.
+        */
+       u8 intra_max_qp;
+
+       u32 initial_delay;
+
+       u8 hrd_rbsp_in_vps;
+       u8 hrd_rbsp_in_vui;
+       u32 vui_rbsp;
+
+       u32 hrd_rbsp_data_size; /**< The size of the HRD rbsp data */
+       u32 hrd_rbsp_data_addr;  /**< The address of the HRD rbsp data */
+
+       u32 vui_rbsp_data_size;   /**< The size of the VUI rbsp data */
+       u32 vui_rbsp_data_addr;   /**< The address of the VUI rbsp data */
+
+       u8 use_long_term;
+       u8 use_cur_as_longterm_pic;
+       u8 use_longterm_ref;
+
+       struct h265e_custom_gop gop;
+       struct h265e_ctu ctu;
+       struct h265e_vui vui;
+       struct h265e_sei sei;
+
+       /*
+        * define which type of parameters are changed,
+        * only supports common parameter changes now,
+        * see H265eCommonCfgMask
+        */
+       u32 cfg_option;
+
+       /*
+        * define which parameters are changed,see H265E_SET_PARAM_OPTION
+        */
+       u32 cfg_mask;
+};
+
+struct mpp_h265e_encode_info {
+       /*
+        * buffer handle (fd) of the source (YUV) frame to encode
+        */
+       u32 src_fd;
+
+       /*
+        * the size of the source (YUV) data for encoding
+        */
+       u32 src_size;
+
+       /*
+        * buffer handle (fd) of the output bitstream buffer
+        */
+       u32 bs_fd;
+
+       /*
+        * the size of the bitstream buffer
+        */
+       u32 bs_size;
+       /* buffer handle of the ROI CTU map (written to
+        * H265E_CMD_ENC_ROI_ADDR_CTU_MAP)
+        */
+       u32 roi_fd;
+       /* buffer handle of the per-CTU QP map (written to
+        * H265E_CMD_ENC_CTU_QP_MAP_ADDR)
+        */
+       u32 ctu_qp_fd;
+       /* non-zero presumably marks the last frame of the stream
+        * -- TODO confirm against userspace caller
+        */
+       u32 stream_end;
+
+       /*
+        * skip encoding the current frame when set
+        */
+       u32 skip_pic;
+
+       /*
+        * A flag to use a forced picture quantization parameter
+        */
+       u32 force_qp_enable;
+
+       /*
+        * Forced picture quantization parameter for I picture
+        */
+       u32 force_qp_i;
+
+       /*
+        * Forced picture quantization parameter for P picture
+        */
+       u32 force_qp_p;
+
+       /*
+        * A flag to use a forced picture type
+        */
+       u32 force_frame_type_enable;
+
+       /*
+        * A forced picture type (I, P, B, IDR, CRA)
+        */
+       u32 force_frame_type;
+};
+
+/*
+ * Interrupt reason bit positions of the generic (BIT) command interface.
+ * Note the deliberate gaps (bits 6, 11 and 12 are unused here).
+ */
+enum INTERRUPT_BIT {
+       INT_BIT_INIT            = 0,
+       INT_BIT_SEQ_INIT        = 1,
+       INT_BIT_SEQ_END         = 2,
+       INT_BIT_PIC_RUN         = 3,
+       INT_BIT_FRAMEBUF_SET    = 4,
+       INT_BIT_ENC_HEADER      = 5,
+       INT_BIT_DEC_PARA_SET    = 7,
+       INT_BIT_DEC_BUF_FLUSH   = 8,
+       INT_BIT_USERDATA        = 9,
+       INT_BIT_DEC_FIELD       = 10,
+       INT_BIT_DEC_MB_ROWS     = 13,
+       INT_BIT_BIT_BUF_EMPTY   = 14,
+       INT_BIT_BIT_BUF_FULL    = 15
+};
+
+/*
+ * Interrupt reason bit positions of the H265 encoder command set.
+ * Bit 15 is intentionally shared: it means BIT_BUF_EMPTY on the decode
+ * side and BIT_BUF_FULL on the encode side.
+ */
+enum H265E_INTERRUPT_BIT {
+       INT_H265E_INIT            = 0,
+       INT_H265E_DEC_PIC_HDR     = 1,
+       INT_H265E_FINI_SEQ        = 2,
+       INT_H265E_ENC_PIC         = 3,
+       INT_H265E_SET_FRAMEBUF    = 4,
+       INT_H265E_FLUSH_DECODER   = 5,
+       INT_H265E_GET_FW_VERSION  = 8,
+       INT_H265E_QUERY_DECODER   = 9,
+       INT_H265E_SLEEP_VPU       = 10,
+       INT_H265E_WAKEUP_VPU      = 11,
+       INT_H265E_CHANGE_INST     = 12,
+       INT_H265E_CREATE_INSTANCE = 14,
+       INT_H265E_BIT_BUF_EMPTY   = 15,   /* Decoder: bitstream buffer empty */
+       INT_H265E_BIT_BUF_FULL    = 15,   /* Encoder: bitstream buffer full */
+};
+
+#endif
diff --git a/drivers/video/rockchip/vpu/mpp_dev_h265e_reg.h b/drivers/video/rockchip/vpu/mpp_dev_h265e_reg.h
new file mode 100644 (file)
index 0000000..49f9cff
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: hehua,hh@rock-chips.com
+ * lixinhuang, buluess.li@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MPP_DEV_H265E_REGISTER_H_
+#define _MPP_DEV_H265E_REGISTER_H_
+
+/* VPU host interface / control registers (offsets below 0x0100) */
+#define H265E_PO_CONF                        0x0000
+#define H265E_VCPU_CUR_PC                    0x0004
+#define H265E_VPU_PDBG_CTRL                  0x0010
+#define H265E_VPU_PDBG_IDX_REG               0x0014
+#define H265E_VPU_PDBG_WDATA_REG             0x0018
+#define H265E_VPU_PDBG_RDATA_REG             0x001C
+#define H265E_VPU_FIO_CTRL_ADDR              0x0020
+#define H265E_VPU_FIO_DATA                   0x0024
+#define H265E_VPU_VINT_REASON_USR            0x0030
+#define H265E_VPU_VINT_REASON_CLR            0x0034
+#define H265E_VPU_HOST_INT_REQ               0x0038
+#define H265E_VPU_VINT_CLEAR                 0x003C
+#define H265E_VPU_HINT_CLEAR                 0x0040
+#define H265E_VPU_VPU_INT_STS                0x0044
+#define H265E_VPU_VINT_ENABLE                0x0048
+
+/* first offset past the command/argument window */
+#define H265E_CMD_REG_END                    0x0200
+
+#define H265E_VPU_VINT_REASON                0x004c
+#define H265E_VPU_RESET_REQ                  0x0050
+#define H265E_VPU_RESET_STATUS               0x0070
+#define H265E_VPU_REMAP_CTRL                 0x0060
+#define H265E_VPU_REMAP_VADDR                0x0064
+#define H265E_VPU_REMAP_PADDR                0x0068
+#define H265E_VPU_REMAP_CORE_START           0x006C
+#define H265E_VPU_BUSY_STATUS                0x0070 /* aliases H265E_VPU_RESET_STATUS */
+/*
+ * Command/argument window (0x0100 - 0x01FF): the meaning of an offset in
+ * this range depends on the command being issued, so several names below
+ * intentionally share the same offset.
+ */
+#define H265E_COMMAND                        0x0100
+#define H265E_CORE_INDEX                     0x0104
+#define H265E_INST_INDEX                     0x0108
+#define H265E_ENC_SET_PARAM_OPTION           0x010C
+#define H265E_RET_FW_VERSION                 0x0118
+#define H265E_ADDR_CODE_BASE                 0x0118
+#define H265E_CODE_SIZE                             0x011C
+#define H265E_CODE_PARAM                     0x0120
+#define H265E_HW_OPTION                      0x0124
+
+#define H265E_RET_SUCCESS                    0x0110
+#define H265E_VPU_HOST_INT_REQ               0x0038 /* duplicate of the definition above */
+#define H265E_SFB_OPTION                     0x010C
+#define H265E_RET_FAIL_REASON                0x0114
+#define H265E_BS_START_ADDR                  0x0120
+#define H265E_COMMON_PIC_INFO                0x0120
+#define H265E_BS_SIZE                        0x0124
+#define H265E_PIC_SIZE                       0x0124
+#define H265E_BS_PARAM                       0x0128
+#define H265E_SET_FB_NUM                     0x0128
+#define H265E_BS_OPTION                      0x012C
+#define H265E_BS_RD_PTR                      0x0130
+#define H265E_BS_WR_PTR                      0x0134
+#define H265E_ADDR_WORK_BASE                 0x0138
+#define H265E_WORK_SIZE                      0x013c
+#define H265E_WORK_PARAM                     0x0140
+#define H265E_ADDR_TEMP_BASE                 0x0144
+#define H265E_TEMP_SIZE                      0x0148
+#define H265E_TEMP_PARAM                     0x014C
+#define H265E_FBC_STRIDE                     0x0154
+#define H265E_ENC_SET_PARAM_ENABLE           0x015C
+#define H265E_ENC_SEQ_SRC_SIZE               0x0160
+#define H265E_ADDR_LUMA_BASE0                0x0160
+#define H265E_ADDR_CB_BASE0                  0x0164
+#define H265E_ADDR_CR_BASE0                  0x0168
+#define H265E_ADDR_FBC_Y_OFFSET0             0x0168
+#define H265E_ADDR_FBC_C_OFFSET0             0x016C
+
+#define H265E_ENC_SEQ_PARAM                  0x016C
+#define H265E_ENC_SEQ_GOP_PARAM              0x0170
+#define H265E_ENC_SRC_PIC_IDX                0x0170
+#define H265E_ENC_SEQ_INTRA_PARAM            0x0174
+#define H265E_ENC_SEQ_CONF_WIN_TOP_BOT       0x0178
+#define H265E_ENC_SEQ_CONF_WIN_LEFT_RIGHT    0x017C
+#define H265E_ENC_SEQ_FRAME_RATE             0x0180
+#define H265E_ENC_SEQ_INDEPENDENT_SLICE      0x0184
+#define H265E_ENC_SEQ_DEPENDENT_SLICE        0x0188
+
+#define H265E_ENC_SEQ_INTRA_REFRESH          0x018C
+#define H265E_ENC_PARAM                      0x0190
+#define H265E_ENC_RC_INTRA_MIN_MAX_QP        0x0194
+#define H265E_ENC_RC_PARAM                   0x0198
+#define H265E_ENC_RC_MIN_MAX_QP              0x019C
+#define H265E_ENC_RC_BIT_RATIO_LAYER_0_3     0x01A0
+#define H265E_ENC_RC_BIT_RATIO_LAYER_4_7     0x01A4
+#define H265E_ENC_NR_PARAM                   0x01A8
+#define H265E_ENC_NR_WEIGHT                  0x01AC
+#define H265E_ENC_NUM_UNITS_IN_TICK          0x01B0
+#define H265E_ENC_TIME_SCALE                 0x01B4
+#define H265E_ENC_NUM_TICKS_POC_DIFF_ONE     0x01B8
+#define H265E_ENC_RC_TRANS_RATE              0x01BC
+#define H265E_ENC_RC_TARGET_RATE             0x01C0
+#define H265E_ENC_ROT_PARAM                  0x01C4
+#define H265E_ENC_ROT_RESERVED               0x01C8
+#define H265E_RET_ENC_MIN_FB_NUM             0x01CC
+#define H265E_RET_ENC_NAL_INFO_TO_BE_ENCODED 0x01D0
+#define H265E_RET_ENC_MIN_SRC_BUF_NUM        0x01D8
+
+/* co-located motion vector buffers, one per frame buffer index */
+#define H265E_ADDR_MV_COL0                   0x01E0
+#define H265E_ADDR_MV_COL1                   0x01E4
+#define H265E_ADDR_MV_COL2                   0x01E8
+#define H265E_ADDR_MV_COL3                   0x01EC
+#define H265E_ADDR_MV_COL4                   0x01F0
+#define H265E_ADDR_MV_COL5                   0x01F4
+#define H265E_ADDR_MV_COL6                   0x01F8
+#define H265E_ADDR_MV_COL7                   0x01FC
+
+#define H265E_ADDR_SEC_AXI_BASE              0x150
+#define H265E_SEC_AXI_SIZE                   0x154
+#define H265E_USE_SEC_AXI                    0x158
+
+/************************************************************************/
+/*H265 ENCODER - SET_PARAM + CUSTOM_GOP                                 */
+/************************************************************************/
+#define H265E_ENC_SET_CUSTOM_GOP_ENABLE      0x015C
+#define H265E_ENC_CUSTOM_GOP_PARAM           0x0160
+#define H265E_ENC_CUSTOM_GOP_PIC_PARAM_0     0x0164
+#define H265E_ENC_CUSTOM_GOP_PIC_PARAM_1     0x0168
+#define H265E_ENC_CUSTOM_GOP_PIC_PARAM_2     0x016C
+#define H265E_ENC_CUSTOM_GOP_PIC_PARAM_3     0x0170
+#define H265E_ENC_CUSTOM_GOP_PIC_PARAM_4     0x0174
+#define H265E_ENC_CUSTOM_GOP_PIC_PARAM_5     0x0178
+#define H265E_ENC_CUSTOM_GOP_PIC_PARAM_6     0x017C
+#define H265E_ENC_CUSTOM_GOP_PIC_PARAM_7     0x0180
+#define H265E_ENC_CUSTOM_GOP_RESERVED        0x0184
+#define H265E_ENC_CUSTOM_GOP_PIC_LAMBDA_0    0x0188
+#define H265E_ENC_CUSTOM_GOP_PIC_LAMBDA_1    0x018C
+#define H265E_ENC_CUSTOM_GOP_PIC_LAMBDA_2    0x0190
+#define H265E_ENC_CUSTOM_GOP_PIC_LAMBDA_3    0x0194
+#define H265E_ENC_CUSTOM_GOP_PIC_LAMBDA_4    0x0198
+#define H265E_ENC_CUSTOM_GOP_PIC_LAMBDA_5    0x019C
+#define H265E_ENC_CUSTOM_GOP_PIC_LAMBDA_6    0x01A0
+#define H265E_ENC_CUSTOM_GOP_PIC_LAMBDA_7    0x01A4
+
+/************************************************************************/
+/* H265 ENCODER - SET_PARAM + VUI                                       */
+/************************************************************************/
+#define H265E_ENC_VUI_PARAM_FLAGS            0x015C
+#define H265E_ENC_VUI_ASPECT_RATIO_IDC       0x0160
+#define H265E_ENC_VUI_SAR_SIZE               0x0164
+#define H265E_ENC_VUI_OVERSCAN_APPROPRIATE   0x0168
+#define H265E_ENC_VUI_VIDEO_SIGNAL           0x016C
+#define H265E_ENC_VUI_CHROMA_SAMPLE_LOC      0x0170
+#define H265E_ENC_VUI_DISP_WIN_LEFT_RIGHT    0x0174
+#define H265E_ENC_VUI_DISP_WIN_TOP_BOT       0x0178
+
+#define H265E_ENC_VUI_HRD_RBSP_PARAM_FLAG    0x017C
+#define H265E_ENC_VUI_RBSP_ADDR              0x0180
+#define H265E_ENC_VUI_RBSP_SIZE              0x0184
+#define H265E_ENC_HRD_RBSP_ADDR              0x0188
+#define H265E_ENC_HRD_RBSP_SIZE              0x018C
+
+/************************************************************************/
+/* H265 ENCODER - SET_FRAMEBUF                                          */
+/************************************************************************/
+#define H265E_FBC_STRIDE_Y                   0x150
+#define H265E_FBC_STRIDE_C                   0x154
+/* 1/4 sub-sampled buffer (for S2 ME)
+ *      SUB_SAMPLED_ONE_FB_SIZE = ALIGN16(width/4) * ALIGN8(height/4)
+ *      total size for sub-sampled buffer = SUB_SAMPLED_ONE_FB_SIZE * SET_FB_NUM
+ */
+#define H265E_ADDR_SUB_SAMPLED_FB_BASE       0x0158
+#define H265E_SUB_SAMPLED_ONE_FB_SIZE        0x015C
+
+/************************************************************************/
+/* ENCODER - ENC_PIC                                                    */
+/************************************************************************/
+#define H265E_CMD_ENC_ADDR_REPORT_BASE       0x015C
+#define H265E_CMD_ENC_REPORT_SIZE            0x0160
+#define H265E_CMD_ENC_REPORT_PARAM           0x0164
+#define H265E_CMD_ENC_CODE_OPTION            0x0168
+#define H265E_CMD_ENC_PIC_PARAM              0x016C
+#define H265E_CMD_ENC_SRC_PIC_IDX            0x0170
+#define H265E_CMD_ENC_SRC_ADDR_Y             0x0174
+#define H265E_CMD_ENC_SRC_ADDR_U             0x0178
+#define H265E_CMD_ENC_SRC_ADDR_V             0x017C
+#define H265E_CMD_ENC_SRC_STRIDE             0x0180
+#define H265E_CMD_ENC_SRC_FORMAT             0x0184
+#define H265E_CMD_ENC_PREFIX_SEI_NAL_ADDR    0x0188
+#define H265E_CMD_ENC_PREFIX_SEI_INFO        0x018C
+#define H265E_CMD_ENC_SUFFIX_SEI_NAL_ADDR    0x0190
+#define H265E_CMD_ENC_SUFFIX_SEI_INFO        0x0194
+#define H265E_CMD_ENC_LONGTERM_PIC           0x0198
+#define H265E_CMD_ENC_SUB_FRAME_SYNC_CONFIG  0x019C
+#define H265E_CMD_ENC_CTU_OPT_PARAM          0x01A0
+#define H265E_CMD_ENC_ROI_ADDR_CTU_MAP       0x01A4
+#define H265E_CMD_ENC_CTU_QP_MAP_ADDR        0x01AC
+#define H265E_CMD_ENC_SRC_TIMESTAMP_LOW      0x01B0
+#define H265E_CMD_ENC_SRC_TIMESTAMP_HIGH     0x01B4
+
+#define H265E_CMD_ENC_FC_PARAM               0x01E8
+#define H265E_CMD_ENC_FC_TABLE_ADDR_Y        0x01EC
+#define H265E_CMD_ENC_FC_TABLE_ADDR_C        0x01F0
+
+/* per-picture results reported back after an ENC_PIC command */
+#define H265E_RET_ENC_PIC_IDX                0x01A8
+#define H265E_RET_ENC_PIC_SLICE_NUM          0x01AC
+#define H265E_RET_ENC_PIC_SKIP               0x01B0
+#define H265E_RET_ENC_PIC_NUM_INTRA          0x01B4
+#define H265E_RET_ENC_PIC_NUM_MERGE          0x01B8
+#define H265E_RET_ENC_PIC_FLAG               0x01BC
+#define H265E_RET_ENC_PIC_NUM_SKIP           0x01C0
+#define H265E_RET_ENC_PIC_AVG_CU_QP          0x01C4
+#define H265E_RET_ENC_PIC_BYTE               0x01C8
+#define H265E_RET_ENC_GOP_PIC_IDX            0x01CC
+#define H265E_RET_ENC_PIC_POC                0x01D0
+#define H265E_RET_ENC_USED_SRC_IDX           0x01D8
+#define H265E_RET_ENC_PIC_NUM                0x01DC
+#define H265E_RET_ENC_PIC_TYPE               0x01E0
+#define H265E_RET_ENC_VCL_NUT                0x01E4
+
+/* AXI bus performance counters */
+#define H265E_PERF_AXI_CTRL                 0x0240
+#define H265E_PERF_LATENCY_CTRL0             0x0264
+#define H265E_PERF_LATENCY_CTRL1             0x0268
+#define H265E_PERF_RD_MAX_LATENCY_NUM0       0x026C
+#define H265E_PERF_RD_LATENCY_SAMP_NUM       0x0270
+#define H265E_PERF_RD_LATENCY_ACC_SUM        0x0274
+#define H265E_PERF_RD_AXI_TOTAL_BYTE         0x0278
+#define H265E_PERF_WR_AXI_TOTAL_BYTE         0x027C
+#define H265E_PERF_WORKING_CNT              0x0280
+#endif
diff --git a/drivers/video/rockchip/vpu/mpp_dev_rkvenc.c b/drivers/video/rockchip/vpu/mpp_dev_rkvenc.c
new file mode 100644 (file)
index 0000000..9b00463
--- /dev/null
@@ -0,0 +1,814 @@
+/**
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: chenhengming chm@rock-chips.com
+ *        Alpha Lin, alpha.lin@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/reset.h>
+#include <linux/rockchip/pmu.h>
+
+#include "vpu_iommu_ops.h"
+#include "mpp_service.h"
+#include "mpp_dev_common.h"
+#include "mpp_dev_rkvenc.h"
+
+#define MPP_ALIGN_SIZE 0x1000
+
+/* link-table location inside the register file -- TODO confirm units */
+#define LINK_TABLE_START       12
+#define LINK_TABLE_LEN         128
+
+/* rkvenc hardware register offsets and bit fields */
+#define        RKVENC_ENC_START                0x004
+#define                RKVENC_LKT_NUM(x)                       (((x) & 0xff) << 0)
+#define                RKVENC_CMD(x)                           (((x) & 0x3) << 8)
+#define                RKVENC_CLK_GATE_EN                      BIT(16)
+#define        RKVENC_SAFE_CLR                 0x008
+#define        RKVENC_LKT_ADDR                 0x00c
+#define        RKVENC_INT_EN                   0x010
+#define                RKVENC_INT_EN_SAFE_CLEAR                BIT(2)
+#define                RKVENC_INT_EN_TIMEOUT                   BIT(8)
+#define        RKVENC_INT_MSK                  0x014
+#define                RKVENC_INT_MSK_OVERFLOW                 BIT(4)
+#define                RKVENC_INT_MSK_W_FIFO_FULL              BIT(5)
+#define                RKVENC_INT_MSK_W_CHN_ERROR              BIT(6)
+#define                RKVENC_INT_MSK_R_CHN_ERROR              BIT(7)
+#define                RKVENC_INT_MSK_TIMEOUT                  BIT(8)
+#define        RKVENC_INT_CLR                  0x018
+#define        RKVENC_INT_STATUS               0x01c
+#define                RKVENC_ONE_FRAME_FINISH                 BIT(0)
+#define                RKVENC_LINK_TABLE_FINISH                BIT(1)
+#define                RKVENC_SAFE_CLEAR_FINISH                BIT(2)
+#define                RKVENC_ONE_SLICE_FINISH                 BIT(3)
+#define                RKVENC_BIT_STREAM_OVERFLOW              BIT(4)
+#define                RKVENC_AXI_WRITE_FIFO_FULL              BIT(5)
+#define                RKVENC_AXI_WRITE_CHANNEL_ERROR          BIT(6)
+#define                RKVENC_AXI_READ_CHANNEL_ERROR           BIT(7)
+#define                RKVENC_TIMEOUT_ERROR                    BIT(8)
+/* union of every interrupt status bit that signals a hardware error */
+#define RKVENC_INT_ERROR_BITS          ((RKVENC_BIT_STREAM_OVERFLOW) |    \
+                                        (RKVENC_AXI_WRITE_FIFO_FULL) |    \
+                                        (RKVENC_AXI_WRITE_CHANNEL_ERROR) |\
+                                        (RKVENC_AXI_READ_CHANNEL_ERROR) | \
+                                        (RKVENC_TIMEOUT_ERROR))
+#define        RKVENC_ENC_PIC                  0x034
+#define                RKVENC_ENC_PIC_NODE_INT_EN              BIT(31)
+#define        RKVENC_ENC_WDG                  0x038
+#define                RKVENC_PPLN_ENC_LMT(x)                  (((x) & 0xff) << 0)
+#define        RKVENC_OSD_CFG                  0x1c0
+#define                RKVENC_OSD_PLT_TYPE                     BIT(17)
+#define                RKVENC_OSD_CLK_SEL_BIT                  BIT(16)
+#define        RKVENC_STATUS(i)                (0x210 + (4 * (i)))
+#define        RKVENC_BSL_STATUS               0x210
+#define                RKVENC_BITSTREAM_LENGTH(x)              ((x) & 0x7FFFFFF)
+#define        RKVENC_ENC_STATUS               0x220
+#define                RKVENC_ENC_STATUS_ENC(x)                (((x) >> 0) & 0x3)
+#define        RKVENC_LKT_STATUS               0x224
+#define                RKVENC_LKT_STATUS_FNUM_ENC(x)           (((x) >> 0) & 0xff)
+#define                RKVENC_LKT_STATUS_FNUM_CFG(x)           (((x) >> 8) & 0xff)
+#define                RKVENC_LKT_STATUS_FNUM_INT(x)           (((x) >> 16) & 0xff)
+#define        RKVENC_OSD_PLT(i)               (0x400 + (4 * (i)))
+
+/* downcast from the embedded generic mpp structs to the rkvenc ones */
+#define to_rkvenc_ctx(ctx)             \
+               container_of(ctx, struct rkvenc_ctx, ictx)
+#define to_rkvenc_session(session)     \
+               container_of(session, struct rkvenc_session, isession)
+#define to_rkvenc_dev(dev)             \
+               container_of(dev, struct rockchip_rkvenc_dev, idev)
+
+/*
+ * file handle translate information
+ *
+ * Indices of the registers that carry buffer file handles and therefore
+ * need translating to device addresses before being written to hardware
+ * (consumed through mpp_reg_address_translate()).
+ */
+static const char trans_tbl_rkvenc[] = {
+       70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+       124, 125, 126, 127, 128, 129, 130, 131
+};
+
+/* rkvenc has a single translation table set */
+static struct mpp_trans_info trans_rkvenc[1] = {
+       [0] = {
+               .count = sizeof(trans_tbl_rkvenc),
+               .table = trans_tbl_rkvenc,
+       },
+};
+
+/*
+ * Register template for the dummy 64x64 warm-up frame queued at power-on
+ * to work around the first-frame timeout hardware bug (see
+ * rockchip_mpp_war_init()). The adr_* buffer address fields are filled
+ * in at runtime from the workaround DMA buffer.
+ */
+static struct mpp_dev_rkvenc_reg mpp_rkvenc_dummy_reg = {
+       .enc_rsl = 0x00070007,          /* 64x64 */
+       .enc_pic = 0x00001714,          /* h264, qp 30 */
+       .enc_wdg = 0x00000002,
+       .dtrns_map = 0x00007000,
+       .dtrns_cfg = 0x0000007f,
+       .src_fmt = 0x00000018,          /* nv12 */
+       .src_strd = 0x003f003f,
+       .sli_spl = 0x00000004,
+       .me_rnge = 0x00002f7b,
+       .me_cnst = 0x000e0505,
+       .me_ram = 0x000e79ab,
+       .rc_qp = 0x07340000,
+       .rdo_cfg = 0x00000002,
+       .synt_nal = 0x00000017,
+       .synt_sps = 0x0000019c,
+       .synt_pps = 0x01000d03,
+       .synt_sli0 = 0x00000002,
+};
+
+static int rockchip_mpp_rkvenc_reset(struct rockchip_mpp_dev *mpp);
+
+/*
+ * In order to workaround hw bug which make the first frame run failure with
+ * timeout interrupt occur, we make a dummy 64x64 encoding on power on here to
+ * cover the hw bug.
+ */
+static void rockchip_mpp_war_init(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+       size_t img_width = 64;
+       size_t img_height = 64;
+       size_t img_y_size = img_width * img_height;
+       size_t img_uv_size = img_y_size / 2;    /* NV12: UV plane is half of Y */
+       size_t img_u_size = img_uv_size / 2;
+       size_t img_size = img_y_size + img_uv_size;
+
+       enc->war_reg = &mpp_rkvenc_dummy_reg;
+
+       /*
+        * Carve the single pre-allocated DMA buffer (war_dma_addr) into the
+        * regions the dummy frame needs: reconstructed frame write (rfpw),
+        * source Y/U/V planes and the bitstream buffer window (bsbb..bsbt,
+        * with read/write pointers starting at the bottom).
+        * 4k align required
+        */
+       enc->war_reg->adr_rfpw = enc->war_dma_addr;
+       enc->war_reg->adr_srcy = enc->war_reg->adr_rfpw + img_size;
+       enc->war_reg->adr_srcu = enc->war_reg->adr_srcy + img_y_size;
+       enc->war_reg->adr_srcv = enc->war_reg->adr_srcu + img_u_size;
+       enc->war_reg->adr_bsbb = enc->war_reg->adr_srcv + img_u_size;
+       enc->war_reg->adr_bsbt = enc->war_reg->adr_bsbb + img_size;
+       enc->war_reg->adr_bsbr = enc->war_reg->adr_bsbb;
+       enc->war_reg->adr_bsbw = enc->war_reg->adr_bsbb;
+
+       /* 1k align required; dspw/dspr presumably downscale buffers -- TODO confirm */
+       enc->war_reg->adr_dspw = enc->war_dma_addr + 0x4000;
+       enc->war_reg->adr_dspr = enc->war_reg->adr_dspw + 0x400;
+
+       /* if this allocation fails the workaround is simply skipped */
+       enc->dummy_ctx = kzalloc(sizeof(*enc->dummy_ctx), GFP_KERNEL);
+       if (!enc->dummy_ctx)
+               return;
+
+       /* build a one-frame context carrying the template registers above */
+       enc->dummy_ctx->ictx.mpp = mpp;
+       enc->dummy_ctx->ictx.session = NULL;
+       enc->dummy_ctx->mode = RKVENC_MODE_ONEFRAME;
+       enc->dummy_ctx->cfg.mode = RKVENC_MODE_ONEFRAME;
+       atomic_set(&enc->dummy_ctx_in_used, 0);
+       memcpy(enc->dummy_ctx->cfg.elem[0].reg, enc->war_reg,
+              sizeof(*enc->war_reg));
+       enc->dummy_ctx->cfg.elem[0].reg_num = sizeof(*enc->war_reg) / 4;
+}
+
+/*
+ * Program the OSD palette registers for a session.
+ *
+ * @mpp:      the rkvenc device
+ * @isession: session owning the palette; NULL for the dummy (workaround)
+ *            context, in which case nothing is written.
+ *
+ * No-op unless the session carries a valid palette. The OSD clock select
+ * bit is cleared while the RKVENC_OSD_PLT() entries are written and set
+ * again afterwards.
+ */
+static void rockchip_mpp_rkvenc_cfg_palette(struct rockchip_mpp_dev *mpp,
+                                           struct mpp_session *isession)
+{
+       struct rkvenc_session *session;
+       int i;
+       u32 reg;
+
+       mpp_debug_enter();
+
+       if (!isession) {
+               mpp_debug(DEBUG_TASK_INFO, "fake ctx, do not cfg palette\n");
+               /* keep enter/leave tracing balanced on early return */
+               mpp_debug_leave();
+               return;
+       }
+       session = to_rkvenc_session(isession);
+
+       if (!session->palette_valid) {
+               mpp_debug_leave();
+               return;
+       }
+
+       reg = mpp_read(mpp, RKVENC_OSD_CFG);
+       mpp_write(mpp, reg & (~RKVENC_OSD_CLK_SEL_BIT), RKVENC_OSD_CFG);
+
+       for (i = 0; i < RKVENC_OSD_PLT_LEN; i++)
+               mpp_write(mpp, session->palette.plalette[i].elem,
+                         RKVENC_OSD_PLT(i));
+
+       mpp_write(mpp, reg | RKVENC_OSD_CLK_SEL_BIT, RKVENC_OSD_CFG);
+
+       mpp_debug_leave();
+}
+
+/*
+ * Allocate and initialize an encoder context from a userspace blob.
+ *
+ * Copies at most sizeof(ctx->cfg) bytes from @src, validates the running
+ * mode and the register-table count, then translates the buffer file
+ * handles of every table element into device addresses.
+ *
+ * Returns the embedded common context on success, NULL on failure.
+ * When the hardware is powered off, also queues the one-shot dummy
+ * warm-up context (first-frame hardware bug workaround).
+ */
+static struct mpp_ctx *rockchip_mpp_rkvenc_init(struct rockchip_mpp_dev *mpp,
+                                               struct mpp_session *session,
+                                               void __user *src, u32 size)
+{
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+       struct rkvenc_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       int i;
+
+       mpp_debug_enter();
+
+       if (!ctx)
+               return NULL;
+
+       /* HW defeat workaround start */
+       if (!mpp_dev_is_power_on(mpp) && enc->dummy_ctx &&
+           atomic_inc_return(&enc->dummy_ctx_in_used) == 1) {
+               mpp_debug(DEBUG_RESET, "add a dummy ctx\n");
+               mpp_srv_pending_locked(mpp->srv, &enc->dummy_ctx->ictx);
+       }
+
+       mpp_dev_common_ctx_init(mpp, &ctx->ictx);
+       ctx->ictx.session = session;
+
+       /* never copy more than the local configuration can hold */
+       if (size > sizeof(ctx->cfg))
+               size = sizeof(ctx->cfg);
+
+       if (copy_from_user(&ctx->cfg, src, size)) {
+               mpp_err("error: copy_from_user failed in reg_init\n");
+               goto fail;
+       }
+
+       ctx->mode = ctx->cfg.mode;
+       if (ctx->mode >= RKVENC_MODE_NUM || ctx->mode == RKVENC_MODE_NONE) {
+               mpp_err("Invalid rkvenc running mode %d\n", (int)ctx->mode);
+               goto fail;
+       } else if (ctx->mode == RKVENC_MODE_ONEFRAME && ctx->cfg.tbl_num > 1) {
+               mpp_err("Configuration miss match, ignore redundant cfg\n");
+               ctx->cfg.tbl_num = 1;
+       }
+
+       /* tbl_num comes from userspace: reject values past elem[] capacity */
+       if (ctx->cfg.tbl_num > ARRAY_SIZE(ctx->cfg.elem)) {
+               mpp_err("Invalid rkvenc table number %u\n", ctx->cfg.tbl_num);
+               goto fail;
+       }
+
+       mpp_debug(DEBUG_SET_REG, "tbl num %u, mode %u\n",
+                 ctx->cfg.tbl_num, ctx->cfg.mode);
+
+       for (i = 0; i < ctx->cfg.tbl_num; i++) {
+               if (mpp_reg_address_translate(mpp, ctx->cfg.elem[i].reg,
+                                             &ctx->ictx, 0) < 0) {
+                       mpp_err("error: translate reg address failed.\n");
+
+                       if (unlikely(mpp_dev_debug & DEBUG_DUMP_ERR_REG))
+                               mpp_dump_reg_mem(ctx->cfg.elem[i].reg,
+                                                ctx->cfg.elem[i].reg_num);
+                       goto fail;
+               }
+
+               mpp_debug(DEBUG_SET_REG, "extra info cnt %u, magic %08x",
+                         ctx->cfg.elem[i].ext_inf.cnt,
+                         ctx->cfg.elem[i].ext_inf.magic);
+
+               mpp_translate_extra_info(&ctx->ictx, &ctx->cfg.elem[i].ext_inf,
+                                        ctx->cfg.elem[i].reg);
+       }
+
+       mpp_debug_leave();
+
+       return &ctx->ictx;
+
+fail:
+       /* undo mpp_dev_common_ctx_init() on every error path, not just the
+        * translate failure as before
+        */
+       mpp_dev_common_ctx_deinit(mpp, &ctx->ictx);
+       kfree(ctx);
+       return NULL;
+}
+
+/*
+ * Look up the optional aclk/hclk/core reset lines from DT.
+ * A missing line is only logged and NULLed out, which disables the
+ * hardware reset path in rockchip_mpp_rkvenc_reset(). Always returns 0.
+ */
+static int rockchip_mpp_rkvenc_reset_init(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+
+       mpp_debug(DEBUG_RESET, "reset init in:\n");
+       enc->rst_a = devm_reset_control_get(mpp->dev, "video_a");
+       enc->rst_h = devm_reset_control_get(mpp->dev, "video_h");
+       enc->rst_v = devm_reset_control_get(mpp->dev, "video_c");
+
+       if (IS_ERR_OR_NULL(enc->rst_a)) {
+               mpp_err("No aclk reset resource define\n");
+               enc->rst_a = NULL;
+       }
+
+       if (IS_ERR_OR_NULL(enc->rst_h)) {
+               mpp_err("No hclk reset resource define\n");
+               enc->rst_h = NULL;
+       }
+
+       if (IS_ERR_OR_NULL(enc->rst_v)) {
+               mpp_err("No core reset resource define\n");
+               enc->rst_v = NULL;
+       }
+
+       return 0;
+}
+
+/*
+ * Hard-reset the encoder: disable interrupts, request a safe clear,
+ * poll the status until the hardware reports it is idle (or ~10ms
+ * elapse), then pulse all three reset lines. No-op unless all reset
+ * controls were found at init time. Always returns 0.
+ */
+static int rockchip_mpp_rkvenc_reset(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+       int cnt = 100;
+
+       if (enc->rst_a && enc->rst_h && enc->rst_v) {
+               mpp_debug(DEBUG_RESET, "reset in\n");
+               mpp_write(mpp, 0, RKVENC_INT_EN);
+               mpp_write(mpp, 1, RKVENC_SAFE_CLR);
+               while (cnt-- > 0) {
+                       int status;
+
+                       usleep_range(100, 200);
+                       status = mpp_read(mpp, RKVENC_ENC_STATUS);
+                       /* bit 2 presumably means safe-clear done — confirm against TRM */
+                       if (status & 4) {
+                               mpp_debug(DEBUG_RESET, "st_enc %08x\n", status);
+                               break;
+                       }
+               }
+               reset_control_assert(enc->rst_v);
+               reset_control_assert(enc->rst_a);
+               reset_control_assert(enc->rst_h);
+
+               udelay(1);
+
+               reset_control_deassert(enc->rst_v);
+               reset_control_deassert(enc->rst_a);
+               reset_control_deassert(enc->rst_h);
+               mpp_debug(DEBUG_RESET, "reset out\n");
+       }
+       return 0;
+}
+
+/*
+ * Try to chain the pending context onto hardware that is already
+ * running in link-table update mode, without stopping the encoder.
+ *
+ * Returns 0 when the pending ctx was appended (or when the service is
+ * idle, so the normal start path applies) and -1 when the link-table
+ * chaining preconditions are not met — the caller presumably falls
+ * back to a regular start then (TODO confirm against mpp_dev_common).
+ */
+static int rockchip_mpp_rkvenc_prepare(struct rockchip_mpp_dev *mpp)
+{
+       struct rkvenc_ctx *ctx_curr;
+       struct rkvenc_ctx *ctx_ready;
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+       u32 lkt_status;
+       u32 fnum_int;
+       u32 fnum_cfg;
+       u32 fnum_enc;
+       u8 *cpu_addr;
+       int i;
+
+       u32 reg = 0;
+
+       mpp_debug_enter();
+
+       if (!mpp_srv_is_running(mpp->srv))
+               return 0;
+
+       /* if service running, determine link table mode */
+       ctx_curr = to_rkvenc_ctx(mpp_srv_get_current_ctx(mpp->srv));
+       ctx_ready = to_rkvenc_ctx(mpp_srv_get_pending_ctx(mpp->srv));
+
+       if (ctx_curr->mode != RKVENC_MODE_LINKTABLE_UPDATE ||
+           ctx_ready->mode != ctx_curr->mode) {
+               mpp_debug(DEBUG_TASK_INFO,
+                         "link table condition not fulfill\n");
+               return -1;
+       }
+
+       lkt_status = mpp_read(mpp, RKVENC_LKT_STATUS);
+       fnum_int = RKVENC_LKT_STATUS_FNUM_INT(lkt_status);
+       fnum_cfg = RKVENC_LKT_STATUS_FNUM_CFG(lkt_status);
+       fnum_enc = RKVENC_LKT_STATUS_FNUM_ENC(lkt_status);
+       /* append after the last configured frame in the link-table buffer */
+       cpu_addr = (u8 *)enc->lkt_cpu_addr + fnum_cfg * LINK_TABLE_LEN * 4;
+
+       mpp_dev_power_on(mpp);
+
+       mpp_debug(DEBUG_GET_REG, "frame number int %u, cfg %u, enc %u\n",
+                 fnum_int, fnum_cfg, fnum_enc);
+
+       /* copy only the link-table portion of each register set */
+       for (i = 0; i < ctx_ready->cfg.tbl_num; i++) {
+               u32 *src = ctx_ready->cfg.elem[i].reg;
+
+               memcpy(cpu_addr + i * LINK_TABLE_LEN * 4,
+                      &src[LINK_TABLE_START], LINK_TABLE_LEN * 4);
+       }
+
+       reg = RKVENC_CLK_GATE_EN |
+               RKVENC_CMD(ctx_curr->mode) |
+               RKVENC_LKT_NUM(ctx_ready->cfg.tbl_num);
+       mpp_write_relaxed(mpp, reg, RKVENC_ENC_START);
+
+       /* remove from pending queue */
+       mpp_dev_common_ctx_deinit(mpp, &ctx_ready->ictx);
+
+       mpp_debug_leave();
+
+       return 0;
+}
+
+/*
+ * Program the hardware for the current context and kick off encoding.
+ * One-frame mode writes the whole register image directly; link-table
+ * modes copy the per-frame link tables into the shared DMA buffer and
+ * start via the link-table command. Always returns 0.
+ */
+static int rockchip_mpp_rkvenc_run(struct rockchip_mpp_dev *mpp)
+{
+       struct rkvenc_ctx *ctx =
+                       to_rkvenc_ctx(mpp_srv_get_current_ctx(mpp->srv));
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+       u32 reg;
+       int i;
+
+       mpp_debug_enter();
+
+       switch (ctx->mode) {
+       case RKVENC_MODE_ONEFRAME:
+               {
+                       u32 *src = ctx->cfg.elem[0].reg;
+
+                       /* skip regs 0/1 — presumably start/clear, written last */
+                       for (i = 2; i < (LINK_TABLE_START + LINK_TABLE_LEN); i++)
+                               mpp_write_relaxed(mpp, src[i], i * 4);
+
+                       rockchip_mpp_rkvenc_cfg_palette(mpp, ctx->ictx.session);
+
+                       mpp_write_relaxed(mpp, 0x1ff, RKVENC_INT_EN);
+                       reg = RKVENC_CLK_GATE_EN
+                               | RKVENC_CMD(1);
+                       mpp_write(mpp, reg, RKVENC_ENC_START);
+
+                       break;
+               }
+       case RKVENC_MODE_LINKTABLE_FIX:
+       case RKVENC_MODE_LINKTABLE_UPDATE:
+               {
+                       for (i = 0; i < ctx->cfg.tbl_num; i++) {
+                               u32 *src = ctx->cfg.elem[i].reg;
+
+                               memcpy(enc->lkt_cpu_addr +
+                                      i * LINK_TABLE_LEN * 4,
+                                      &src[LINK_TABLE_START],
+                                      LINK_TABLE_LEN * 4);
+                       }
+
+                       rockchip_mpp_rkvenc_cfg_palette(mpp, ctx->ictx.session);
+
+                       mpp_write_relaxed(mpp,
+                                         enc->lkt_dma_addr,
+                                         RKVENC_LKT_ADDR);
+                       mpp_write_relaxed(mpp, 0xffffffff, RKVENC_INT_EN);
+
+                       /*
+                        * NOTE(review): both link-table modes start with the
+                        * FIX command here; UPDATE chaining happens later in
+                        * rockchip_mpp_rkvenc_prepare() — confirm intended.
+                        */
+                       reg = RKVENC_LKT_NUM(ctx->cfg.tbl_num) |
+                               RKVENC_CMD(RKVENC_MODE_LINKTABLE_FIX) |
+                               RKVENC_CLK_GATE_EN;
+
+                       mpp_write_relaxed(mpp, reg, RKVENC_ENC_START);
+
+                       break;
+               }
+       default:
+               break;
+       }
+
+       mpp_debug_leave();
+
+       return 0;
+}
+
+/*
+ * Collect encode results for the finished context. Stores per-frame
+ * status/result words into ctx->result so a later .result() call can
+ * return them to user space. Returns -1 when there is no valid context
+ * or when the finished context is the dummy workaround context (which
+ * must not wake the service's done path), 0 otherwise.
+ */
+static int rockchip_mpp_rkvenc_done(struct rockchip_mpp_dev *mpp)
+{
+       struct mpp_ctx *ictx = mpp_srv_get_current_ctx(mpp->srv);
+       struct rkvenc_ctx *ctx;
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+       struct rkvenc_result *result;
+       int i;
+
+       mpp_debug_enter();
+
+       if (IS_ERR_OR_NULL(ictx)) {
+               mpp_err("Invaidate context to save result\n");
+               return -1;
+       }
+
+       ctx = to_rkvenc_ctx(ictx);
+
+       if (enc->irq_status & RKVENC_INT_ERROR_BITS)
+               /*
+                * according to war running, if the dummy encoding
+                * running with timeout, we enable a safe clear process,
+                * we reset the ip, and complete the war procedure.
+                */
+               atomic_inc(&mpp->reset_request);
+
+       if (ctx == enc->dummy_ctx) {
+               mpp_debug(DEBUG_RESET, "war done\n");
+
+               /* for war do not trigger service done process */
+               list_del_init(&ictx->status_link);
+               atomic_set(&enc->dummy_ctx_in_used, 0);
+
+               /* dummy ctx, do not trigger service to wake up done process */
+               return -1;
+       }
+
+       result = &ctx->result;
+       switch (ctx->mode) {
+       case RKVENC_MODE_ONEFRAME:
+               result->tbl_num = 1;
+               result->elem[0].status = enc->irq_status;
+               for (i = 0; i < sizeof(result->elem[0].result) / 4; i++)
+                       result->elem[0].result[i] =
+                                                   mpp_read(mpp,
+                                                            RKVENC_STATUS(i));
+               break;
+       case RKVENC_MODE_LINKTABLE_FIX:
+       case RKVENC_MODE_LINKTABLE_UPDATE:
+               {
+                       u32 lkt_status = mpp_read(mpp, RKVENC_LKT_STATUS);
+                       u32 fnum_int = RKVENC_LKT_STATUS_FNUM_INT(lkt_status);
+                       u32 fnum_cfg = RKVENC_LKT_STATUS_FNUM_CFG(lkt_status);
+                       u32 fnum_enc = RKVENC_LKT_STATUS_FNUM_ENC(lkt_status);
+
+                       u32 *lkt_cpu_addr = (u32 *)enc->lkt_cpu_addr;
+
+                       if (unlikely(mpp_dev_debug & DEBUG_DUMP_ERR_REG))
+                               mpp_dump_reg_mem(lkt_cpu_addr, LINK_TABLE_LEN);
+
+                       /*
+                        * fnum_int is read back from hardware; clamp it so
+                        * the loop below cannot write past result->elem[].
+                        */
+                       if (fnum_int > ARRAY_SIZE(result->elem))
+                               fnum_int = ARRAY_SIZE(result->elem);
+
+                       result->tbl_num = fnum_int;
+                       for (i = 0; i < fnum_int; i++) {
+                               result->elem[i].status = enc->irq_status;
+                               memcpy(result->elem[i].result,
+                                      &lkt_cpu_addr[i * LINK_TABLE_LEN + 120],
+                                      sizeof(result->elem[i].result));
+                               mpp_debug(DEBUG_GET_REG, "stream length %u\n",
+                                         result->elem[i].result[0]);
+                       }
+                       mpp_debug(DEBUG_GET_REG, "frame number %u, %u, %u\n",
+                                 fnum_int, fnum_cfg, fnum_enc);
+                       break;
+               }
+       default:
+               break;
+       }
+
+       mpp_debug_leave();
+
+       return 0;
+}
+
+/*
+ * Top-half IRQ handler hook: latch the interrupt status for the done
+ * path, acknowledge all bits, and mask error interrupts on failure.
+ * Returns -1 for a spurious interrupt (status == 0), 0 otherwise.
+ */
+static int rockchip_mpp_rkvenc_irq(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+
+       enc->irq_status = mpp_read(mpp, RKVENC_INT_STATUS);
+
+       mpp_debug_enter();
+
+       if (enc->irq_status == 0)
+               return -1;
+
+       mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", enc->irq_status);
+       mpp_write(mpp, 0xffffffff, RKVENC_INT_CLR);
+       if (enc->irq_status & RKVENC_INT_ERROR_BITS) {
+               mpp_err("error irq %08x\n", enc->irq_status);
+               /* time out error */
+               mpp_write(mpp, RKVENC_INT_ERROR_BITS, RKVENC_INT_MSK);
+       }
+
+       mpp_debug_leave();
+
+       return 0;
+}
+
+/*
+ * Copy the saved encode results back to user space. The copy length is
+ * derived from result->tbl_num, which is bounded here by the capacity
+ * of result->elem[] so a corrupted count can never leak kernel memory
+ * beyond the result structure. Returns 0 on success, -1 on failure.
+ */
+static int rockchip_mpp_rkvenc_result(struct rockchip_mpp_dev *mpp,
+                                     struct mpp_ctx *ictx, u32 __user *dst)
+{
+       struct rkvenc_ctx *ctx = to_rkvenc_ctx(ictx);
+       struct rkvenc_result *result = &ctx->result;
+       u32 tbl_num = min_t(u32, result->tbl_num,
+                           ARRAY_SIZE(result->elem));
+       unsigned long tbl_size = sizeof(result->tbl_num) +
+                                sizeof(result->elem[0]) * tbl_num;
+
+       switch (ctx->mode) {
+       case RKVENC_MODE_ONEFRAME:
+       case RKVENC_MODE_LINKTABLE_FIX:
+       case RKVENC_MODE_LINKTABLE_UPDATE:
+               {
+                       if (copy_to_user(dst, &ctx->result, tbl_size)) {
+                               mpp_err("copy result to user failed\n");
+                               return -1;
+                       }
+                       break;
+               }
+       default:
+               mpp_err("invalid context mode %d\n", (int)ctx->mode);
+               return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Device-specific ioctl handler. Currently only supports setting the
+ * per-session OSD color palette. Unknown commands are logged and
+ * ignored (returning 0, matching the original contract with the
+ * common dispatcher).
+ */
+static long rockchip_mpp_rkvenc_ioctl(struct mpp_session *isession,
+                                     unsigned int cmd,
+                                     unsigned long arg)
+{
+       struct rkvenc_session *session = to_rkvenc_session(isession);
+
+       mpp_debug_enter();
+
+       switch (cmd) {
+       case MPP_DEV_RKVENC_SET_COLOR_PALETTE:
+               if (copy_from_user(&session->palette, (void __user *)arg,
+                                  sizeof(session->palette))) {
+                       mpp_err("copy palette from user failed\n");
+                       /* a faulting user pointer is -EFAULT, not -EINVAL */
+                       return -EFAULT;
+               }
+               session->palette_valid = true;
+
+               break;
+       default:
+               mpp_err("%s, unknown ioctl cmd %x\n",
+                       dev_name(isession->mpp->dev), cmd);
+               break;
+       }
+
+       mpp_debug_leave();
+
+       return 0;
+}
+
+/*
+ * Allocate a per-open rkvenc session wrapper. Returns the embedded
+ * generic mpp_session, or NULL on allocation failure.
+ */
+static struct mpp_session *mpp_dev_rkvenc_open(struct rockchip_mpp_dev *mpp)
+{
+       struct rkvenc_session *session = kzalloc(sizeof(*session), GFP_KERNEL);
+
+       mpp_debug_enter();
+
+       if (!session)
+               return NULL;
+
+       /* redundant after kzalloc, kept explicit for readability */
+       session->palette_valid = false;
+
+       mpp_debug_leave();
+
+       return &session->isession;
+}
+
+/* Free the per-open session wrapper allocated by mpp_dev_rkvenc_open(). */
+static void mpp_dev_rkvenc_release(struct mpp_session *isession)
+{
+       struct rkvenc_session *session = to_rkvenc_session(isession);
+
+       kfree(session);
+}
+
+struct mpp_dev_ops rkvenc_ops = {
+       .init = rockchip_mpp_rkvenc_init,
+       .prepare = rockchip_mpp_rkvenc_prepare,
+       .run = rockchip_mpp_rkvenc_run,
+       .done = rockchip_mpp_rkvenc_done,
+       .irq = rockchip_mpp_rkvenc_irq,
+       .result = rockchip_mpp_rkvenc_result,
+       .ioctl = rockchip_mpp_rkvenc_ioctl,
+       .open = mpp_dev_rkvenc_open,
+       .release = mpp_dev_rkvenc_release,
+};
+
+/* Enable aclk/hclk/core clocks, then force a reset (see comment below). */
+static void rockchip_mpp_rkvenc_power_on(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+
+       if (enc->aclk)
+               clk_prepare_enable(enc->aclk);
+       if (enc->hclk)
+               clk_prepare_enable(enc->hclk);
+       if (enc->core)
+               clk_prepare_enable(enc->core);
+
+       /*
+        * Because hw cannot reset status fully in all its modules, we make a
+        * reset here to make sure the hw status fully reset.
+        */
+       rockchip_mpp_rkvenc_reset(mpp);
+}
+
+/* Disable the clocks in reverse order of power_on. */
+static void rockchip_mpp_rkvenc_power_off(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+
+       if (enc->core)
+               clk_disable_unprepare(enc->core);
+       if (enc->hclk)
+               clk_disable_unprepare(enc->hclk);
+       if (enc->aclk)
+               clk_disable_unprepare(enc->aclk);
+}
+
+/*
+ * Hardware-specific probe: allocate and map the link-table DMA buffer,
+ * set up the hardware-defeat workaround buffers, grab clocks and reset
+ * lines. Returns 0 on success, -1 on failure (all buffers acquired so
+ * far are released on the fail path).
+ */
+static int rockchip_mpp_rkvenc_probe(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+       struct mpp_session *session = list_first_entry(&mpp->srv->session,
+                                                      struct mpp_session,
+                                                      list_session);
+       int ret;
+       size_t tmp;
+
+       enc->idev.ops = &rkvenc_ops;
+       /*
+        * The device struct is zero-initialized, so war_hdl would be 0
+        * and wrongly pass the ">= 0" check on the fail path before the
+        * workaround buffer is allocated; mark it invalid explicitly.
+        */
+       enc->war_hdl = -1;
+
+       enc->lkt_hdl = vpu_iommu_alloc(mpp->iommu_info, session,
+                                      LINK_TABLE_LEN * 4 * 256,
+                                      MPP_ALIGN_SIZE);
+
+       if (enc->lkt_hdl < 0) {
+               dev_err(mpp->dev, "allocate link table buffer failure\n");
+               return -1;
+       }
+
+       ret = vpu_iommu_map_iommu(mpp->iommu_info, session,
+                                 enc->lkt_hdl, &enc->lkt_dma_addr, &tmp);
+
+       if (ret < 0) {
+               dev_err(mpp->dev, "get link table dma_addr failed\n");
+               goto fail;
+       }
+
+       enc->lkt_cpu_addr = vpu_iommu_map_kernel(mpp->iommu_info,
+                                                session, enc->lkt_hdl);
+       /* the cpu mapping is dereferenced in prepare/run/done — verify it */
+       if (IS_ERR_OR_NULL(enc->lkt_cpu_addr)) {
+               dev_err(mpp->dev, "map link table to kernel failed\n");
+               enc->lkt_cpu_addr = NULL;
+               goto fail;
+       }
+
+       /*
+        * buffer for workaround context running, include input picture, output
+        * stream, reconstruction picture. we set the output stream buffer to 1
+        * time picture size, so the total buffer size is 3 times picture size,
+        * 64 * 64 * 3 / 2 * 3 = 4.5 * 4k.
+        */
+       enc->war_hdl = vpu_iommu_alloc(mpp->iommu_info, session,
+                                      MPP_ALIGN_SIZE * 5,
+                                      MPP_ALIGN_SIZE);
+       if (enc->war_hdl < 0) {
+               dev_err(mpp->dev, "allocate workaround buffer failure\n");
+               goto fail;
+       }
+
+       ret = vpu_iommu_map_iommu(mpp->iommu_info, session,
+                                 enc->war_hdl, &enc->war_dma_addr, &tmp);
+
+       if (ret < 0) {
+               dev_err(mpp->dev, "get war dma_addr failed\n");
+               goto fail;
+       }
+
+       rockchip_mpp_war_init(mpp);
+
+       enc->aclk = devm_clk_get(mpp->dev, "aclk_vcodec");
+       if (IS_ERR_OR_NULL(enc->aclk)) {
+               dev_err(mpp->dev, "failed on clk_get aclk\n");
+               goto fail;
+       }
+
+       enc->hclk = devm_clk_get(mpp->dev, "hclk_vcodec");
+       if (IS_ERR_OR_NULL(enc->hclk)) {
+               dev_err(mpp->dev, "failed on clk_get hclk\n");
+               goto fail;
+       }
+
+       enc->core = devm_clk_get(mpp->dev, "clk_core");
+       if (IS_ERR_OR_NULL(enc->core)) {
+               dev_err(mpp->dev, "failed on clk_get core\n");
+               goto fail;
+       }
+
+       rockchip_mpp_rkvenc_reset_init(mpp);
+
+       return 0;
+
+fail:
+       /* dummy_ctx is allocated by rockchip_mpp_war_init(); kfree(NULL) is safe */
+       kfree(enc->dummy_ctx);
+
+       if (enc->war_hdl >= 0) {
+               vpu_iommu_unmap_iommu(mpp->iommu_info,
+                                     session, enc->war_hdl);
+               vpu_iommu_free(mpp->iommu_info, session, enc->war_hdl);
+       }
+       if (enc->lkt_cpu_addr)
+               vpu_iommu_unmap_kernel(mpp->iommu_info, session, enc->lkt_hdl);
+       if (enc->lkt_hdl >= 0) {
+               vpu_iommu_unmap_iommu(mpp->iommu_info,
+                                     session, enc->lkt_hdl);
+               vpu_iommu_free(mpp->iommu_info, session, enc->lkt_hdl);
+       }
+
+       return -1;
+}
+
+/*
+ * Tear down everything acquired in rockchip_mpp_rkvenc_probe():
+ * link-table and workaround buffers, plus the dummy workaround ctx
+ * (kfree(NULL) is a no-op if it was never created).
+ */
+static void rockchip_mpp_rkvenc_remove(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_rkvenc_dev *enc = to_rkvenc_dev(mpp);
+       struct mpp_session *session = list_first_entry(&mpp->srv->session,
+                                                      struct mpp_session,
+                                                      list_session);
+
+       vpu_iommu_unmap_kernel(mpp->iommu_info, session, enc->lkt_hdl);
+       vpu_iommu_unmap_iommu(mpp->iommu_info,
+                             session, enc->lkt_hdl);
+       vpu_iommu_free(mpp->iommu_info, session, enc->lkt_hdl);
+
+       vpu_iommu_unmap_iommu(mpp->iommu_info,
+                             session, enc->war_hdl);
+       vpu_iommu_free(mpp->iommu_info, session, enc->war_hdl);
+
+       kfree(enc->dummy_ctx);
+}
+
+const struct rockchip_mpp_dev_variant rkvenc_variant = {
+       .data_len = sizeof(struct rockchip_rkvenc_dev),
+       .reg_len = 140,
+       .trans_info = trans_rkvenc,
+       .hw_probe = rockchip_mpp_rkvenc_probe,
+       .hw_remove = rockchip_mpp_rkvenc_remove,
+       .power_on = rockchip_mpp_rkvenc_power_on,
+       .power_off = rockchip_mpp_rkvenc_power_off,
+       .reset = rockchip_mpp_rkvenc_reset,
+};
+EXPORT_SYMBOL(rkvenc_variant);
+
diff --git a/drivers/video/rockchip/vpu/mpp_dev_rkvenc.h b/drivers/video/rockchip/vpu/mpp_dev_rkvenc.h
new file mode 100644 (file)
index 0000000..a9fad2c
--- /dev/null
@@ -0,0 +1,177 @@
+/**
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: chenhengming chm@rock-chips.com
+ *        Alpha Lin, alpha.lin@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ROCKCHIP_MPP_DEV_RKVENC_H
+#define __ROCKCHIP_MPP_DEV_RKVENC_H
+
+union rkvenc_osd_palette_elem {
+       struct  {
+               u8 y;
+               u8 u;
+               u8 v;
+               u8 alpha;
+       };
+       u32 elem;
+};
+
+#define        RKVENC_OSD_PLT_LEN              256
+/*
+ * 256-entry YUVA palette for the OSD hardware.
+ * NOTE(review): "plalette" is a misspelling of "palette"; kept as-is
+ * here because the field is referenced elsewhere in this patch.
+ */
+struct rkvenc_osd_palette {
+       union rkvenc_osd_palette_elem plalette[RKVENC_OSD_PLT_LEN];
+};
+
+#define MPP_DEV_RKVENC_SET_COLOR_PALETTE       \
+                       _IOW(MPP_IOC_MAGIC, MPP_IOC_CUSTOM_BASE + 1,    \
+                       struct rkvenc_osd_palette)
+
+struct rkvenc_config_elem {
+       u32 reg_num;
+       u32 reg[140];
+       struct extra_info_for_iommu ext_inf;
+};
+
+struct rkvenc_config {
+       u32 mode;
+       u32 tbl_num;
+       struct rkvenc_config_elem elem[10];
+};
+
+struct rkvenc_result_elem {
+       u32 status;
+       u32 result[11];
+};
+
+struct rkvenc_result {
+       u32 tbl_num;
+       struct rkvenc_result_elem elem[10];
+};
+
+struct rkvenc_ctx {
+       struct mpp_ctx ictx;
+       enum RKVENC_MODE mode;
+
+       struct rkvenc_config cfg;
+
+       /* store status read from hw, oneframe mode used only */
+       struct rkvenc_result result;
+};
+
+struct rkvenc_session {
+       struct mpp_session isession;
+
+       struct rkvenc_osd_palette palette;
+       bool palette_valid;
+};
+
+struct mpp_dev_rkvenc_reg {
+       u32 unused_00;
+       u32 enc_strt;
+       u32 enc_clr;
+       u32 lkt_addr;
+       u32 int_en;
+       u32 int_msk;
+       u32 int_clr;
+       u32 unused_20[4];
+       u32 int_stus;
+       /* 12 */
+       u32 enc_rsl;
+       u32 enc_pic;
+       u32 enc_wdg;
+       u32 dtrns_map;
+       u32 dtrns_cfg;
+       u32 src_fmt;
+       u32 src_udfy;
+       u32 src_udfu;
+       u32 src_udfv;
+       u32 src_udfo;
+       u32 src_proc;
+       u32 src_tthrd;
+       u32 src_stbl[5];
+       u32 h3d_tbl[40];
+       u32 src_strd;
+       u32 adr_srcy;
+       u32 adr_srcu;
+       u32 adr_srcv;
+       u32 adr_fltw;
+       u32 adr_fltr;
+       u32 adr_ctuc;
+       u32 adr_rfpw;
+       u32 adr_rfpr;
+       u32 adr_cmvw;
+       u32 adr_cmvr;
+       u32 adr_dspw;
+       u32 adr_dspr;
+       u32 adr_meiw;
+       u32 adr_bsbt;
+       u32 adr_bsbb;
+       u32 adr_bsbr;
+       u32 adr_bsbw;
+       u32 sli_spl;
+       u32 sli_spl_byte;
+       u32 me_rnge;
+       u32 me_cnst;
+       u32 me_ram;
+       u32 rc_cfg;
+       u32 rc_erp[5];
+       u32 rc_adj[2];
+       u32 rc_qp;
+       u32 rc_tgt;
+       u32 rdo_cfg;
+       u32 synt_nal;
+       u32 synt_sps;
+       u32 synt_pps;
+       u32 synt_sli0;
+       u32 synt_sli1;
+       u32 synt_sli2_rodr;
+       u32 synt_ref_mark0;
+       u32 synt_ref_mark1;
+       u32 osd_cfg;
+       u32 osd_inv;
+       u32 unused_1c8[2];
+       u32 osd_pos[8];
+       u32 osd_addr[8];
+       u32 unused_210[9];
+};
+
+struct rockchip_rkvenc_dev {
+       struct rockchip_mpp_dev idev;
+       unsigned long lkt_dma_addr;
+       int lkt_hdl;
+       void *lkt_cpu_addr;
+       u32 irq_status;
+       unsigned long war_dma_addr;
+       int war_hdl;
+       struct mpp_dev_rkvenc_reg *war_reg;
+       struct rkvenc_ctx *dummy_ctx;
+       atomic_t dummy_ctx_in_used;
+
+       struct clk *aclk;
+       struct clk *hclk;
+       struct clk *core;
+
+       struct reset_control *rst_a;
+       struct reset_control *rst_h;
+       struct reset_control *rst_v;
+};
+
+struct link_table_elem {
+       unsigned long lkt_dma_addr;
+       int lkt_hdl;
+       void *lkt_cpu_addr;
+       u32 lkt_index;
+       struct list_head list;
+};
+
+#endif
diff --git a/drivers/video/rockchip/vpu/mpp_dev_vepu.c b/drivers/video/rockchip/vpu/mpp_dev_vepu.c
new file mode 100644 (file)
index 0000000..27148b7
--- /dev/null
@@ -0,0 +1,358 @@
+/**
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ *        Alpha Lin, alpha.lin@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/types.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/rockchip/grf.h>
+#include <linux/rockchip/pmu.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "vpu_iommu_ops.h"
+#include "mpp_dev_common.h"
+#include "mpp_dev_vepu.h"
+#include "mpp_service.h"
+
+#define to_vepu_ctx(ctx)               \
+               container_of(ctx, struct vepu_ctx, ictx)
+#define to_vepu_dev(dev)               \
+               container_of(dev, struct rockchip_vepu_dev, idev)
+
+#define VEPU_REG_INTERRUPT             0x1b4
+#define VEPU_REG_ENC_START             0x19c
+#define                VEPU_ENC_GET_FORMAT(x)          (((x) >> 4) & 0x3)
+#define                VEPU_ENC_FMT_VP8E               1
+#define                VEPU_ENC_ENABLE                 BIT(0)
+
+/*
+ * file handle translate information
+ */
+static const char trans_tbl_default[] = {
+       77, 78, 56, 57, 63, 64, 48, 49, 50, 81
+};
+
+static const char trans_tbl_vp8e[] = {
+       77, 78, 56, 57, 63, 64, 48, 49, 50, 76, 106, 108, 81, 80, 44, 45, 27
+};
+
+static struct mpp_trans_info trans_vepu[2] = {
+       [0] = {
+               .count = sizeof(trans_tbl_default),
+               .table = trans_tbl_default,
+       },
+       [1] = {
+               .count = sizeof(trans_tbl_vp8e),
+               .table = trans_tbl_vp8e,
+       },
+};
+
+/*
+ * Build a vepu context from a user-space register image.
+ *
+ * The blob starts with up to ROCKCHIP_VEPU_REG_LEN 32-bit registers;
+ * anything beyond that is the iommu extra-info block. The VP8E format
+ * selects a different file-handle translation table.
+ * Returns the embedded mpp_ctx on success, NULL on failure.
+ */
+static struct mpp_ctx *rockchip_mpp_vepu_init(struct rockchip_mpp_dev *mpp,
+                                             struct mpp_session *session,
+                                             void __user *src, u32 size)
+{
+       struct vepu_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       u32 reg_len;
+       u32 extinf_len;
+       u32 fmt = 0;
+       int trans_idx = 0;
+       u32 dwsize = size / sizeof(u32);
+
+       mpp_debug_enter();
+
+       if (!ctx)
+               return NULL;
+
+       mpp_dev_common_ctx_init(mpp, &ctx->ictx);
+
+       ctx->ictx.session = session;
+
+       reg_len = dwsize > ROCKCHIP_VEPU_REG_LEN ?
+                 ROCKCHIP_VEPU_REG_LEN : dwsize;
+       extinf_len = dwsize > reg_len ? (dwsize - reg_len) * 4 : 0;
+
+       if (copy_from_user(ctx->reg, src, reg_len * 4)) {
+               mpp_err("error: copy_from_user failed in reg_init\n");
+               mpp_dev_common_ctx_deinit(mpp, &ctx->ictx);
+               kfree(ctx);
+               return NULL;
+       }
+
+       if (extinf_len > 0) {
+               u32 ext_cpy = min_t(size_t, extinf_len, sizeof(ctx->ext_inf));
+
+               /*
+                * the extra info follows reg_len 32-bit registers, so its
+                * byte offset is reg_len * 4 (reg_len alone would point
+                * into the middle of the register image).
+                */
+               if (copy_from_user(&ctx->ext_inf, (u8 *)src + reg_len * 4,
+                                  ext_cpy)) {
+                       mpp_err("copy_from_user failed when extra info\n");
+                       mpp_dev_common_ctx_deinit(mpp, &ctx->ictx);
+                       kfree(ctx);
+                       return NULL;
+               }
+       }
+
+       fmt = VEPU_ENC_GET_FORMAT(ctx->reg[VEPU_REG_ENC_START / 4]);
+       if (fmt == VEPU_ENC_FMT_VP8E)
+               trans_idx = 1;
+
+       if (mpp_reg_address_translate(mpp, ctx->reg, &ctx->ictx,
+                                     trans_idx) < 0) {
+               mpp_err("error: translate reg address failed.\n");
+
+               if (unlikely(mpp_dev_debug & DEBUG_DUMP_ERR_REG))
+                       mpp_dump_reg_mem(ctx->reg, ROCKCHIP_VEPU_REG_LEN);
+
+               mpp_dev_common_ctx_deinit(mpp, &ctx->ictx);
+               kfree(ctx);
+
+               return NULL;
+       }
+
+       mpp_debug(DEBUG_SET_REG, "extra info cnt %u, magic %08x",
+                 ctx->ext_inf.cnt, ctx->ext_inf.magic);
+
+       mpp_translate_extra_info(&ctx->ictx, &ctx->ext_inf, ctx->reg);
+
+       mpp_debug_leave();
+
+       return &ctx->ictx;
+}
+
+static int rockchip_mpp_vepu_reset_init(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_vepu_dev *enc = to_vepu_dev(mpp);
+
+       mpp_debug(DEBUG_RESET, "reset init in:\n");
+       enc->rst_a = devm_reset_control_get(mpp->dev, "video_a");
+       enc->rst_h = devm_reset_control_get(mpp->dev, "video_h");
+
+       if (IS_ERR_OR_NULL(enc->rst_a)) {
+               mpp_err("No aclk reset resource define\n");
+               enc->rst_a = NULL;
+       }
+
+       if (IS_ERR_OR_NULL(enc->rst_h)) {
+               mpp_err("No hclk reset resource define\n");
+               enc->rst_h = NULL;
+       }
+
+       return 0;
+}
+
+static int rockchip_mpp_vepu_reset(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_vepu_dev *enc = to_vepu_dev(mpp);
+
+       if (enc->rst_a && enc->rst_h) {
+               mpp_debug(DEBUG_RESET, "reset in\n");
+               reset_control_assert(enc->rst_a);
+               reset_control_assert(enc->rst_h);
+
+               udelay(1);
+
+               reset_control_deassert(enc->rst_a);
+               reset_control_deassert(enc->rst_h);
+               mpp_debug(DEBUG_RESET, "reset out\n");
+       }
+       return 0;
+}
+
+/*
+ * Write the context's register image to the hardware and start
+ * encoding. The ENC_START register is written twice: first with the
+ * enable bit cleared (mode setup), then with the original value to
+ * actually kick the encoder. Always returns 0.
+ */
+static int rockchip_mpp_vepu_run(struct rockchip_mpp_dev *mpp)
+{
+       struct vepu_ctx *ctx =
+                              to_vepu_ctx(mpp_srv_get_current_ctx(mpp->srv));
+       struct rockchip_vepu_dev *enc = to_vepu_dev(mpp);
+       int i;
+
+       mpp_debug_enter();
+
+       /*
+        * before encoding running, we have to switch grf ctrl bit to ensure
+        * ip inner-sram controlled by vepu
+        */
+#ifdef CONFIG_MFD_SYSCON
+       if (enc->grf) {
+               u32 raw;
+               u32 bits = BIT(enc->mode_bit);
+
+               /* hi-16 write-enable mask pattern used by Rockchip GRF */
+               regmap_read(enc->grf, enc->mode_ctrl, &raw);
+               regmap_write(enc->grf, enc->mode_ctrl,
+                            (raw & (~bits)) | (bits << 16));
+       }
+#endif
+
+       /*
+        * NOTE: encoder need to setup mode first
+        */
+       mpp_write(mpp,
+                 ctx->reg[VEPU_REG_ENC_START / 4] & (~VEPU_ENC_ENABLE),
+                 VEPU_REG_ENC_START);
+
+       for (i = 0; i < ROCKCHIP_VEPU_REG_LEN; i++) {
+               if (i * 4 != VEPU_REG_ENC_START)
+                       mpp_write_relaxed(mpp, ctx->reg[i], i * 4);
+       }
+
+       mpp_write(mpp, ctx->reg[VEPU_REG_ENC_START / 4], VEPU_REG_ENC_START);
+
+       mpp_debug_leave();
+
+       return 0;
+}
+
+/*
+ * Read the whole register file back into the context after encoding,
+ * substituting the interrupt status latched in the irq handler.
+ * Returns -1 when no valid context is current, 0 otherwise.
+ */
+static int rockchip_mpp_vepu_done(struct rockchip_mpp_dev *mpp)
+{
+       struct mpp_ctx *ictx = mpp_srv_get_current_ctx(mpp->srv);
+       struct vepu_ctx *ctx;
+       struct rockchip_vepu_dev *enc = to_vepu_dev(mpp);
+       int i;
+
+       mpp_debug_enter();
+
+       if (IS_ERR_OR_NULL(ictx)) {
+               mpp_err("Invaidate context to save result\n");
+               return -1;
+       }
+
+       ctx = to_vepu_ctx(ictx);
+
+       for (i = 0; i < ROCKCHIP_VEPU_REG_LEN; i++)
+               ctx->reg[i] = mpp_read(mpp, i * 4);
+
+       /* report the status captured at irq time, not the live register */
+       ctx->reg[VEPU_REG_INTERRUPT / 4] = enc->irq_status;
+
+       mpp_debug_leave();
+
+       return 0;
+}
+
+/*
+ * Top-half IRQ hook: latch the interrupt status for the done path and
+ * acknowledge it by writing 0. Returns -1 for a spurious interrupt
+ * (status == 0), 0 otherwise.
+ */
+static int rockchip_mpp_vepu_irq(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_vepu_dev *enc = to_vepu_dev(mpp);
+
+       enc->irq_status = mpp_read(mpp, VEPU_REG_INTERRUPT);
+
+       mpp_debug_enter();
+
+       if (enc->irq_status == 0)
+               return -1;
+
+       mpp_debug(DEBUG_IRQ_STATUS, "irq_status: %08x\n", enc->irq_status);
+       mpp_write(mpp, 0, VEPU_REG_INTERRUPT);
+
+       mpp_debug_leave();
+
+       return 0;
+}
+
+/*
+ * Copy the full saved register image back to user space.
+ * Returns 0 on success, -1 if the user copy faults.
+ */
+static int rockchip_mpp_vepu_result(struct rockchip_mpp_dev *mpp,
+                                   struct mpp_ctx *ictx, u32 __user *dst)
+{
+       struct vepu_ctx *ctx = to_vepu_ctx(ictx);
+
+       if (copy_to_user(dst, ctx->reg, ROCKCHIP_VEPU_REG_LEN * 4)) {
+               mpp_err("copy_to_user failed\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+struct mpp_dev_ops vepu_ops = {
+       .init = rockchip_mpp_vepu_init,
+       .run = rockchip_mpp_vepu_run,
+       .done = rockchip_mpp_vepu_done,
+       .irq = rockchip_mpp_vepu_irq,
+       .result = rockchip_mpp_vepu_result,
+};
+
+static void rockchip_mpp_vepu_power_on(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_vepu_dev *enc = to_vepu_dev(mpp);
+
+       if (enc->aclk)
+               clk_prepare_enable(enc->aclk);
+       if (enc->hclk)
+               clk_prepare_enable(enc->hclk);
+}
+
+static void rockchip_mpp_vepu_power_off(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_vepu_dev *enc = to_vepu_dev(mpp);
+
+       if (enc->hclk)
+               clk_disable_unprepare(enc->hclk);
+       if (enc->aclk)
+               clk_disable_unprepare(enc->aclk);
+}
+
+static int rockchip_mpp_vepu_probe(struct rockchip_mpp_dev *mpp)
+{
+       struct rockchip_vepu_dev *enc = to_vepu_dev(mpp);
+       struct device_node *np = mpp->dev->of_node;
+
+       enc->idev.ops = &vepu_ops;
+
+       enc->aclk = devm_clk_get(mpp->dev, "aclk_vcodec");
+       if (IS_ERR_OR_NULL(enc->aclk)) {
+               dev_err(mpp->dev, "failed on clk_get aclk\n");
+               goto fail;
+       }
+
+       enc->hclk = devm_clk_get(mpp->dev, "hclk_vcodec");
+       if (IS_ERR_OR_NULL(enc->hclk)) {
+               dev_err(mpp->dev, "failed on clk_get hclk\n");
+               goto fail;
+       }
+
+       if (of_property_read_bool(np, "mode_ctrl")) {
+               of_property_read_u32(np, "mode_bit", &enc->mode_bit);
+               of_property_read_u32(np, "mode_ctrl", &enc->mode_ctrl);
+
+#ifdef COFNIG_MFD_SYSCON
+               enc->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+               if (IS_ERR_OR_NULL(enc->grf)) {
+                       enc->grf = NULL;
+                       mpp_err("can't find vpu grf property\n");
+                       goto fail;
+               }
+#endif
+       }
+
+       rockchip_mpp_vepu_reset_init(mpp);
+
+       return 0;
+
+fail:
+       return -1;
+}
+
+static void rockchip_mpp_vepu_remove(struct rockchip_mpp_dev *mpp)
+{
+}
+
+const struct rockchip_mpp_dev_variant vepu_variant = {
+       .data_len = sizeof(struct rockchip_vepu_dev),
+       .reg_len = ROCKCHIP_VEPU_REG_LEN,
+       .trans_info = trans_vepu,
+       .mmu_dev_dts_name = NULL,
+       .hw_probe = rockchip_mpp_vepu_probe,
+       .hw_remove = rockchip_mpp_vepu_remove,
+       .power_on = rockchip_mpp_vepu_power_on,
+       .power_off = rockchip_mpp_vepu_power_off,
+       .reset = rockchip_mpp_vepu_reset,
+};
+EXPORT_SYMBOL(vepu_variant);
diff --git a/drivers/video/rockchip/vpu/mpp_dev_vepu.h b/drivers/video/rockchip/vpu/mpp_dev_vepu.h
new file mode 100644 (file)
index 0000000..3e0d587
--- /dev/null
@@ -0,0 +1,46 @@
+/**
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ *        Alpha Lin, alpha.lin@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ROCKCHIP_MPP_DEV_VEPU_H
+#define __ROCKCHIP_MPP_DEV_VEPU_H
+
+/* number of 32-bit hardware registers exposed by the VEPU encoder */
+#define ROCKCHIP_VEPU_REG_LEN          184
+
+struct regmap;
+
+/*
+ * Per-device state of the VEPU encoder sub-device. Embeds the generic
+ * rockchip_mpp_dev first so to_vepu_dev() can container_of() back.
+ */
+struct rockchip_vepu_dev {
+       struct rockchip_mpp_dev idev;
+
+       u32 irq_status;
+
+       /* axi and ahb bus clocks, both required for register access */
+       struct clk *aclk;
+       struct clk *hclk;
+
+       struct reset_control *rst_a;
+       struct reset_control *rst_h;
+
+       /* optional GRF codec-mode switch, read from DT mode_bit/mode_ctrl */
+       u32 mode_bit;
+       u32 mode_ctrl;
+       struct regmap *grf;
+};
+
+/* One queued encode task: register snapshot plus iommu patch info. */
+struct vepu_ctx {
+       struct mpp_ctx ictx;
+
+       u32 reg[ROCKCHIP_VEPU_REG_LEN];
+       struct extra_info_for_iommu ext_inf;
+};
+
+#endif
diff --git a/drivers/video/rockchip/vpu/mpp_service.c b/drivers/video/rockchip/vpu/mpp_service.c
new file mode 100644 (file)
index 0000000..dfbef46
--- /dev/null
@@ -0,0 +1,228 @@
+/**
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: chenhengming chm@rock-chips.com
+ *        Alpha Lin, alpha.lin@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include "mpp_dev_common.h"
+#include "mpp_service.h"
+
+/* Acquire the global service lock guarding all scheduling lists. */
+void mpp_srv_lock(struct mpp_service *pservice)
+{
+       mutex_lock(&pservice->lock);
+}
+EXPORT_SYMBOL(mpp_srv_lock);
+
+/* Release the global service lock taken by mpp_srv_lock(). */
+void mpp_srv_unlock(struct mpp_service *pservice)
+{
+       mutex_unlock(&pservice->lock);
+}
+EXPORT_SYMBOL(mpp_srv_unlock);
+
+/* service queue schedule */
+/*
+ * Queue a context at the tail of the service's pending list.
+ * NOTE(review): despite the "_locked" suffix, this function takes the
+ * service lock itself — callers must NOT already hold it.
+ */
+void mpp_srv_pending_locked(struct mpp_service *pservice,
+                           struct mpp_ctx *ctx)
+{
+       mpp_srv_lock(pservice);
+
+       list_add_tail(&ctx->status_link, &pservice->pending);
+
+       mpp_srv_unlock(pservice);
+}
+EXPORT_SYMBOL(mpp_srv_pending_locked);
+
+/*
+ * Promote the head of the pending queue onto the running queue.
+ * Caller must hold the service lock and must have verified the
+ * pending list is non-empty (mpp_srv_get_pending_ctx() on an empty
+ * list returns a bogus pointer, never NULL).
+ */
+void mpp_srv_run(struct mpp_service *pservice)
+{
+       struct mpp_ctx *ctx = mpp_srv_get_pending_ctx(pservice);
+
+       /* list_move_tail() == list_del_init() + list_add_tail() */
+       list_move_tail(&ctx->status_link, &pservice->running);
+}
+EXPORT_SYMBOL(mpp_srv_run);
+
+/*
+ * Retire the oldest running context: move it onto its session's done
+ * list and the service's done list, then wake any waiter sleeping on
+ * the session's wait queue.
+ * Caller must hold the service lock and guarantee the running list is
+ * non-empty.
+ */
+void mpp_srv_done(struct mpp_service *pservice)
+{
+       struct mpp_ctx *ctx = list_entry(pservice->running.next,
+                                        struct mpp_ctx, status_link);
+
+       /* list_move_tail() replaces the del_init + add_tail pairs */
+       list_move_tail(&ctx->session_link, &ctx->session->done);
+       list_move_tail(&ctx->status_link, &pservice->done);
+
+       wake_up(&ctx->session->wait);
+}
+EXPORT_SYMBOL(mpp_srv_done);
+
+/*
+ * Peek the head of the pending queue. Caller must hold the service
+ * lock and must have checked the list is non-empty: list_entry() on
+ * an empty list points into the list head itself, not NULL.
+ */
+struct mpp_ctx *mpp_srv_get_pending_ctx(struct mpp_service *pservice)
+{
+       return list_entry(pservice->pending.next, struct mpp_ctx, status_link);
+}
+EXPORT_SYMBOL(mpp_srv_get_pending_ctx);
+
+/*
+ * Peek the oldest running context. Same empty-list caveat as
+ * mpp_srv_get_pending_ctx(); caller must hold the service lock.
+ */
+struct mpp_ctx *mpp_srv_get_current_ctx(struct mpp_service *pservice)
+{
+       return list_entry(pservice->running.next, struct mpp_ctx, status_link);
+}
+EXPORT_SYMBOL(mpp_srv_get_current_ctx);
+
+/*
+ * Peek the most recently started context (tail of the running list).
+ * Caller must hold the service lock and know the list is non-empty.
+ */
+struct mpp_ctx *mpp_srv_get_last_running_ctx(struct mpp_service *pservice)
+{
+       return list_entry(pservice->running.prev, struct mpp_ctx, status_link);
+}
+EXPORT_SYMBOL(mpp_srv_get_last_running_ctx);
+
+/*
+ * Return the session owning the oldest running context, or NULL when
+ * nothing is running. The original used list_entry(), which never
+ * yields NULL, so on an empty list the "ctx ?" check was dead code
+ * and ctx->session dereferenced the list head itself;
+ * list_first_entry_or_null() makes the NULL path real.
+ */
+struct mpp_session *mpp_srv_get_current_session(struct mpp_service *pservice)
+{
+       struct mpp_ctx *ctx =
+               list_first_entry_or_null(&pservice->running,
+                                        struct mpp_ctx, status_link);
+
+       return ctx ? ctx->session : NULL;
+}
+EXPORT_SYMBOL(mpp_srv_get_current_session);
+
+/*
+ * Peek the oldest finished context of a session. Caller must ensure
+ * the done list is non-empty before calling (see list_entry caveat).
+ */
+struct mpp_ctx *mpp_srv_get_done_ctx(struct mpp_session *session)
+{
+       return list_entry(session->done.next, struct mpp_ctx, session_link);
+}
+EXPORT_SYMBOL(mpp_srv_get_done_ctx);
+
+/* Return true when no context is waiting to be scheduled. */
+bool mpp_srv_pending_is_empty(struct mpp_service *pservice)
+{
+       /* list_empty() already yields 0/1; the "!!" was redundant */
+       return list_empty(&pservice->pending);
+}
+EXPORT_SYMBOL(mpp_srv_pending_is_empty);
+
+/*
+ * Register a sub-device with the service.
+ * NOTE(review): no locking around subdev_list/dev_cnt — presumably
+ * only called from single-threaded probe paths; confirm with callers.
+ */
+void mpp_srv_attach(struct mpp_service *pservice, struct list_head *elem)
+{
+       INIT_LIST_HEAD(elem);
+       list_add_tail(elem, &pservice->subdev_list);
+       pservice->dev_cnt++;
+}
+EXPORT_SYMBOL(mpp_srv_attach);
+
+/* Unregister a sub-device; mirror of mpp_srv_attach(), same locking caveat. */
+void mpp_srv_detach(struct mpp_service *pservice, struct list_head *elem)
+{
+       list_del_init(elem);
+       pservice->dev_cnt--;
+}
+EXPORT_SYMBOL(mpp_srv_detach);
+
+/* Return true while at least one context sits on the running list. */
+bool mpp_srv_is_running(struct mpp_service *pservice)
+{
+       return !list_empty(&pservice->running);
+}
+EXPORT_SYMBOL(mpp_srv_is_running);
+
+/* One-time init of the service's lock and all scheduling lists. */
+static void mpp_init_drvdata(struct mpp_service *pservice)
+{
+       INIT_LIST_HEAD(&pservice->pending);
+       mutex_init(&pservice->lock);
+
+       INIT_LIST_HEAD(&pservice->done);
+       INIT_LIST_HEAD(&pservice->session);
+       INIT_LIST_HEAD(&pservice->subdev_list);
+       INIT_LIST_HEAD(&pservice->running);
+}
+
+#if defined(CONFIG_OF)
+/*
+ * DT match table.
+ * NOTE(review): no MODULE_DEVICE_TABLE(of, ...) follows, so module
+ * autoloading via OF alias will not work — confirm that is intended.
+ */
+static const struct of_device_id mpp_service_dt_ids[] = {
+       { .compatible = "rockchip,mpp_service", },
+       { },
+};
+#endif
+
+/*
+ * Platform probe: allocate the service state, optionally map the
+ * (DT-optional) register window and create the device class used by
+ * the sub-devices.
+ *
+ * Returns 0 on success, a negative errno on failure.
+ */
+static int mpp_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       struct resource *res = NULL;
+       struct device *dev = &pdev->dev;
+       struct device_node *np = pdev->dev.of_node;
+       struct mpp_service *pservice = devm_kzalloc(dev, sizeof(*pservice),
+                                                   GFP_KERNEL);
+
+       /* the original dereferenced pservice without this check */
+       if (!pservice)
+               return -ENOMEM;
+
+       dev_info(dev, "%s enter\n", __func__);
+
+       pservice->dev = dev;
+
+       mpp_init_drvdata(pservice);
+
+       if (of_property_read_bool(np, "reg")) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+               pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
+               if (IS_ERR(pservice->reg_base)) {
+                       /* non-fatal: the register window is optional */
+                       dev_err(dev, "ioremap registers base failed\n");
+                       ret = PTR_ERR(pservice->reg_base);
+                       pservice->reg_base = NULL;
+               }
+       } else {
+               pservice->reg_base = NULL;
+       }
+
+       pservice->cls = class_create(THIS_MODULE, dev_name(dev));
+       if (IS_ERR(pservice->cls)) {
+               ret = PTR_ERR(pservice->cls);
+               dev_err(dev, "class_create err:%d\n", ret);
+               /* propagate the real errno instead of -1 (== -EPERM) */
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, pservice);
+       dev_info(dev, "init success\n");
+
+       return 0;
+}
+
+/* Platform remove: the class is the only non-devm resource from probe. */
+static int mpp_remove(struct platform_device *pdev)
+{
+       struct mpp_service *pservice = platform_get_drvdata(pdev);
+
+       class_destroy(pservice->cls);
+       return 0;
+}
+
+/* Platform driver glue; OF matching only when CONFIG_OF is enabled. */
+static struct platform_driver mpp_driver = {
+       .probe = mpp_probe,
+       .remove = mpp_remove,
+       .driver = {
+               .name = "mpp",
+               .owner = THIS_MODULE,
+#if defined(CONFIG_OF)
+               .of_match_table = of_match_ptr(mpp_service_dt_ids),
+#endif
+       },
+};
+
+/*
+ * Module entry point: register the mpp platform driver, logging on
+ * failure. Registered at subsys_initcall time so it is ready before
+ * the encoder sub-device drivers probe.
+ */
+static int __init mpp_service_init(void)
+{
+       int ret = platform_driver_register(&mpp_driver);
+
+       if (ret)
+               mpp_err("Platform device register failed (%d).\n", ret);
+
+       return ret;
+}
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/video/rockchip/vpu/mpp_service.h b/drivers/video/rockchip/vpu/mpp_service.h
new file mode 100644 (file)
index 0000000..8e7287f
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: chenhengming chm@rock-chips.com
+ *        Alpha Lin, alpha.lin@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ROCKCHIP_MPP_SERVICE_H
+#define __ROCKCHIP_MPP_SERVICE_H
+
+#include <linux/ioctl.h>
+
+#include "mpp_dev_common.h"
+
+/* Per-open-file state: one session per userspace client. */
+struct mpp_session {
+       /* a linked list of data so we can access them for debugging */
+       struct list_head list_session;
+       /* the session related device private data */
+       struct rockchip_mpp_dev *mpp;
+       /* contexts finished by hw, waiting for userspace to collect */
+       struct list_head done;
+       /* woken by mpp_srv_done() when a context completes */
+       wait_queue_head_t wait;
+       pid_t pid;
+       /* number of this session's contexts currently queued/running */
+       atomic_t task_running;
+};
+
+enum mpp_srv_state {
+       HW_RUNNING      = BIT(1)
+};
+
+/* Global scheduler state shared by all mpp sub-devices. */
+struct mpp_service {
+       /* service structure global lock */
+       struct mutex lock;
+       struct list_head pending;
+       struct list_head done;
+       struct list_head running;
+       /* link to list_session in struct mpp_session */
+       struct list_head session;
+
+       struct device *dev;
+
+       /* optional register window; NULL when DT has no "reg" */
+       void __iomem *reg_base;
+
+       struct class *cls;
+
+       /* sub-devices attached via mpp_srv_attach() */
+       u32 dev_cnt;
+       struct list_head subdev_list;
+};
+
+void mpp_srv_lock(struct mpp_service *pservice);
+void mpp_srv_unlock(struct mpp_service *pservice);
+void mpp_srv_pending_locked(struct mpp_service *pservice, struct mpp_ctx *ctx);
+void mpp_srv_run(struct mpp_service *pservice);
+void mpp_srv_done(struct mpp_service *pservice);
+void mpp_srv_attach(struct mpp_service *pservice, struct list_head *elem);
+void mpp_srv_detach(struct mpp_service *pservice, struct list_head *elem);
+struct mpp_ctx *mpp_srv_get_pending_ctx(struct mpp_service *pservice);
+struct mpp_ctx *mpp_srv_get_current_ctx(struct mpp_service *pservice);
+struct mpp_ctx *mpp_srv_get_last_running_ctx(struct mpp_service *pservice);
+struct mpp_session *mpp_srv_get_current_session(struct mpp_service *pservice);
+bool mpp_srv_pending_is_empty(struct mpp_service *pservice);
+struct mpp_ctx *mpp_srv_get_done_ctx(struct mpp_session *session);
+/* NOTE(review): no definition of mpp_srv_is_power_on() is visible in
+ * mpp_service.c — confirm it is implemented or drop the declaration.
+ */
+bool mpp_srv_is_power_on(struct mpp_service *pservice);
+bool mpp_srv_is_running(struct mpp_service *pservice);
+
+#endif
+
diff --git a/drivers/video/rockchip/vpu/vpu_iommu_drm.c b/drivers/video/rockchip/vpu/vpu_iommu_drm.c
new file mode 100644 (file)
index 0000000..ef307c5
--- /dev/null
@@ -0,0 +1,936 @@
+/*
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: Jung Zhao jung.zhao@rock-chips.com
+ *         Randy Li, randy.li@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-iommu.h>
+
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_sync_helper.h>
+#include <drm/rockchip_drm.h>
+#include <linux/dma-mapping.h>
+#include <linux/rockchip-iovmm.h>
+#include <linux/pm_runtime.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/component.h>
+#include <linux/fence.h>
+#include <linux/console.h>
+#include <linux/kref.h>
+#include <linux/fdtable.h>
+#include <linux/ktime.h>
+#include <linux/iova.h>
+
+#include "vpu_iommu_ops.h"
+
+/* flag: buffer pages were allocated by this driver, not dma-buf imported */
+#define VPU_DRM_BUFFER_ALLOC   0x00000001
+
+/* One imported (or driver-allocated) buffer tracked per session. */
+struct vpu_drm_buffer {
+       struct list_head list;
+       struct dma_buf *dma_buf;
+       union {
+               unsigned long iova;
+               unsigned long phys;
+       };
+       /* kernel mapping from vpu_drm_sgt_map_kernel(), NULL when unmapped */
+       void *cpu_addr;
+       unsigned long size;
+       int index;
+       struct dma_buf_attachment *attach;
+       struct sg_table *sgt;
+       /* private copy of sgt used for iova mapping and page walking */
+       struct sg_table *copy_sgt;
+       struct page **pages;
+       /* mapping refcount; vpu_drm_clear_map() runs at zero */
+       struct kref ref;
+       struct vpu_iommu_session_info *session_info;
+       /* LRU timestamp for vpu_drm_remove_extra_buffer_no_lock() */
+       ktime_t last_used;
+       int flags;
+};
+
+/* Per-iommu private data: the domain and whether the device is attached. */
+struct vpu_iommu_drm_info {
+       struct iommu_domain *domain;
+       bool attached;
+};
+
+/*
+ * Look up a session buffer by index, refreshing its LRU timestamp on
+ * a hit. Returns NULL when no buffer carries @idx.
+ * Caller must hold session_info->list_mutex.
+ */
+static struct vpu_drm_buffer *
+vpu_drm_get_buffer_no_lock(struct vpu_iommu_session_info *session_info,
+                          int idx)
+{
+       struct vpu_drm_buffer *buffer, *tmp;
+
+       list_for_each_entry_safe(buffer, tmp, &session_info->buffer_list,
+                                list) {
+               if (buffer->index != idx)
+                       continue;
+
+               buffer->last_used = ktime_get();
+               return buffer;
+       }
+
+       return NULL;
+}
+
+/*
+ * Look up a session buffer by dma-buf fd, refreshing its LRU
+ * timestamp on a hit. Returns NULL when the fd is invalid or no
+ * buffer matches. Caller must hold session_info->list_mutex.
+ */
+static struct vpu_drm_buffer*
+vpu_drm_get_buffer_fd_no_lock(struct vpu_iommu_session_info *session_info,
+                             int fd)
+{
+       struct vpu_drm_buffer *drm_buffer = NULL, *n;
+       struct dma_buf *dma_buf = NULL;
+
+       dma_buf = dma_buf_get(fd);
+       /*
+        * dma_buf_get() returns an ERR_PTR on a bad fd; the original
+        * compared against it and then called dma_buf_put() on the
+        * error pointer, which oopses.
+        */
+       if (IS_ERR(dma_buf))
+               return NULL;
+
+       list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
+                                list) {
+               if (drm_buffer->dma_buf == dma_buf) {
+                       drm_buffer->last_used = ktime_get();
+                       dma_buf_put(dma_buf);
+                       return drm_buffer;
+               }
+       }
+
+       dma_buf_put(dma_buf);
+
+       return NULL;
+}
+
+/* Detach the device from its iommu domain; no-op if not attached. */
+static void vpu_drm_detach(struct vpu_iommu_info *iommu_info)
+{
+       struct vpu_iommu_drm_info *drm_info = iommu_info->private;
+       struct device *dev = iommu_info->dev;
+       struct iommu_domain *domain = drm_info->domain;
+
+       mutex_lock(&iommu_info->iommu_mutex);
+
+       if (!drm_info->attached) {
+               mutex_unlock(&iommu_info->iommu_mutex);
+               return;
+       }
+
+       iommu_detach_device(domain, dev);
+       drm_info->attached = false;
+
+       mutex_unlock(&iommu_info->iommu_mutex);
+}
+
+/*
+ * Set the device's DMA masks/segment size and attach it to the iommu
+ * domain. Caller holds iommu_mutex (hence "_unlock" = lock not taken
+ * here) and updates drm_info->attached on success.
+ */
+static int vpu_drm_attach_unlock(struct vpu_iommu_info *iommu_info)
+{
+       struct vpu_iommu_drm_info *drm_info = iommu_info->private;
+       struct device *dev = iommu_info->dev;
+       struct iommu_domain *domain = drm_info->domain;
+       int ret = 0;
+
+       ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
+       dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+       ret = iommu_attach_device(domain, dev);
+       if (ret) {
+               dev_err(dev, "Failed to attach iommu device\n");
+               return ret;
+       }
+
+       return ret;
+}
+
+/*
+ * Attach the device to its iommu domain, idempotently: returns 0
+ * immediately when already attached. Serialized by iommu_mutex.
+ */
+static int vpu_drm_attach(struct vpu_iommu_info *iommu_info)
+{
+       struct vpu_iommu_drm_info *drm_info = iommu_info->private;
+       int ret = 0;
+
+       mutex_lock(&iommu_info->iommu_mutex);
+
+       if (!drm_info->attached) {
+               ret = vpu_drm_attach_unlock(iommu_info);
+               if (!ret)
+                       drm_info->attached = true;
+       }
+
+       mutex_unlock(&iommu_info->iommu_mutex);
+
+       return ret;
+}
+
+/*
+ * Build a kernel virtual mapping of the buffer by collecting every
+ * page referenced by copy_sgt and vmap()ing them non-cached.
+ * Returns the mapping, or NULL on allocation failure. The page array
+ * is kept in drm_buffer->pages for vpu_drm_sgt_unmap_kernel().
+ *
+ * NOTE(review): the inner loop assumes every sg segment length is a
+ * multiple of PAGE_SIZE; a partial trailing page would be dropped —
+ * confirm copy_sgt segments are always page-aligned.
+ */
+static void *vpu_drm_sgt_map_kernel(struct vpu_drm_buffer *drm_buffer)
+{
+       struct vpu_iommu_session_info *session_info =
+                                                     drm_buffer->session_info;
+       struct device *dev = session_info->dev;
+       struct scatterlist *sgl, *sg;
+       int nr_pages = PAGE_ALIGN(drm_buffer->size) >> PAGE_SHIFT;
+       int i = 0, j = 0, k = 0;
+       struct page *page;
+
+       drm_buffer->pages = kmalloc_array(nr_pages, sizeof(*drm_buffer->pages),
+                                         GFP_KERNEL);
+       if (!(drm_buffer->pages)) {
+               dev_err(dev, "drm map can not alloc pages\n");
+
+               return NULL;
+       }
+
+       sgl = drm_buffer->copy_sgt->sgl;
+
+       for_each_sg(sgl, sg, drm_buffer->copy_sgt->nents, i) {
+               page = sg_page(sg);
+               for (j = 0; j < sg->length / PAGE_SIZE; j++)
+                       drm_buffer->pages[k++] = page++;
+       }
+
+       return vmap(drm_buffer->pages, nr_pages, VM_MAP,
+                   pgprot_noncached(PAGE_KERNEL));
+}
+
+/* Undo vpu_drm_sgt_map_kernel(): drop the vmap and the page array. */
+static void vpu_drm_sgt_unmap_kernel(struct vpu_drm_buffer *drm_buffer)
+{
+       vunmap(drm_buffer->cpu_addr);
+       kfree(drm_buffer->pages);
+}
+
+/*
+ * Rewrite the sg list's DMA fields after a successful iommu mapping,
+ * restoring the original offsets/lengths stashed by vpu_dma_map_sg()
+ * and laying real dma addresses over the allocated IOVA range.
+ * Adapted from the kernel's dma-iommu __finalise_sg().
+ * Returns the number of coalesced DMA segments.
+ */
+static int vpu_finalise_sg(struct scatterlist *sg,
+                          int nents,
+                          dma_addr_t dma_addr)
+{
+       struct scatterlist *s, *cur = sg;
+       unsigned long seg_mask = DMA_BIT_MASK(32);
+       unsigned int cur_len = 0, max_len = DMA_BIT_MASK(32);
+       int i, count = 0;
+
+       for_each_sg(sg, s, nents, i) {
+               /* Restore this segment's original unaligned fields first */
+               unsigned int s_iova_off = sg_dma_address(s);
+               unsigned int s_length = sg_dma_len(s);
+               unsigned int s_iova_len = s->length;
+
+               s->offset += s_iova_off;
+               s->length = s_length;
+               sg_dma_address(s) = DMA_ERROR_CODE;
+               sg_dma_len(s) = 0;
+
+               /*
+                * Now fill in the real DMA data. If...
+                * - there is a valid output segment to append to
+                * - and this segment starts on an IOVA page boundary
+                * - but doesn't fall at a segment boundary
+                * - and wouldn't make the resulting output segment too long
+                */
+               if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
+                   (cur_len + s_length <= max_len)) {
+                       /* ...then concatenate it with the previous one */
+                       cur_len += s_length;
+               } else {
+                       /* Otherwise start the next output segment */
+                       if (i > 0)
+                               cur = sg_next(cur);
+                       cur_len = s_length;
+                       count++;
+
+                       sg_dma_address(cur) = dma_addr + s_iova_off;
+               }
+
+               sg_dma_len(cur) = cur_len;
+               dma_addr += s_iova_len;
+
+               if (s_length + s_iova_off < s_iova_len)
+                       cur_len = 0;
+       }
+       return count;
+}
+
+/*
+ * Roll an sg list back to its pre-map state after a failed mapping,
+ * undoing the offset/length stashing done in vpu_dma_map_sg().
+ * Mirrors the kernel's dma-iommu __invalidate_sg().
+ */
+static void vpu_invalidate_sg(struct scatterlist *sg, int nents)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               if (sg_dma_address(s) != DMA_ERROR_CODE)
+                       s->offset += sg_dma_address(s);
+               if (sg_dma_len(s))
+                       s->length = sg_dma_len(s);
+               sg_dma_address(s) = DMA_ERROR_CODE;
+               sg_dma_len(s) = 0;
+       }
+}
+
+/*
+ * Map an sg list into the iommu domain through a single contiguous
+ * IOVA allocation, following the kernel's iommu_dma_map_sg() scheme:
+ * stash each segment's unaligned offset/length in its DMA fields,
+ * align the segments to IOVA granules, allocate one IOVA range, map,
+ * then fix the DMA fields up via vpu_finalise_sg().
+ * Returns 0 on failure.
+ *
+ * NOTE(review): on success this returns vpu_finalise_sg()'s segment
+ * COUNT cast to dma_addr_t, not the IOVA base — callers presumably
+ * read addresses from the sg list itself; confirm against callers.
+ */
+static dma_addr_t vpu_dma_map_sg(struct iommu_domain *domain,
+                                struct scatterlist *sg,
+                                int nents, int prot)
+{
+       struct iova_domain *iovad = domain->iova_cookie;
+       struct iova *iova;
+       struct scatterlist *s, *prev = NULL;
+       dma_addr_t dma_addr;
+       size_t iova_len = 0;
+       unsigned long mask = DMA_BIT_MASK(32);
+       unsigned long shift = iova_shift(iovad);
+       int i;
+
+       /*
+        * Work out how much IOVA space we need, and align the segments to
+        * IOVA granules for the IOMMU driver to handle. With some clever
+        * trickery we can modify the list in-place, but reversibly, by
+        * stashing the unaligned parts in the as-yet-unused DMA fields.
+        */
+       for_each_sg(sg, s, nents, i) {
+               size_t s_iova_off = iova_offset(iovad, s->offset);
+               size_t s_length = s->length;
+               size_t pad_len = (mask - iova_len + 1) & mask;
+
+               sg_dma_address(s) = s_iova_off;
+               sg_dma_len(s) = s_length;
+               s->offset -= s_iova_off;
+               s_length = iova_align(iovad, s_length + s_iova_off);
+               s->length = s_length;
+
+               /*
+                * Due to the alignment of our single IOVA allocation, we can
+                * depend on these assumptions about the segment boundary mask:
+                * - If mask size >= IOVA size, then the IOVA range cannot
+                *   possibly fall across a boundary, so we don't care.
+                * - If mask size < IOVA size, then the IOVA range must start
+                *   exactly on a boundary, therefore we can lay things out
+                *   based purely on segment lengths without needing to know
+                *   the actual addresses beforehand.
+                * - The mask must be a power of 2, so pad_len == 0 if
+                *   iova_len == 0, thus we cannot dereference prev the first
+                *   time through here (i.e. before it has a meaningful value).
+                */
+               if (pad_len && pad_len < s_length - 1) {
+                       prev->length += pad_len;
+                       iova_len += pad_len;
+               }
+
+               iova_len += s_length;
+               prev = s;
+       }
+
+       iova = alloc_iova(iovad, iova_align(iovad, iova_len) >> shift,
+                         mask >> shift, true);
+       if (!iova)
+               goto out_restore_sg;
+
+       /*
+        * We'll leave any physical concatenation to the IOMMU driver's
+        * implementation - it knows better than we do.
+        */
+       dma_addr = iova_dma_addr(iovad, iova);
+       if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
+               goto out_free_iova;
+
+       return vpu_finalise_sg(sg, nents, dma_addr);
+
+out_free_iova:
+       __free_iova(iovad, iova);
+out_restore_sg:
+       vpu_invalidate_sg(sg, nents);
+       return 0;
+}
+
+/*
+ * Tear down a mapping made by vpu_dma_map_sg(): locate the iova by
+ * its dma address, unmap the whole range and free the iova.
+ */
+static void vpu_dma_unmap_sg(struct iommu_domain *domain,
+                            dma_addr_t dma_addr)
+{
+       struct iova_domain *iovad = domain->iova_cookie;
+       unsigned long shift = iova_shift(iovad);
+       unsigned long pfn = dma_addr >> shift;
+       struct iova *iova = find_iova(iovad, pfn);
+       size_t size;
+
+       if (WARN_ON(!iova))
+               return;
+
+       size = iova_size(iova) << shift;
+       size -= iommu_unmap(domain, pfn << shift, size);
+       /* ...and if we can't, then something is horribly, horribly wrong */
+       WARN_ON(size > 0);
+       __free_iova(iovad, iova);
+}
+
+/*
+ * kref release callback: tear down every mapping held by the buffer —
+ * kernel vmap, iommu/iova mapping, driver-allocated pages (when
+ * VPU_DRM_BUFFER_ALLOC), the private sg copy and the dma-buf
+ * attachment. Does NOT free the vpu_drm_buffer itself.
+ */
+static void vpu_drm_clear_map(struct kref *ref)
+{
+       struct vpu_drm_buffer *drm_buffer =
+               container_of(ref, struct vpu_drm_buffer, ref);
+       struct vpu_iommu_session_info *session_info =
+               drm_buffer->session_info;
+       struct vpu_iommu_info *iommu_info = session_info->iommu_info;
+       struct vpu_iommu_drm_info *drm_info = iommu_info->private;
+       struct sg_table *table;
+       struct scatterlist *sg;
+       struct page *page;
+       int i;
+
+       mutex_lock(&iommu_info->iommu_mutex);
+       drm_info = session_info->iommu_info->private;
+
+       if (drm_buffer->cpu_addr) {
+               vpu_drm_sgt_unmap_kernel(drm_buffer);
+               drm_buffer->cpu_addr = NULL;
+       }
+
+       vpu_dma_unmap_sg(drm_info->domain, drm_buffer->iova);
+
+       /* pages were allocated by us, not imported: release them */
+       if (drm_buffer->flags & VPU_DRM_BUFFER_ALLOC) {
+               table = drm_buffer->copy_sgt;
+               for_each_sg(table->sgl, sg, table->nents, i) {
+                       page = sg_page(sg);
+                       __free_pages(page, compound_order(page));
+               }
+       }
+
+       sg_free_table(drm_buffer->copy_sgt);
+       kfree(drm_buffer->copy_sgt);
+
+       if (drm_buffer->attach) {
+               dma_buf_unmap_attachment(drm_buffer->attach, drm_buffer->sgt,
+                                        DMA_BIDIRECTIONAL);
+               dma_buf_detach(drm_buffer->dma_buf, drm_buffer->attach);
+               dma_buf_put(drm_buffer->dma_buf);
+               drm_buffer->attach = NULL;
+       }
+
+       mutex_unlock(&iommu_info->iommu_mutex);
+}
+
+/*
+ * Debug helper: dump every buffer still tracked by the session.
+ * NOTE(review): function name is a typo of "vcodec"; left as-is since
+ * it may be referenced elsewhere in this file (e.g. an ops table).
+ */
+static void vcdoec_drm_dump_info(struct vpu_iommu_session_info *session_info)
+{
+       struct vpu_drm_buffer *drm_buffer = NULL, *n;
+
+       vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
+                       "still there are below buffers stored in list\n");
+       list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
+                                list) {
+               vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_OPS_DUMP,
+                               "index %d drm_buffer dma_buf %p cpu_addr %p\n",
+                               drm_buffer->index,
+                               drm_buffer->dma_buf, drm_buffer->cpu_addr);
+       }
+}
+
+/*
+ * Free the tracking entry for buffer @idx once all of its mapping
+ * references are gone. Returns -EINVAL when the index is unknown.
+ *
+ * NOTE(review): peeking ref.refcount directly instead of using
+ * kref_put() is only safe because list_mutex serializes all map/free
+ * paths on this list — confirm no lock-free caller exists.
+ */
+static int vpu_drm_free(struct vpu_iommu_session_info *session_info,
+                       int idx)
+{
+       struct device *dev = session_info->dev;
+       /* please double-check all maps have been release */
+       struct vpu_drm_buffer *drm_buffer;
+
+       mutex_lock(&session_info->list_mutex);
+       drm_buffer = vpu_drm_get_buffer_no_lock(session_info, idx);
+
+       if (!drm_buffer) {
+               dev_err(dev, "can not find %d buffer in list\n", idx);
+               mutex_unlock(&session_info->list_mutex);
+
+               return -EINVAL;
+       }
+
+       if (atomic_read(&drm_buffer->ref.refcount) == 0) {
+               if (drm_buffer->dma_buf)
+                       dma_buf_put(drm_buffer->dma_buf);
+               list_del_init(&drm_buffer->list);
+               kfree(drm_buffer);
+               session_info->buffer_nums--;
+               vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
+                               "buffer nums %d\n", session_info->buffer_nums);
+       }
+       mutex_unlock(&session_info->list_mutex);
+
+       return 0;
+}
+
+/*
+ * Drop one mapping reference on buffer @idx; the last put triggers
+ * vpu_drm_clear_map(). Returns -EINVAL if @idx is unknown.
+ */
+static int
+vpu_drm_unmap_iommu(struct vpu_iommu_session_info *session_info,
+                   int idx)
+{
+       struct vpu_drm_buffer *buffer;
+
+       /* Force to flush iommu table */
+       if (of_machine_is_compatible("rockchip,rk3288"))
+               rockchip_iovmm_invalidate_tlb(session_info->mmu_dev);
+
+       mutex_lock(&session_info->list_mutex);
+       buffer = vpu_drm_get_buffer_no_lock(session_info, idx);
+       mutex_unlock(&session_info->list_mutex);
+
+       if (!buffer) {
+               dev_err(session_info->dev,
+                       "can not find %d buffer in list\n", idx);
+               return -EINVAL;
+       }
+
+       kref_put(&buffer->ref, vpu_drm_clear_map);
+
+       return 0;
+}
+
+/*
+ * Take one mapping reference on buffer @idx and report its iova and
+ * size through the optional out parameters.
+ * Returns -EINVAL if @idx is unknown.
+ */
+static int vpu_drm_map_iommu(struct vpu_iommu_session_info *session_info,
+                            int idx,
+                            unsigned long *iova,
+                            unsigned long *size)
+{
+       struct vpu_drm_buffer *buffer;
+
+       /* Force to flush iommu table */
+       if (of_machine_is_compatible("rockchip,rk3288"))
+               rockchip_iovmm_invalidate_tlb(session_info->mmu_dev);
+
+       mutex_lock(&session_info->list_mutex);
+       buffer = vpu_drm_get_buffer_no_lock(session_info, idx);
+       mutex_unlock(&session_info->list_mutex);
+
+       if (!buffer) {
+               dev_err(session_info->dev,
+                       "can not find %d buffer in list\n", idx);
+               return -EINVAL;
+       }
+
+       kref_get(&buffer->ref);
+
+       if (iova)
+               *iova = buffer->iova;
+       if (size)
+               *size = buffer->size;
+
+       return 0;
+}
+
+/*
+ * Release the kernel mapping of buffer @idx (if any) and drop the
+ * reference taken by vpu_drm_map_kernel().
+ * Returns -EINVAL if @idx is unknown.
+ */
+static int
+vpu_drm_unmap_kernel(struct vpu_iommu_session_info *session_info, int idx)
+{
+       struct vpu_drm_buffer *buffer;
+
+       mutex_lock(&session_info->list_mutex);
+       buffer = vpu_drm_get_buffer_no_lock(session_info, idx);
+       mutex_unlock(&session_info->list_mutex);
+
+       if (!buffer) {
+               dev_err(session_info->dev,
+                       "can not find %d buffer in list\n", idx);
+
+               return -EINVAL;
+       }
+
+       if (buffer->cpu_addr) {
+               vpu_drm_sgt_unmap_kernel(buffer);
+               buffer->cpu_addr = NULL;
+       }
+
+       kref_put(&buffer->ref, vpu_drm_clear_map);
+
+       return 0;
+}
+
+/*
+ * Free the buffer associated with dma-buf @fd: drop its iommu mapping
+ * reference, then free the tracking entry once the refcount is gone.
+ * Returns -EINVAL when no buffer matches the fd.
+ */
+static int
+vpu_drm_free_fd(struct vpu_iommu_session_info *session_info, int fd)
+{
+       struct device *dev = session_info->dev;
+       /* please double-check all maps have been release */
+       struct vpu_drm_buffer *drm_buffer = NULL;
+
+       mutex_lock(&session_info->list_mutex);
+       drm_buffer = vpu_drm_get_buffer_fd_no_lock(session_info, fd);
+
+       if (!drm_buffer) {
+               dev_err(dev, "can not find %d buffer in list\n", fd);
+               mutex_unlock(&session_info->list_mutex);
+
+               return -EINVAL;
+       }
+       mutex_unlock(&session_info->list_mutex);
+
+       /* dropped outside the lock: unmap takes list_mutex itself */
+       vpu_drm_unmap_iommu(session_info, drm_buffer->index);
+
+       mutex_lock(&session_info->list_mutex);
+       /* same racy-looking refcount peek as vpu_drm_free(); serialized
+        * by list_mutex
+        */
+       if (atomic_read(&drm_buffer->ref.refcount) == 0) {
+               if (drm_buffer->dma_buf)
+                       dma_buf_put(drm_buffer->dma_buf);
+               list_del_init(&drm_buffer->list);
+               kfree(drm_buffer);
+               session_info->buffer_nums--;
+               vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
+                               "buffer nums %d\n", session_info->buffer_nums);
+       }
+       mutex_unlock(&session_info->list_mutex);
+
+       return 0;
+}
+
+/*
+ * Session teardown: force-release every remaining buffer. kref_put()
+ * clears the mappings; vpu_drm_free() then frees the entry. The
+ * _safe iterator keeps the loop valid across the kfree in
+ * vpu_drm_free().
+ */
+static void
+vpu_drm_clear_session(struct vpu_iommu_session_info *session_info)
+{
+       struct vpu_drm_buffer *drm_buffer = NULL, *n;
+
+       list_for_each_entry_safe(drm_buffer, n, &session_info->buffer_list,
+                                list) {
+               kref_put(&drm_buffer->ref, vpu_drm_clear_map);
+               vpu_drm_free(session_info, drm_buffer->index);
+       }
+}
+
+/*
+ * Return a kernel virtual mapping of buffer @idx, creating it on
+ * first use, and take one mapping reference. Returns NULL when @idx
+ * is unknown (the mapping itself may also be NULL on vmap failure).
+ */
+static void*
+vpu_drm_map_kernel(struct vpu_iommu_session_info *session_info, int idx)
+{
+       struct vpu_drm_buffer *buffer;
+
+       mutex_lock(&session_info->list_mutex);
+       buffer = vpu_drm_get_buffer_no_lock(session_info, idx);
+       mutex_unlock(&session_info->list_mutex);
+
+       if (!buffer) {
+               dev_err(session_info->dev,
+                       "can not find %d buffer in list\n", idx);
+               return NULL;
+       }
+
+       if (!buffer->cpu_addr)
+               buffer->cpu_addr = vpu_drm_sgt_map_kernel(buffer);
+
+       kref_get(&buffer->ref);
+
+       return buffer->cpu_addr;
+}
+
+/*
+ * Evict the least-recently-used imported buffer once the session
+ * holds more than BUFFER_LIST_MAX_NUMS entries. Driver-allocated
+ * buffers (VPU_DRM_BUFFER_ALLOC) are never evicted.
+ * Caller must hold session_info->list_mutex.
+ */
+static void vpu_drm_remove_extra_buffer_no_lock(struct vpu_iommu_session_info *session_info)
+{
+       struct vpu_drm_buffer *oldest_buffer = NULL, *loop_buffer = NULL;
+       struct vpu_drm_buffer *n;
+       ktime_t oldest_time = ktime_set(0, 0);
+
+       if (session_info->buffer_nums <= BUFFER_LIST_MAX_NUMS)
+               return;
+
+       list_for_each_entry_safe(loop_buffer, n,
+                                &session_info->buffer_list, list) {
+               if (loop_buffer->flags & VPU_DRM_BUFFER_ALLOC)
+                       continue;
+
+               if (ktime_to_ns(oldest_time) == 0 ||
+                   ktime_after(oldest_time,
+                               loop_buffer->last_used)) {
+                       oldest_time = loop_buffer->last_used;
+                       oldest_buffer = loop_buffer;
+               }
+       }
+
+       /*
+        * Every buffer may carry VPU_DRM_BUFFER_ALLOC, in which case
+        * nothing is evictable; the original dereferenced the NULL
+        * oldest_buffer here.
+        */
+       if (!oldest_buffer)
+               return;
+
+       kref_put(&oldest_buffer->ref, vpu_drm_clear_map);
+       dma_buf_put(oldest_buffer->dma_buf);
+       list_del_init(&oldest_buffer->list);
+       kfree(oldest_buffer);
+       session_info->buffer_nums--;
+}
+
+/*
+ * Import a dma-buf fd into this session and map it through the private
+ * IOMMU domain.  Returns the buffer index on success or a negative
+ * errno.  If the same dma-buf was already imported, its entry is reused
+ * and only the LRU timestamp is refreshed.
+ *
+ * Reference accounting: dma_buf_get() takes one reference; a second one
+ * is taken with get_dma_buf() for the lifetime of the attachment.  All
+ * error paths below drop exactly the references taken up to that point
+ * (the original leaked one on several paths).
+ */
+static int vpu_drm_import(struct vpu_iommu_session_info *session_info,
+                         int fd)
+{
+       struct vpu_drm_buffer *drm_buffer = NULL, *n;
+       struct vpu_iommu_info *iommu_info = session_info->iommu_info;
+       struct vpu_iommu_drm_info *drm_info = iommu_info->private;
+       struct device *dev = session_info->dev;
+       struct dma_buf_attachment *attach;
+       struct sg_table *sgt;
+       struct dma_buf *dma_buf;
+       struct scatterlist *sg, *s;
+       int i;
+       int ret = 0;
+
+       dma_buf = dma_buf_get(fd);
+       if (IS_ERR(dma_buf))
+               return PTR_ERR(dma_buf);
+
+       /* Reuse an existing import of the same dma-buf. */
+       list_for_each_entry_safe(drm_buffer, n,
+                                &session_info->buffer_list, list) {
+               if (drm_buffer->dma_buf == dma_buf) {
+                       dma_buf_put(dma_buf);
+                       drm_buffer->last_used = ktime_get();
+                       return drm_buffer->index;
+               }
+       }
+
+       drm_buffer = kzalloc(sizeof(*drm_buffer), GFP_KERNEL);
+       if (!drm_buffer) {
+               /* drop the reference taken by dma_buf_get() */
+               dma_buf_put(dma_buf);
+               return -ENOMEM;
+       }
+
+       drm_buffer->dma_buf = dma_buf;
+       drm_buffer->session_info = session_info;
+       drm_buffer->last_used = ktime_get();
+
+       kref_init(&drm_buffer->ref);
+
+       mutex_lock(&iommu_info->iommu_mutex);
+
+       attach = dma_buf_attach(drm_buffer->dma_buf, dev);
+       if (IS_ERR(attach)) {
+               ret = PTR_ERR(attach);
+               goto fail_out;
+       }
+
+       /* Hold an extra reference for the lifetime of the attachment. */
+       get_dma_buf(drm_buffer->dma_buf);
+
+       sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+       if (IS_ERR(sgt)) {
+               ret = PTR_ERR(sgt);
+               goto fail_put;
+       }
+
+       /*
+        * Since we call dma_buf_map_attachment() outside attach/detach,
+        * the mapping would be built for the wrong device.  Re-build the
+        * map table ourselves on a copied sg_table so the exporter's
+        * original table is left untouched.
+        */
+       drm_buffer->copy_sgt = kmalloc(sizeof(*drm_buffer->copy_sgt),
+                                      GFP_KERNEL);
+       if (!drm_buffer->copy_sgt) {
+               ret = -ENOMEM;
+               goto fail_unmap;
+       }
+
+       /* The original ignored this return value and used a dead table. */
+       ret = sg_alloc_table(drm_buffer->copy_sgt, sgt->nents, GFP_KERNEL);
+       if (ret)
+               goto fail_free_copy;
+
+       s = drm_buffer->copy_sgt->sgl;
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               sg_set_page(s, sg_page(sg),
+                           PAGE_SIZE << compound_order(sg_page(sg)), 0);
+               sg_dma_address(s) = page_to_phys(sg_page(sg));
+               s->offset = sg->offset;
+               s->length = sg->length;
+               s = sg_next(s);
+       }
+
+       vpu_dma_map_sg(drm_info->domain, drm_buffer->copy_sgt->sgl,
+                      drm_buffer->copy_sgt->nents,
+                      IOMMU_READ | IOMMU_WRITE);
+       drm_buffer->iova = sg_dma_address(drm_buffer->copy_sgt->sgl);
+       drm_buffer->size = drm_buffer->dma_buf->size;
+
+       drm_buffer->attach = attach;
+       drm_buffer->sgt = sgt;
+
+       mutex_unlock(&iommu_info->iommu_mutex);
+
+       INIT_LIST_HEAD(&drm_buffer->list);
+       mutex_lock(&session_info->list_mutex);
+       session_info->buffer_nums++;
+       vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
+                       "buffer nums %d\n", session_info->buffer_nums);
+       vpu_drm_remove_extra_buffer_no_lock(session_info);
+       drm_buffer->index = session_info->max_idx;
+       list_add_tail(&drm_buffer->list, &session_info->buffer_list);
+       session_info->max_idx++;
+       if ((session_info->max_idx & 0xfffffff) == 0)
+               session_info->max_idx = 0;
+       mutex_unlock(&session_info->list_mutex);
+
+       return drm_buffer->index;
+
+fail_free_copy:
+       kfree(drm_buffer->copy_sgt);
+fail_unmap:
+       dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_put:
+       /* balance get_dma_buf() above */
+       dma_buf_put(drm_buffer->dma_buf);
+       dev_err(dev, "dmabuf map attach failed\n");
+       dma_buf_detach(drm_buffer->dma_buf, attach);
+fail_out:
+       /* balance dma_buf_get() at function entry */
+       dma_buf_put(dma_buf);
+       kfree(drm_buffer);
+       mutex_unlock(&iommu_info->iommu_mutex);
+
+       return ret;
+}
+
+/*
+ * Allocate @size bytes of buffer memory from order-8 compound pages and
+ * map them into the session's private IOMMU domain.
+ *
+ * Returns the new buffer index on success, -EINVAL for an unsupported
+ * alignment, or -ENOMEM on allocation failure.  The buffer is tagged
+ * VPU_DRM_BUFFER_ALLOC so LRU eviction skips it.
+ */
+static int vpu_drm_alloc(struct vpu_iommu_session_info *session_info,
+                        unsigned long size,
+                        unsigned long align)
+{
+       struct sg_table *table;
+       struct scatterlist *sg;
+       struct list_head pages;
+       struct page *page, *tmp_page;
+       long size_remaining = PAGE_ALIGN(size);
+       struct vpu_drm_buffer *drm_buffer;
+       struct vpu_iommu_info *iommu_info = session_info->iommu_info;
+       struct vpu_iommu_drm_info *drm_info = iommu_info->private;
+       int i;
+
+       /* Pages are naturally page-aligned; larger alignment unsupported. */
+       if (align > PAGE_SIZE)
+               return -EINVAL;
+
+       /* Refuse requests larger than half of system RAM. */
+       if (size / PAGE_SIZE > totalram_pages / 2)
+               return -ENOMEM;
+
+       drm_buffer = kzalloc(sizeof(*drm_buffer), GFP_KERNEL);
+       if (!drm_buffer)
+               return -ENOMEM;
+
+       drm_buffer->session_info = session_info;
+       drm_buffer->last_used = ktime_set(0, 0);
+
+       kref_init(&drm_buffer->ref);
+
+       INIT_LIST_HEAD(&pages);
+
+       /* Grab order-8 (1 MiB) compound pages until the request is met. */
+       i = 0;
+       while (size_remaining > 0) {
+               gfp_t gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+                                  __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM;
+               page = alloc_pages(gfp_flags | __GFP_COMP, 8);
+               if (!page)
+                       goto free_pages;
+
+               size_remaining -= PAGE_SIZE << compound_order(page);
+               list_add_tail(&page->lru, &pages);
+               i++;
+       }
+
+       /* One sg entry per compound page allocated above. */
+       table = kmalloc(sizeof(*table), GFP_KERNEL);
+       if (!table)
+               goto free_pages;
+
+       if (sg_alloc_table(table, i, GFP_KERNEL))
+               goto free_table;
+
+       sg = table->sgl;
+       list_for_each_entry_safe(page, tmp_page, &pages, lru) {
+               sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+               sg = sg_next(sg);
+               list_del(&page->lru);
+       }
+
+       mutex_lock(&iommu_info->iommu_mutex);
+       drm_info = session_info->iommu_info->private;
+       drm_buffer->copy_sgt = table;
+
+       /* Map the whole table; iova is the address of the first entry. */
+       vpu_dma_map_sg(drm_info->domain, drm_buffer->copy_sgt->sgl,
+                      drm_buffer->copy_sgt->nents,
+                      IOMMU_READ | IOMMU_WRITE);
+       drm_buffer->iova = sg_dma_address(drm_buffer->copy_sgt->sgl);
+       drm_buffer->size = size;
+       drm_buffer->flags = VPU_DRM_BUFFER_ALLOC;
+
+       mutex_unlock(&iommu_info->iommu_mutex);
+
+       /* Publish the buffer under a wrapping 28-bit index. */
+       INIT_LIST_HEAD(&drm_buffer->list);
+       mutex_lock(&session_info->list_mutex);
+       session_info->buffer_nums++;
+       vpu_iommu_debug(session_info->debug_level, DEBUG_IOMMU_NORMAL,
+                       "buffer nums %d\n", session_info->buffer_nums);
+       vpu_drm_remove_extra_buffer_no_lock(session_info);
+       drm_buffer->index = session_info->max_idx;
+       list_add_tail(&drm_buffer->list, &session_info->buffer_list);
+       session_info->max_idx++;
+       if ((session_info->max_idx & 0xfffffff) == 0)
+               session_info->max_idx = 0;
+       mutex_unlock(&session_info->list_mutex);
+
+       return drm_buffer->index;
+
+free_table:
+       kfree(table);
+free_pages:
+       /* Order 8 matches the alloc_pages() order used above. */
+       list_for_each_entry_safe(page, tmp_page, &pages, lru)
+       __free_pages(page, 8);
+
+       kfree(drm_buffer);
+
+       return -ENOMEM;
+}
+
+/*
+ * Allocate the DRM allocator private data: a platform-bus IOMMU domain
+ * with a DMA cookie, joined to (or creating) the device's IOMMU group.
+ *
+ * Returns 0 on success or a negative errno.  All error paths release
+ * everything acquired so far; the original leaked @drm_info on every
+ * failure and returned 0 when iommu_group_alloc() failed (ret was still
+ * 0 from iommu_get_dma_cookie()).
+ */
+static int vpu_drm_create(struct vpu_iommu_info *iommu_info)
+{
+       struct vpu_iommu_drm_info *drm_info;
+       struct iommu_group *group;
+       int ret;
+
+       drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
+       if (!drm_info)
+               return -ENOMEM;
+       iommu_info->private = drm_info;
+
+       drm_info->domain = iommu_domain_alloc(&platform_bus_type);
+       drm_info->attached = false;
+       if (!drm_info->domain) {
+               ret = -ENOMEM;
+               goto err_free_info;
+       }
+
+       ret = iommu_get_dma_cookie(drm_info->domain);
+       if (ret)
+               goto err_free_domain;
+
+       group = iommu_group_get(iommu_info->dev);
+       if (!group) {
+               group = iommu_group_alloc();
+               if (IS_ERR(group)) {
+                       dev_err(iommu_info->dev,
+                               "Failed to allocate IOMMU group\n");
+                       ret = PTR_ERR(group);
+                       goto err_put_cookie;
+               }
+               ret = iommu_group_add_device(group, iommu_info->dev);
+               if (ret) {
+                       dev_err(iommu_info->dev,
+                               "failed to add device to IOMMU group\n");
+                       iommu_group_put(group);
+                       goto err_put_cookie;
+               }
+       }
+       iommu_dma_init_domain(drm_info->domain, 0x10000000, SZ_2G);
+       iommu_group_put(group);
+
+       return 0;
+
+err_put_cookie:
+       iommu_put_dma_cookie(drm_info->domain);
+err_free_domain:
+       iommu_domain_free(drm_info->domain);
+err_free_info:
+       iommu_info->private = NULL;
+       kfree(drm_info);
+
+       return ret;
+}
+
+/*
+ * Tear down the DRM allocator: drop the DMA cookie, free the IOMMU
+ * domain, then release the private data.  Always returns 0.
+ */
+static int vpu_drm_destroy(struct vpu_iommu_info *iommu_info)
+{
+       struct vpu_iommu_drm_info *drm_info = iommu_info->private;
+       struct iommu_domain *domain = drm_info->domain;
+
+       iommu_put_dma_cookie(domain);
+       iommu_domain_free(domain);
+
+       iommu_info->private = NULL;
+       kfree(drm_info);
+
+       return 0;
+}
+
+/* Allocator operations backed by a private DRM/IOMMU domain. */
+static struct vpu_iommu_ops drm_ops = {
+       .create = vpu_drm_create,
+       .alloc = vpu_drm_alloc,
+       .import = vpu_drm_import,
+       .free = vpu_drm_free,
+       .free_fd = vpu_drm_free_fd,
+       .map_kernel = vpu_drm_map_kernel,
+       .unmap_kernel = vpu_drm_unmap_kernel,
+       .map_iommu = vpu_drm_map_iommu,
+       .unmap_iommu = vpu_drm_unmap_iommu,
+       .destroy = vpu_drm_destroy,
+       /* NOTE(review): "vcdoec" looks like a typo for "vcodec"; the
+        * symbol is defined with this spelling elsewhere in the file,
+        * so renaming must be done at its definition too. */
+       .dump = vcdoec_drm_dump_info,
+       .attach = vpu_drm_attach,
+       .detach = vpu_drm_detach,
+       .clear = vpu_drm_clear_session,
+};
+
+/* Install the DRM-backed allocator operations on @iommu_info. */
+void vpu_iommu_drm_set_ops(struct vpu_iommu_info *iommu_info)
+{
+       if (iommu_info)
+               iommu_info->ops = &drm_ops;
+}
diff --git a/drivers/video/rockchip/vpu/vpu_iommu_ion.c b/drivers/video/rockchip/vpu/vpu_iommu_ion.c
new file mode 100644 (file)
index 0000000..0dcd238
--- /dev/null
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: Jung Zhao jung.zhao@rock-chips.com
+ *         Randy Li, randy.li@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "vpu_iommu_ops.h"
+
+#if defined(CONFIG_ION_ROCKCHIP)
+#include <linux/rockchip_ion.h>
+#include <linux/rockchip-iovmm.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/component.h>
+#include <linux/fence.h>
+#include <linux/console.h>
+#include <linux/kref.h>
+#include <linux/fdtable.h>
+
+/* One ION buffer tracked per session; linked on session buffer_list. */
+struct vpu_ion_buffer {
+       struct list_head list;          /* node in session buffer_list */
+       struct ion_handle *handle;      /* ION handle owning the memory */
+       int index;                      /* session-local buffer index */
+};
+
+/* ION allocator private data, stored in vpu_iommu_info->private. */
+struct vpu_iommu_ion_info {
+       struct ion_client *ion_client;  /* client used for all ION calls */
+       bool attached;                  /* iovmm currently activated */
+};
+
+/*
+ * Find the session buffer with index @idx, or NULL when absent.
+ * Caller must hold session_info->list_mutex.
+ */
+static struct vpu_ion_buffer*
+vpu_ion_get_buffer_no_lock(struct vpu_iommu_session_info *session_info,
+                          int idx)
+{
+       struct vpu_ion_buffer *buffer;
+
+       list_for_each_entry(buffer, &session_info->buffer_list, list) {
+               if (buffer->index == idx)
+                       return buffer;
+       }
+
+       return NULL;
+}
+
+/*
+ * Session teardown hook.  Intentionally empty: ION manages buffer
+ * reference counts itself, so there is nothing per-session to release.
+ */
+static void
+vpu_ion_clear_session(struct vpu_iommu_session_info *session_info)
+{
+       /* do nothing */
+}
+
+/*
+ * Activate the iovmm for this device (idempotent).
+ *
+ * Returns 0 if already attached or on successful activation, otherwise
+ * the rockchip_iovmm_activate() error.  The original declared 'ret',
+ * never assigned it and returned it — undefined behavior.
+ */
+static int vpu_ion_attach(struct vpu_iommu_info *iommu_info)
+{
+       struct vpu_iommu_ion_info *ion_info = iommu_info->private;
+       int ret;
+
+       mutex_lock(&iommu_info->iommu_mutex);
+
+       if (ion_info->attached) {
+               mutex_unlock(&iommu_info->iommu_mutex);
+               return 0;
+       }
+
+       ret = rockchip_iovmm_activate(iommu_info->dev);
+       if (!ret)
+               ion_info->attached = true;
+
+       mutex_unlock(&iommu_info->iommu_mutex);
+
+       return ret;
+}
+
+/* Deactivate the iovmm for this device; no-op when not attached. */
+static void vpu_ion_detach(struct vpu_iommu_info *iommu_info)
+{
+       struct vpu_iommu_ion_info *ion_info = iommu_info->private;
+
+       mutex_lock(&iommu_info->iommu_mutex);
+
+       if (ion_info->attached) {
+               rockchip_iovmm_deactivate(iommu_info->dev);
+               ion_info->attached = false;
+       }
+
+       mutex_unlock(&iommu_info->iommu_mutex);
+}
+
+/* Detach from the iovmm, then release the ION private data. */
+static int vpu_ion_destroy(struct vpu_iommu_info *iommu_info)
+{
+       struct vpu_iommu_ion_info *ion_info = iommu_info->private;
+
+       vpu_ion_detach(iommu_info);
+       iommu_info->private = NULL;
+       kfree(ion_info);
+
+       return 0;
+}
+
+/*
+ * Remove buffer @idx from the session list and free its bookkeeping.
+ * The ION handle itself is released in vpu_ion_unmap_iommu(); this only
+ * drops the session-side record.  Returns 0 or -EINVAL if unknown.
+ */
+static int
+vpu_ion_free(struct vpu_iommu_session_info *session_info, int idx)
+{
+       struct vpu_ion_buffer *ion_buffer;
+
+       mutex_lock(&session_info->list_mutex);
+       ion_buffer = vpu_ion_get_buffer_no_lock(session_info, idx);
+
+       if (!ion_buffer) {
+               mutex_unlock(&session_info->list_mutex);
+               pr_err("%s can not find %d buffer in list\n", __func__, idx);
+
+               return -EINVAL;
+       }
+
+       /* Unlink under the lock; the kfree itself needs no locking. */
+       list_del_init(&ion_buffer->list);
+       mutex_unlock(&session_info->list_mutex);
+       kfree(ion_buffer);
+
+       return 0;
+}
+
+/*
+ * Drop the ION reference for buffer @idx; ION tears down the device
+ * mapping when the handle's last reference goes away.  Returns 0 or
+ * -EINVAL when the index is unknown.
+ */
+static int
+vpu_ion_unmap_iommu(struct vpu_iommu_session_info *session_info, int idx)
+{
+       struct vpu_ion_buffer *ion_buffer;
+       struct vpu_iommu_info *iommu_info = session_info->iommu_info;
+       struct vpu_iommu_ion_info *ion_info = iommu_info->private;
+
+       mutex_lock(&session_info->list_mutex);
+       ion_buffer = vpu_ion_get_buffer_no_lock(session_info, idx);
+       mutex_unlock(&session_info->list_mutex);
+
+       if (!ion_buffer) {
+               pr_err("%s can not find %d buffer in list\n", __func__, idx);
+
+               return -EINVAL;
+       }
+
+       ion_free(ion_info->ion_client, ion_buffer->handle);
+
+       return 0;
+}
+
+/*
+ * Resolve the device address of buffer @idx.  With an IOMMU present the
+ * buffer is mapped through ION; otherwise the physical address is
+ * reported.  @iova and @size receive the result.
+ */
+static int
+vpu_ion_map_iommu(struct vpu_iommu_session_info *session_info, int idx,
+                 unsigned long *iova, unsigned long *size)
+{
+       struct vpu_iommu_info *iommu_info = session_info->iommu_info;
+       struct vpu_iommu_ion_info *ion_info = iommu_info->private;
+       struct device *dev = session_info->dev;
+       struct vpu_ion_buffer *ion_buffer;
+
+       /* Force to flush iommu table */
+       rockchip_iovmm_invalidate_tlb(session_info->dev);
+
+       mutex_lock(&session_info->list_mutex);
+       ion_buffer = vpu_ion_get_buffer_no_lock(session_info, idx);
+       mutex_unlock(&session_info->list_mutex);
+
+       if (!ion_buffer) {
+               pr_err("%s can not find %d buffer in list\n", __func__, idx);
+
+               return -EINVAL;
+       }
+
+       if (session_info->mmu_dev)
+               return ion_map_iommu(dev, ion_info->ion_client,
+                                    ion_buffer->handle, iova, size);
+
+       return ion_phys(ion_info->ion_client, ion_buffer->handle,
+                       iova, (size_t *)size);
+}
+
+/*
+ * Release the kernel mapping created by vpu_ion_map_kernel().
+ *
+ * The original looked the buffer up and returned without ever calling
+ * ion_unmap_kernel(), leaking one kernel mapping per map call; balance
+ * it here.  Returns 0 or -EINVAL when the index is unknown.
+ */
+static int
+vpu_ion_unmap_kernel(struct vpu_iommu_session_info *session_info,
+                    int idx)
+{
+       struct vpu_ion_buffer *ion_buffer;
+       struct vpu_iommu_info *iommu_info = session_info->iommu_info;
+       struct vpu_iommu_ion_info *ion_info = iommu_info->private;
+
+       mutex_lock(&session_info->list_mutex);
+       ion_buffer = vpu_ion_get_buffer_no_lock(session_info, idx);
+       mutex_unlock(&session_info->list_mutex);
+
+       if (!ion_buffer) {
+               pr_err("%s can not find %d buffer in list\n", __func__, idx);
+
+               return -EINVAL;
+       }
+
+       ion_unmap_kernel(ion_info->ion_client, ion_buffer->handle);
+
+       return 0;
+}
+
+/*
+ * Map buffer @idx into kernel virtual address space via ION.
+ * Returns the kernel address or NULL when the index is unknown.
+ * ION tracks the mapping count; pair with vpu_ion_unmap_kernel().
+ */
+static void*
+vpu_ion_map_kernel(struct vpu_iommu_session_info *session_info, int idx)
+{
+       struct vpu_ion_buffer *ion_buffer;
+       struct vpu_iommu_info *iommu_info = session_info->iommu_info;
+       struct vpu_iommu_ion_info *ion_info = iommu_info->private;
+
+       /* Force a TLB flush before the CPU touches the buffer. */
+       rockchip_iovmm_invalidate_tlb(session_info->dev);
+
+       mutex_lock(&session_info->list_mutex);
+       ion_buffer = vpu_ion_get_buffer_no_lock(session_info, idx);
+       mutex_unlock(&session_info->list_mutex);
+
+       if (!ion_buffer) {
+               pr_err("%s can not find %d buffer in list\n", __func__, idx);
+
+               return NULL;
+       }
+
+       return ion_map_kernel(ion_info->ion_client, ion_buffer->handle);
+}
+
+/*
+ * Allocate @size bytes through ION (vmalloc heap with an IOMMU, CMA
+ * heap otherwise) and register the buffer with the session.
+ *
+ * Returns the new buffer index or a negative errno.  The original
+ * stored the ion_alloc() result without an IS_ERR() check, leaving a
+ * poisoned handle on the list.
+ */
+static int vpu_ion_alloc(struct vpu_iommu_session_info *session_info,
+                        unsigned long size,
+                        unsigned long align)
+{
+       struct vpu_ion_buffer *ion_buffer = NULL;
+       struct vpu_iommu_info *iommu_info = session_info->iommu_info;
+       struct vpu_iommu_ion_info *ion_info = iommu_info->private;
+       unsigned int heap_id_mask;
+
+       if (iommu_info->mmu_dev)
+               heap_id_mask = ION_HEAP(ION_VMALLOC_HEAP_ID);
+       else
+               heap_id_mask = ION_HEAP(ION_CMA_HEAP_ID);
+
+       ion_buffer = kzalloc(sizeof(*ion_buffer), GFP_KERNEL);
+       if (!ion_buffer)
+               return -ENOMEM;
+
+       ion_buffer->handle = ion_alloc(ion_info->ion_client, size,
+                                      align, heap_id_mask, 0);
+       if (IS_ERR(ion_buffer->handle)) {
+               int ret = PTR_ERR(ion_buffer->handle);
+
+               kfree(ion_buffer);
+               return ret;
+       }
+
+       /* Publish under a wrapping 28-bit session index. */
+       INIT_LIST_HEAD(&ion_buffer->list);
+       mutex_lock(&session_info->list_mutex);
+       ion_buffer->index = session_info->max_idx;
+       list_add_tail(&ion_buffer->list, &session_info->buffer_list);
+       session_info->max_idx++;
+       if ((session_info->max_idx & 0xfffffff) == 0)
+               session_info->max_idx = 0;
+       mutex_unlock(&session_info->list_mutex);
+
+       return ion_buffer->index;
+}
+
+/*
+ * Import a dma-buf fd through ION and register it with the session.
+ *
+ * Returns the new buffer index or a negative errno.  The original
+ * stored the ion_import_dma_buf() result without an IS_ERR() check.
+ */
+static int
+vpu_ion_import(struct vpu_iommu_session_info *session_info, int fd)
+{
+       struct vpu_ion_buffer *ion_buffer = NULL;
+       struct vpu_iommu_info *iommu_info = session_info->iommu_info;
+       struct vpu_iommu_ion_info *ion_info = iommu_info->private;
+
+       ion_buffer = kzalloc(sizeof(*ion_buffer), GFP_KERNEL);
+       if (!ion_buffer)
+               return -ENOMEM;
+
+       ion_buffer->handle = ion_import_dma_buf(ion_info->ion_client, fd);
+       if (IS_ERR(ion_buffer->handle)) {
+               int ret = PTR_ERR(ion_buffer->handle);
+
+               kfree(ion_buffer);
+               return ret;
+       }
+
+       /* Publish under a wrapping 28-bit session index. */
+       INIT_LIST_HEAD(&ion_buffer->list);
+       mutex_lock(&session_info->list_mutex);
+       ion_buffer->index = session_info->max_idx;
+       list_add_tail(&ion_buffer->list, &session_info->buffer_list);
+       session_info->max_idx++;
+       if ((session_info->max_idx & 0xfffffff) == 0)
+               session_info->max_idx = 0;
+       mutex_unlock(&session_info->list_mutex);
+
+       return ion_buffer->index;
+}
+
+/*
+ * Allocate the ION allocator private data, create the "vpu" ION client
+ * and attach to the iovmm.
+ *
+ * Returns 0 on success, -ENOMEM or -1 on failure (the -1 preserves the
+ * original failure value for callers).  The original leaked @ion_info
+ * and still attempted the iovmm attach when client creation failed.
+ */
+static int vpu_ion_create(struct vpu_iommu_info *iommu_info)
+{
+       struct vpu_iommu_ion_info *ion_info;
+
+       ion_info = kzalloc(sizeof(*ion_info), GFP_KERNEL);
+       if (!ion_info)
+               return -ENOMEM;
+       iommu_info->private = ion_info;
+
+       ion_info->ion_client = rockchip_ion_client_create("vpu");
+       ion_info->attached = false;
+       if (IS_ERR(ion_info->ion_client)) {
+               iommu_info->private = NULL;
+               kfree(ion_info);
+               return -1;
+       }
+
+       vpu_ion_attach(iommu_info);
+
+       return 0;
+}
+
+#else
+
+/*
+ * CONFIG_ION_ROCKCHIP disabled: no-op stubs so the ops table below
+ * still links.  vpu_ion_create() returns -1 so callers can tell the
+ * allocator is unavailable.
+ */
+static void
+vpu_ion_clear_session(struct vpu_iommu_session_info *session_info)
+{
+       /* do nothing */
+}
+
+static int vpu_ion_attach(struct vpu_iommu_info *iommu_info)
+{
+       return 0;
+}
+
+static void vpu_ion_detach(struct vpu_iommu_info *iommu_info)
+{
+}
+
+static int vpu_ion_destroy(struct vpu_iommu_info *iommu_info)
+{
+       return 0;
+}
+
+static int
+vpu_ion_free(struct vpu_iommu_session_info *session_info, int idx)
+{
+       return 0;
+}
+
+static int
+vpu_ion_unmap_iommu(struct vpu_iommu_session_info *session_info, int idx)
+{
+       return 0;
+}
+
+static int
+vpu_ion_map_iommu(struct vpu_iommu_session_info *session_info, int idx,
+                 unsigned long *iova, unsigned long *size)
+{
+       return 0;
+}
+
+static int
+vpu_ion_unmap_kernel(struct vpu_iommu_session_info *session_info,
+                    int idx)
+{
+       return 0;
+}
+
+static void*
+vpu_ion_map_kernel(struct vpu_iommu_session_info *session_info, int idx)
+{
+       return NULL;
+}
+
+static int vpu_ion_alloc(struct vpu_iommu_session_info *session_info,
+                        unsigned long size,
+                        unsigned long align)
+{
+       return 0;
+}
+
+static int
+vpu_ion_import(struct vpu_iommu_session_info *session_info, int fd)
+{
+       return 0;
+}
+
+/* Signal "no ION support" to vpu_iommu_info_create(). */
+static int vpu_ion_create(struct vpu_iommu_info *iommu_info)
+{
+       return -1;
+}
+#endif
+
+/*
+ * Allocator operations backed by ION.  free_fd and dump are
+ * intentionally absent: callers must tolerate NULL entries.
+ */
+static struct vpu_iommu_ops ion_ops = {
+       .create = vpu_ion_create,
+       .destroy = vpu_ion_destroy,
+       .alloc = vpu_ion_alloc,
+       .import = vpu_ion_import,
+       .free = vpu_ion_free,
+       .free_fd = NULL,
+       .map_kernel = vpu_ion_map_kernel,
+       .unmap_kernel = vpu_ion_unmap_kernel,
+       .map_iommu = vpu_ion_map_iommu,
+       .unmap_iommu = vpu_ion_unmap_iommu,
+       .dump = NULL,
+       .attach = vpu_ion_attach,
+       .detach = vpu_ion_detach,
+       .clear = vpu_ion_clear_session,
+};
+
+/*
+ * Install the ION-backed allocator operations on @iommu_info.
+ * Buffer reference counting is left entirely to ION, so these ops only
+ * map/unmap and import/free on every call.
+ */
+void vpu_iommu_ion_set_ops(struct vpu_iommu_info *iommu_info)
+{
+       if (iommu_info)
+               iommu_info->ops = &ion_ops;
+}
diff --git a/drivers/video/rockchip/vpu/vpu_iommu_ops.c b/drivers/video/rockchip/vpu/vpu_iommu_ops.c
new file mode 100644 (file)
index 0000000..369358c
--- /dev/null
@@ -0,0 +1,291 @@
+/**
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: Jung Zhao jung.zhao@rock-chips.com
+ *         Randy Li, randy.li@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "vpu_iommu_ops.h"
+
+/*
+ * Find the per-session bookkeeping for @session, or NULL if this
+ * session has not touched the allocator yet.
+ */
+static
+struct vpu_iommu_session_info *vpu_iommu_get_session_info(struct vpu_iommu_info *iommu_info,
+                                                         struct mpp_session *session)
+{
+       struct vpu_iommu_session_info *info;
+
+       list_for_each_entry(info, &iommu_info->session_list, head) {
+               if (info->session == session)
+                       return info;
+       }
+
+       return NULL;
+}
+
+/*
+ * Initialize the selected allocator backend.
+ *
+ * iommu_info->ops may legitimately be NULL (no allocator matched in
+ * vpu_iommu_info_create()), so it must be checked before dereference;
+ * the original crashed in that case.
+ */
+int vpu_iommu_create(struct vpu_iommu_info *iommu_info)
+{
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->create)
+               return -EINVAL;
+
+       return iommu_info->ops->create(iommu_info);
+}
+
+/*
+ * Look up the per-session bookkeeping for @session, creating and
+ * registering it on first use.  Returns NULL on allocation failure.
+ * Factored out of vpu_iommu_alloc()/vpu_iommu_import(), which carried
+ * two identical copies of this body.
+ */
+static struct vpu_iommu_session_info *
+vpu_iommu_get_or_create_session(struct vpu_iommu_info *iommu_info,
+                               struct mpp_session *session)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       session_info = vpu_iommu_get_session_info(iommu_info, session);
+       if (session_info)
+               return session_info;
+
+       session_info = kzalloc(sizeof(*session_info), GFP_KERNEL);
+       if (!session_info)
+               return NULL;
+
+       INIT_LIST_HEAD(&session_info->head);
+       INIT_LIST_HEAD(&session_info->buffer_list);
+       mutex_init(&session_info->list_mutex);
+       session_info->max_idx = 0;
+       session_info->session = session;
+       session_info->mmu_dev = iommu_info->mmu_dev;
+       session_info->dev = iommu_info->dev;
+       session_info->iommu_info = iommu_info;
+       session_info->buffer_nums = 0;
+       mutex_lock(&iommu_info->list_mutex);
+       list_add_tail(&session_info->head, &iommu_info->session_list);
+       mutex_unlock(&iommu_info->list_mutex);
+
+       return session_info;
+}
+
+/* Allocate a buffer for @session through the active allocator. */
+int vpu_iommu_alloc(struct vpu_iommu_info *iommu_info,
+                   struct mpp_session *session,
+                   unsigned long size,
+                   unsigned long align)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->alloc ||
+           !session)
+               return -EINVAL;
+
+       session_info = vpu_iommu_get_or_create_session(iommu_info, session);
+       if (!session_info)
+               return -ENOMEM;
+
+       session_info->debug_level = iommu_info->debug_level;
+
+       return iommu_info->ops->alloc(session_info, size, align);
+}
+
+/* Import dma-buf @fd for @session through the active allocator. */
+int vpu_iommu_import(struct vpu_iommu_info *iommu_info,
+                    struct mpp_session *session, int fd)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->import ||
+           !session)
+               return -EINVAL;
+
+       session_info = vpu_iommu_get_or_create_session(iommu_info, session);
+       if (!session_info)
+               return -ENOMEM;
+
+       session_info->debug_level = iommu_info->debug_level;
+
+       return iommu_info->ops->import(session_info, fd);
+}
+
+/*
+ * The wrappers below all shared the same defect: they called
+ * vpu_iommu_get_session_info() — which dereferences @iommu_info —
+ * BEFORE the NULL check on @iommu_info, and never checked ops for
+ * NULL.  Each now validates its arguments before any dereference.
+ */
+
+/* Free buffer @idx owned by @session. */
+int vpu_iommu_free(struct vpu_iommu_info *iommu_info,
+                  struct mpp_session *session, int idx)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->free)
+               return -EINVAL;
+
+       session_info = vpu_iommu_get_session_info(iommu_info, session);
+       if (!session_info)
+               return -EINVAL;
+
+       return iommu_info->ops->free(session_info, idx);
+}
+
+/* Free the buffer imported from dma-buf @fd, if the backend supports it. */
+int vpu_iommu_free_fd(struct vpu_iommu_info *iommu_info,
+                     struct mpp_session *session, int fd)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->free_fd)
+               return -EINVAL;
+
+       session_info = vpu_iommu_get_session_info(iommu_info, session);
+       if (!session_info)
+               return -EINVAL;
+
+       return iommu_info->ops->free_fd(session_info, fd);
+}
+
+/* Map buffer @idx into kernel space; NULL on failure. */
+void *vpu_iommu_map_kernel(struct vpu_iommu_info *iommu_info,
+                          struct mpp_session *session, int idx)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->map_kernel)
+               return NULL;
+
+       session_info = vpu_iommu_get_session_info(iommu_info, session);
+       if (!session_info)
+               return NULL;
+
+       return iommu_info->ops->map_kernel(session_info, idx);
+}
+
+/* Undo vpu_iommu_map_kernel() for buffer @idx. */
+int vpu_iommu_unmap_kernel(struct vpu_iommu_info *iommu_info,
+                          struct mpp_session *session, int idx)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->unmap_kernel)
+               return -EINVAL;
+
+       session_info = vpu_iommu_get_session_info(iommu_info, session);
+       if (!session_info)
+               return -EINVAL;
+
+       return iommu_info->ops->unmap_kernel(session_info, idx);
+}
+
+/* Resolve the device address and size of buffer @idx. */
+int vpu_iommu_map_iommu(struct vpu_iommu_info *iommu_info,
+                       struct mpp_session *session,
+                       int idx, unsigned long *iova,
+                       unsigned long *size)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->map_iommu)
+               return -EINVAL;
+
+       session_info = vpu_iommu_get_session_info(iommu_info, session);
+       if (!session_info)
+               return -EINVAL;
+
+       return iommu_info->ops->map_iommu(session_info, idx, iova, size);
+}
+
+/* Undo vpu_iommu_map_iommu() for buffer @idx. */
+int vpu_iommu_unmap_iommu(struct vpu_iommu_info *iommu_info,
+                         struct mpp_session *session, int idx)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->unmap_iommu)
+               return -EINVAL;
+
+       session_info = vpu_iommu_get_session_info(iommu_info, session);
+       if (!session_info)
+               return -EINVAL;
+
+       return iommu_info->ops->unmap_iommu(session_info, idx);
+}
+
+/* Tear down the allocator backend state. */
+int vpu_iommu_destroy(struct vpu_iommu_info *iommu_info)
+{
+       /* ops may be NULL when no allocator was selected. */
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->destroy)
+               return -EINVAL;
+
+       return iommu_info->ops->destroy(iommu_info);
+}
+
+/* Dump allocator state for @session, if the backend supports it. */
+void vpu_iommu_dump(struct vpu_iommu_info *iommu_info,
+                   struct mpp_session *session)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       /* Validate before the lookup, which dereferences iommu_info. */
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->dump)
+               return;
+
+       session_info = vpu_iommu_get_session_info(iommu_info, session);
+       if (!session_info)
+               return;
+
+       iommu_info->ops->dump(session_info);
+}
+
+/* Release all buffers of @session and drop its bookkeeping. */
+void vpu_iommu_clear(struct vpu_iommu_info *iommu_info,
+                    struct mpp_session *session)
+{
+       struct vpu_iommu_session_info *session_info;
+
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->clear)
+               return;
+
+       session_info = vpu_iommu_get_session_info(iommu_info, session);
+       if (!session_info)
+               return;
+
+       iommu_info->ops->clear(session_info);
+
+       /* Unlink and free the per-session record. */
+       mutex_lock(&iommu_info->list_mutex);
+       list_del_init(&session_info->head);
+       kfree(session_info);
+       mutex_unlock(&iommu_info->list_mutex);
+}
+
+/* Activate the backend's device attachment; 0 when not supported. */
+int vpu_iommu_attach(struct vpu_iommu_info *iommu_info)
+{
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->attach)
+               return 0;
+
+       return iommu_info->ops->attach(iommu_info);
+}
+
+/* Deactivate the backend's device attachment. */
+void vpu_iommu_detach(struct vpu_iommu_info *iommu_info)
+{
+       if (!iommu_info || !iommu_info->ops || !iommu_info->ops->detach)
+               return;
+
+       /*
+        * Plain call: "return <void expression>;" as in the original is
+        * a C constraint violation (C11 6.8.6.4).
+        */
+       iommu_info->ops->detach(iommu_info);
+}
+
+/*
+ * Allocate and initialize an allocator context for @dev.
+ *
+ * @alloc_type selects ALLOCATOR_USE_DRM or ALLOCATOR_USE_ION; when the
+ * matching config option is off (or the type is unknown), ops stays
+ * NULL and the returned context is inert.
+ *
+ * NOTE(review): the vpu_iommu_create() return value is ignored, so a
+ * context whose backend failed to initialize is still handed back to
+ * the caller — confirm callers tolerate that before changing it.
+ */
+struct vpu_iommu_info*
+vpu_iommu_info_create(struct device *dev,
+                     struct device *mmu_dev,
+                     int alloc_type)
+{
+       struct vpu_iommu_info *iommu_info = NULL;
+
+       iommu_info = kzalloc(sizeof(*iommu_info), GFP_KERNEL);
+       if (!iommu_info)
+               return NULL;
+
+       iommu_info->dev = dev;
+       INIT_LIST_HEAD(&iommu_info->session_list);
+       mutex_init(&iommu_info->list_mutex);
+       mutex_init(&iommu_info->iommu_mutex);
+       switch (alloc_type) {
+#ifdef CONFIG_DRM
+       case ALLOCATOR_USE_DRM:
+               vpu_iommu_drm_set_ops(iommu_info);
+               break;
+#endif
+#ifdef CONFIG_ION
+       case ALLOCATOR_USE_ION:
+               vpu_iommu_ion_set_ops(iommu_info);
+               break;
+#endif
+       default:
+               iommu_info->ops = NULL;
+               break;
+       }
+
+       iommu_info->mmu_dev = mmu_dev;
+
+       vpu_iommu_create(iommu_info);
+
+       return iommu_info;
+}
+
+/*
+ * Destroy the backend state, then free the context itself.
+ * Always returns 0.
+ */
+int vpu_iommu_info_destroy(struct vpu_iommu_info *iommu_info)
+{
+       vpu_iommu_destroy(iommu_info);
+       kfree(iommu_info);
+
+       return 0;
+}
diff --git a/drivers/video/rockchip/vpu/vpu_iommu_ops.h b/drivers/video/rockchip/vpu/vpu_iommu_ops.h
new file mode 100644 (file)
index 0000000..032f6c5
--- /dev/null
@@ -0,0 +1,139 @@
+/**
+ * Copyright (C) 2016 Fuzhou Rockchip Electronics Co., Ltd
+ * author: Jung Zhao jung.zhao@rock-chips.com
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VPU_IOMMU_OPS_H__
+#define __VPU_IOMMU_OPS_H__
+
+#include <linux/platform_device.h>
+#include "mpp_service.h"
+
+/* Per-session cap on tracked buffers — presumably enforced by the
+ * allocator backends; confirm against vpu_iommu_drm.c / vpu_iommu_ion.c.
+ */
+#define BUFFER_LIST_MAX_NUMS   30
+
+/* alloc_type values accepted by vpu_iommu_info_create(). */
+#define ALLOCATOR_USE_ION              0x00000000
+#define ALLOCATOR_USE_DRM              0x00000001
+
+/* Debug-level bits tested by the vpu_iommu_debug* macros below. */
+#define DEBUG_IOMMU_OPS_DUMP   0x00020000
+#define DEBUG_IOMMU_NORMAL     0x00040000
+
+#define vpu_iommu_debug_func(debug_level, type, fmt, args...)  \
+       do {                                                    \
+               if (unlikely((debug_level) & type)) {           \
+                       pr_info("%s:%d: " fmt,                  \
+                                __func__, __LINE__, ##args);   \
+               }                                               \
+       } while (0)
+#define vpu_iommu_debug(debug_level, type, fmt, args...)       \
+       do {                                                    \
+               if (unlikely((debug_level) & type)) {           \
+                       pr_info(fmt, ##args);                   \
+               }                                               \
+       } while (0)
+
+struct vpu_iommu_info;
+struct vpu_iommu_session_info;
+
+/*
+ * vpu_iommu_ops - allocator backend vtable, installed by
+ * vpu_iommu_drm_set_ops() or vpu_iommu_ion_set_ops().
+ *
+ * Context-wide hooks take the vpu_iommu_info; buffer hooks operate on a
+ * session plus a buffer handle (internal index 'idx' or imported 'fd').
+ */
+struct vpu_iommu_ops {
+	/* One-time backend setup for the context. */
+	int (*create)(struct vpu_iommu_info *iommu_info);
+	/* Allocate a buffer of 'size'/'align' for the session —
+	 * presumably returns its index; confirm in the backends.
+	 */
+	int (*alloc)(struct vpu_iommu_session_info *session_info,
+		     unsigned long size,
+		     unsigned long align);
+	/* Take ownership of an externally allocated buffer via dma-buf fd. */
+	int (*import)(struct vpu_iommu_session_info *session_info, int fd);
+	/* Release a buffer by index or by the fd it was imported from. */
+	int (*free)(struct vpu_iommu_session_info *session_info, int idx);
+	int (*free_fd)(struct vpu_iommu_session_info *session_info, int fd);
+	/* CPU-side kernel mapping of a buffer. */
+	void* (*map_kernel)(struct vpu_iommu_session_info *session_info,
+			    int idx);
+	int (*unmap_kernel)(struct vpu_iommu_session_info *session_info,
+			    int idx);
+	/* Device-side mapping: reports the IOVA and size of the buffer. */
+	int (*map_iommu)(struct vpu_iommu_session_info *session_info,
+			 int idx,
+			 unsigned long *iova, unsigned long *size);
+	int (*unmap_iommu)(struct vpu_iommu_session_info *session_info,
+			   int idx);
+	/* Tear down everything create() set up. */
+	int (*destroy)(struct vpu_iommu_info *iommu_info);
+	/* Debug dump of a session's buffers. */
+	void (*dump)(struct vpu_iommu_session_info *session_info);
+	/* Attach/detach the VPU to/from its IOMMU domain. */
+	int (*attach)(struct vpu_iommu_info *iommu_info);
+	void (*detach)(struct vpu_iommu_info *iommu_info);
+	/* Drop all of a session's buffers; called by vpu_iommu_clear()
+	 * just before the session_info itself is freed.
+	 */
+	void (*clear)(struct vpu_iommu_session_info *session_info);
+};
+
+/*
+ * vpu_iommu_session_info - per-session buffer bookkeeping.
+ *
+ * One instance per mpp_session, linked on vpu_iommu_info.session_list
+ * and freed (after ops->clear()) by vpu_iommu_clear().
+ */
+struct vpu_iommu_session_info {
+	struct list_head head;		/* link in iommu_info->session_list */
+	struct mpp_session *session;	/* owning driver session */
+	int buffer_nums;		/* count of entries on buffer_list */
+	struct list_head buffer_list;	/* backend buffer records */
+	struct mutex list_mutex;	/* protects buffer_list/buffer_nums */
+	int max_idx;			/* presumably next/highest buffer index — confirm in backends */
+	struct device *dev;		/* VPU device */
+	struct device *mmu_dev;		/* its IOMMU device */
+	struct vpu_iommu_info *iommu_info;	/* back-pointer to the context */
+	int debug_level;		/* DEBUG_* bitmask for the macros above */
+};
+
+/*
+ * vpu_iommu_info - allocator context, one per VPU device.
+ *
+ * Created by vpu_iommu_info_create(); destroyed by
+ * vpu_iommu_info_destroy().
+ */
+struct vpu_iommu_info {
+	struct list_head session_list;	/* vpu_iommu_session_info.head list */
+	struct mutex list_mutex;	/* protects session_list */
+	struct mutex iommu_mutex;	/* presumably serialises map/unmap in the backends — confirm */
+	struct device *dev;		/* VPU device */
+	struct device *mmu_dev;		/* its IOMMU device */
+	struct vpu_iommu_ops *ops;	/* DRM or ION backend vtable */
+	int debug_level;		/* DEBUG_* bitmask for the macros above */
+	void *private;			/* backend-private state */
+};
+
+/* Backend installers: populate iommu_info->ops for the chosen allocator. */
+#ifdef CONFIG_DRM
+void vpu_iommu_drm_set_ops(struct vpu_iommu_info *iommu_info);
+#endif
+#ifdef CONFIG_ION
+void vpu_iommu_ion_set_ops(struct vpu_iommu_info *iommu_info);
+#endif
+
+/* Context lifetime. */
+struct vpu_iommu_info *vpu_iommu_info_create(struct device *dev,
+					     struct device *mmu_dev,
+					     int alloc_type);
+int vpu_iommu_info_destroy(struct vpu_iommu_info *iommu_info);
+
+/*
+ * Session-aware wrappers around the vpu_iommu_ops hooks; they look up
+ * (or create) the session's vpu_iommu_session_info before dispatching
+ * to the backend.
+ */
+int vpu_iommu_create(struct vpu_iommu_info *iommu_info);
+int vpu_iommu_alloc(struct vpu_iommu_info *iommu_info,
+		    struct mpp_session *session,
+		    unsigned long size,
+		    unsigned long align);
+int vpu_iommu_import(struct vpu_iommu_info *iommu_info,
+		     struct mpp_session *session, int fd);
+int vpu_iommu_free(struct vpu_iommu_info *iommu_info,
+		   struct mpp_session *session, int idx);
+int vpu_iommu_free_fd(struct vpu_iommu_info *iommu_info,
+		      struct mpp_session *session, int fd);
+void *vpu_iommu_map_kernel(struct vpu_iommu_info *iommu_info,
+			   struct mpp_session *session, int idx);
+int vpu_iommu_unmap_kernel(struct vpu_iommu_info *iommu_info,
+			   struct mpp_session *session, int idx);
+int vpu_iommu_map_iommu(struct vpu_iommu_info *iommu_info,
+			struct mpp_session *session,
+			int idx,
+			unsigned long *iova,
+			unsigned long *size);
+int vpu_iommu_unmap_iommu(struct vpu_iommu_info *iommu_info,
+			  struct mpp_session *session,
+			  int idx);
+int vpu_iommu_destroy(struct vpu_iommu_info *iommu_info);
+void vpu_iommu_dump(struct vpu_iommu_info *iommu_info,
+		    struct mpp_session *session);
+/* Frees the session's bookkeeping after the backend clears its buffers. */
+void vpu_iommu_clear(struct vpu_iommu_info *iommu_info,
+		     struct mpp_session *session);
+
+int vpu_iommu_attach(struct vpu_iommu_info *iommu_info);
+void vpu_iommu_detach(struct vpu_iommu_info *iommu_info);
+
+#endif