--- /dev/null
+\r
+/* arch/arm/mach-rk29/vpu.c\r
+ *\r
+ * Copyright (C) 2010 ROCKCHIP, Inc.\r
+ * author: chenhengming chm@rock-chips.com\r
+ *\r
+ * This software is licensed under the terms of the GNU General Public\r
+ * License version 2, as published by the Free Software Foundation, and\r
+ * may be copied, distributed, and modified under those terms.\r
+ *\r
+ * This program is distributed in the hope that it will be useful,\r
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+ * GNU General Public License for more details.\r
+ *\r
+ */\r
+\r
+#include <linux/clk.h>\r
+#include <linux/delay.h>\r
+#include <linux/init.h>\r
+#include <linux/interrupt.h>\r
+#include <linux/io.h>\r
+#include <linux/kernel.h>\r
+#include <linux/module.h>\r
+#include <linux/fs.h>\r
+#include <linux/ioport.h>\r
+#include <linux/miscdevice.h>\r
+#include <linux/mm.h>\r
+#include <linux/poll.h>\r
+#include <linux/platform_device.h>\r
+#include <linux/sched.h>\r
+#include <linux/slab.h>\r
+#include <linux/wakelock.h>\r
+#include <linux/cdev.h>\r
+#include <linux/of.h>\r
+\r
+#include <asm/cacheflush.h>\r
+#include <asm/uaccess.h>\r
+\r
+#ifdef CONFIG_DEBUG_FS\r
+#include <linux/debugfs.h>\r
+#endif\r
+\r
+#include "cru.h"\r
+\r
+#if defined(CONFIG_ARCH_RK319X)\r
+#include <mach/grf.h>\r
+#endif\r
+\r
+#include "vcodec_service.h"\r
+#include "cpu.h"\r
+\r
+#define HEVC_TEST_ENABLE 0\r
+#define HEVC_SIM_ENABLE 0\r
+\r
+typedef enum {\r
+ VPU_DEC_ID_9190 = 0x6731,\r
+ VPU_ID_8270 = 0x8270,\r
+ VPU_ID_4831 = 0x4831,\r
+ HEVC_ID = 0x6867,\r
+} VPU_HW_ID;\r
+\r
+typedef enum {\r
+ VPU_DEC_TYPE_9190 = 0,\r
+ VPU_ENC_TYPE_8270 = 0x100,\r
+ VPU_ENC_TYPE_4831 ,\r
+} VPU_HW_TYPE_E;\r
+\r
+typedef enum VPU_FREQ {\r
+ VPU_FREQ_200M,\r
+ VPU_FREQ_266M,\r
+ VPU_FREQ_300M,\r
+ VPU_FREQ_400M,\r
+ VPU_FREQ_DEFAULT,\r
+ VPU_FREQ_BUT,\r
+} VPU_FREQ;\r
+\r
+typedef struct {\r
+ VPU_HW_ID hw_id;\r
+ unsigned long hw_addr;\r
+ unsigned long enc_offset;\r
+ unsigned long enc_reg_num;\r
+ unsigned long enc_io_size;\r
+ unsigned long dec_offset;\r
+ unsigned long dec_reg_num;\r
+ unsigned long dec_io_size;\r
+} VPU_HW_INFO_E;\r
+\r
+#define VPU_SERVICE_SHOW_TIME 0\r
+\r
+#if VPU_SERVICE_SHOW_TIME\r
+static struct timeval enc_start, enc_end;\r
+static struct timeval dec_start, dec_end;\r
+static struct timeval pp_start, pp_end;\r
+#endif\r
+\r
+#define MHZ (1000*1000)\r
+\r
+#if 0\r
+#if defined(CONFIG_ARCH_RK319X)\r
+#define VCODEC_PHYS RK319X_VCODEC_PHYS\r
+#else\r
+#define VCODEC_PHYS (0x10104000)\r
+#endif\r
+#endif\r
+\r
+#define REG_NUM_9190_DEC (60)\r
+#define REG_NUM_9190_PP (41)\r
+#define REG_NUM_9190_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP)\r
+\r
+#define REG_NUM_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP)\r
+\r
+#define REG_NUM_ENC_8270 (96)\r
+#define REG_SIZE_ENC_8270 (0x200)\r
+#define REG_NUM_ENC_4831 (164)\r
+#define REG_SIZE_ENC_4831 (0x400)\r
+\r
+#define REG_NUM_HEVC_DEC (68)\r
+\r
+#define SIZE_REG(reg) ((reg)*4)\r
+\r
+static VPU_HW_INFO_E vpu_hw_set[] = {\r
+ [0] = {\r
+ .hw_id = VPU_ID_8270,\r
+ .hw_addr = 0,\r
+ .enc_offset = 0x0,\r
+ .enc_reg_num = REG_NUM_ENC_8270,\r
+ .enc_io_size = REG_NUM_ENC_8270 * 4,\r
+ .dec_offset = REG_SIZE_ENC_8270,\r
+ .dec_reg_num = REG_NUM_9190_DEC_PP,\r
+ .dec_io_size = REG_NUM_9190_DEC_PP * 4,\r
+ },\r
+ [1] = {\r
+ .hw_id = VPU_ID_4831,\r
+ .hw_addr = 0,\r
+ .enc_offset = 0x0,\r
+ .enc_reg_num = REG_NUM_ENC_4831,\r
+ .enc_io_size = REG_NUM_ENC_4831 * 4,\r
+ .dec_offset = REG_SIZE_ENC_4831,\r
+ .dec_reg_num = REG_NUM_9190_DEC_PP,\r
+ .dec_io_size = REG_NUM_9190_DEC_PP * 4,\r
+ },\r
+ [2] = {\r
+ .hw_id = HEVC_ID,\r
+ .hw_addr = 0,\r
+ .dec_offset = 0x0,\r
+ .dec_reg_num = REG_NUM_HEVC_DEC,\r
+ .dec_io_size = REG_NUM_HEVC_DEC * 4,\r
+ },\r
+};\r
+\r
+\r
+#define DEC_INTERRUPT_REGISTER 1\r
+#define PP_INTERRUPT_REGISTER 60\r
+#define ENC_INTERRUPT_REGISTER 1\r
+\r
+#define DEC_INTERRUPT_BIT 0x100\r
+#define DEC_BUFFER_EMPTY_BIT 0x4000\r
+#define PP_INTERRUPT_BIT 0x100\r
+#define ENC_INTERRUPT_BIT 0x1\r
+\r
+#define HEVC_DEC_INT_RAW_BIT 0x200\r
+#define HEVC_DEC_STR_ERROR_BIT 0x4000\r
+#define HEVC_DEC_BUS_ERROR_BIT 0x2000\r
+#define HEVC_DEC_BUFFER_EMPTY_BIT 0x10000\r
+\r
+#define VPU_REG_EN_ENC 14\r
+#define VPU_REG_ENC_GATE 2\r
+#define VPU_REG_ENC_GATE_BIT (1<<4)\r
+\r
+#define VPU_REG_EN_DEC 1\r
+#define VPU_REG_DEC_GATE 2\r
+#define VPU_REG_DEC_GATE_BIT (1<<10)\r
+#define VPU_REG_EN_PP 0\r
+#define VPU_REG_PP_GATE 1\r
+#define VPU_REG_PP_GATE_BIT (1<<8)\r
+#define VPU_REG_EN_DEC_PP 1\r
+#define VPU_REG_DEC_PP_GATE 61\r
+#define VPU_REG_DEC_PP_GATE_BIT (1<<8)\r
+\r
+/**\r
+ * struct for process session which connect to vpu\r
+ *\r
+ * @author ChenHengming (2011-5-3)\r
+ */\r
+typedef struct vpu_session {\r
+ VPU_CLIENT_TYPE type;\r
+ /* a linked list of data so we can access them for debugging */\r
+ struct list_head list_session;\r
+ /* a linked list of register data waiting for process */\r
+ struct list_head waiting;\r
+ /* a linked list of register data in processing */\r
+ struct list_head running;\r
+ /* a linked list of register data processed */\r
+ struct list_head done;\r
+ wait_queue_head_t wait;\r
+ pid_t pid;\r
+ atomic_t task_running;\r
+} vpu_session;\r
+\r
+/**\r
+ * struct for process register set\r
+ *\r
+ * @author ChenHengming (2011-5-4)\r
+ */\r
+typedef struct vpu_reg {\r
+ VPU_CLIENT_TYPE type;\r
+ VPU_FREQ freq;\r
+ vpu_session *session;\r
+ struct list_head session_link; /* link to vpu service session */\r
+ struct list_head status_link; /* link to register set list */\r
+ unsigned long size;\r
+ unsigned long *reg;\r
+} vpu_reg;\r
+\r
+typedef struct vpu_device {\r
+ atomic_t irq_count_codec;\r
+ atomic_t irq_count_pp;\r
+ unsigned long iobaseaddr;\r
+ unsigned int iosize;\r
+ volatile u32 *hwregs;\r
+} vpu_device;\r
+\r
+typedef struct vpu_service_info {\r
+ struct wake_lock wake_lock;\r
+ struct delayed_work power_off_work;\r
+ struct mutex lock;\r
+ struct list_head waiting; /* link to link_reg in struct vpu_reg */\r
+ struct list_head running; /* link to link_reg in struct vpu_reg */\r
+ struct list_head done; /* link to link_reg in struct vpu_reg */\r
+ struct list_head session; /* link to list_session in struct vpu_session */\r
+ atomic_t total_running;\r
+ bool enabled;\r
+ vpu_reg *reg_codec;\r
+ vpu_reg *reg_pproc;\r
+ vpu_reg *reg_resev;\r
+ VPUHwDecConfig_t dec_config;\r
+ VPUHwEncConfig_t enc_config;\r
+ VPU_HW_INFO_E *hw_info;\r
+ unsigned long reg_size;\r
+ bool auto_freq;\r
+ bool bug_dec_addr;\r
+ atomic_t freq_status;\r
+\r
+ struct clk *aclk_vcodec;\r
+ struct clk *hclk_vcodec;\r
+\r
+ int irq_dec;\r
+ int irq_enc;\r
+\r
+ vpu_device enc_dev;\r
+ vpu_device dec_dev;\r
+\r
+ struct device *dev;\r
+\r
+ struct cdev cdev;\r
+ dev_t dev_t;\r
+ struct class *cls;\r
+ struct device *child_dev;\r
+\r
+ struct dentry *debugfs_dir;\r
+ struct dentry *debugfs_file_regs;\r
+\r
+ u32 irq_status;\r
+\r
+ struct delayed_work simulate_work;\r
+} vpu_service_info;\r
+\r
+typedef struct vpu_request\r
+{\r
+ unsigned long *req;\r
+ unsigned long size;\r
+} vpu_request;\r
+\r
+/// global variable\r
+//static struct clk *pd_video;\r
+static struct dentry *parent; // debugfs root directory for all device (vpu, hevc).\r
+\r
+#ifdef CONFIG_DEBUG_FS\r
+static int vcodec_debugfs_init(void);\r
+static void vcodec_debugfs_exit(void);\r
+static struct dentry* vcodec_debugfs_create_device_dir(char *dirname, struct dentry *parent);\r
+static int debug_vcodec_open(struct inode *inode, struct file *file);\r
+\r
+static const struct file_operations debug_vcodec_fops = {\r
+ .open = debug_vcodec_open,\r
+ .read = seq_read,\r
+ .llseek = seq_lseek,\r
+ .release = single_release,\r
+};\r
+#endif\r
+\r
+#define VPU_POWER_OFF_DELAY 4*HZ /* 4s */\r
+#define VPU_TIMEOUT_DELAY 2*HZ /* 2s */\r
+\r
+#define VPU_SIMULATE_DELAY msecs_to_jiffies(5)\r
+\r
+static void vpu_get_clk(struct vpu_service_info *pservice) /* look up the vcodec AXI/AHB clocks via devres; errors are logged, not fatal */
+{
+	/*pd_video = clk_get(NULL, "pd_video");
+	if (IS_ERR(pd_video)) {
+		pr_err("failed on clk_get pd_video\n");
+	}*/
+	pservice->aclk_vcodec = devm_clk_get(pservice->dev, "aclk_vcodec");
+	if (IS_ERR(pservice->aclk_vcodec)) {
+		dev_err(pservice->dev, "failed on clk_get aclk_vcodec\n"); /* NOTE(review): the ERR_PTR stays stored in the field; every later user must check IS_ERR, not NULL */
+	}
+	pservice->hclk_vcodec = devm_clk_get(pservice->dev, "hclk_vcodec");
+	if (IS_ERR(pservice->hclk_vcodec)) {
+		dev_err(pservice->dev, "failed on clk_get hclk_vcodec\n"); /* same caveat as aclk_vcodec above */
+	}
+}
+\r
+/*
+ * vpu_put_clk - release the clocks looked up by vpu_get_clk().
+ *
+ * vpu_get_clk() leaves an ERR_PTR() in the clk fields when devm_clk_get()
+ * fails, so the old plain NULL test was not sufficient: handing an
+ * ERR_PTR to devm_clk_put() is invalid. Use IS_ERR_OR_NULL() instead,
+ * and clear the fields afterwards to guard against a double put.
+ */
+static void vpu_put_clk(struct vpu_service_info *pservice)
+{
+	//clk_put(pd_video);
+
+	if (!IS_ERR_OR_NULL(pservice->aclk_vcodec)) {
+		devm_clk_put(pservice->dev, pservice->aclk_vcodec);
+		pservice->aclk_vcodec = NULL;
+	}
+
+	if (!IS_ERR_OR_NULL(pservice->hclk_vcodec)) {
+		devm_clk_put(pservice->dev, pservice->hclk_vcodec);
+		pservice->hclk_vcodec = NULL;
+	}
+}
+\r
+static void vpu_reset(struct vpu_service_info *pservice) /* assert/deassert the codec soft resets; only RK29/RK30 paths exist, other SoCs fall through to the bookkeeping below */
+{
+#if defined(CONFIG_ARCH_RK29)
+	clk_disable(aclk_ddr_vepu);
+	cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
+	cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
+	cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
+	cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
+	mdelay(10);
+	cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
+	cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
+	cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
+	cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false); /* release order is the reverse of the assert order */
+	clk_enable(aclk_ddr_vepu);
+#elif defined(CONFIG_ARCH_RK30)
+	pmu_set_idle_request(IDLE_REQ_VIDEO, true);
+	cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
+	cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
+	cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
+	cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
+	mdelay(1);
+	cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
+	cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
+	cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
+	cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
+	pmu_set_idle_request(IDLE_REQ_VIDEO, false);
+#endif
+	pservice->reg_codec = NULL; /* hardware state is gone: forget any in-flight register sets */
+	pservice->reg_pproc = NULL;
+	pservice->reg_resev = NULL;
+}
+\r
+static void reg_deinit(struct vpu_service_info *pservice, vpu_reg *reg);\r
+static void vpu_service_session_clear(struct vpu_service_info *pservice, vpu_session *session)\r
+{\r
+ vpu_reg *reg, *n;\r
+ list_for_each_entry_safe(reg, n, &session->waiting, session_link) {\r
+ reg_deinit(pservice, reg);\r
+ }\r
+ list_for_each_entry_safe(reg, n, &session->running, session_link) {\r
+ reg_deinit(pservice, reg);\r
+ }\r
+ list_for_each_entry_safe(reg, n, &session->done, session_link) {\r
+ reg_deinit(pservice, reg);\r
+ }\r
+}\r
+\r
+/*
+ * vpu_service_dump - print the scheduler state (active register sets and
+ * every session's waiting/running/done queues) for debugging.
+ *
+ * Pointers are printed with %p instead of the old (unsigned int) cast +
+ * "0x%.8x", which truncated addresses on 64-bit kernels and triggered
+ * pointer-to-int-cast warnings. Caller is expected to hold
+ * pservice->lock while the lists are walked -- TODO confirm all callers.
+ */
+static void vpu_service_dump(struct vpu_service_info *pservice)
+{
+	int running;
+	vpu_reg *reg, *reg_tmp;
+	vpu_session *session, *session_tmp;
+
+	running = atomic_read(&pservice->total_running);
+	printk("total_running %d\n", running);
+
+	printk("reg_codec %p\n", pservice->reg_codec);
+	printk("reg_pproc %p\n", pservice->reg_pproc);
+	printk("reg_resev %p\n", pservice->reg_resev);
+
+	list_for_each_entry_safe(session, session_tmp, &pservice->session, list_session) {
+		printk("session pid %d type %d:\n", session->pid, session->type);
+		running = atomic_read(&session->task_running);
+		printk("task_running %d\n", running);
+		list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
+			printk("waiting register set %p\n", reg);
+		}
+		list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
+			printk("running register set %p\n", reg);
+		}
+		list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
+			printk("done register set %p\n", reg);
+		}
+	}
+}
+\r
+static void vpu_service_power_off(struct vpu_service_info *pservice) /* drop power/clock and the wakelock; caller is expected to hold pservice->lock */
+{
+	int total_running;
+	if (!pservice->enabled) {
+		return;
+	}
+
+	pservice->enabled = false;
+	total_running = atomic_read(&pservice->total_running);
+	if (total_running) { /* powering off with work still in flight: warn, wait briefly, dump state */
+		pr_alert("alert: power off when %d task running!!\n", total_running);
+		mdelay(50);
+		pr_alert("alert: delay 50 ms for running task\n");
+		vpu_service_dump(pservice);
+	}
+
+	printk("vpu: power off..."); /* no trailing newline on purpose: "done\n" is appended below */
+#ifdef CONFIG_ARCH_RK29
+	pmu_set_power_domain(PD_VCODEC, false);
+#else
+	//clk_disable(pd_video);
+#endif
+	udelay(10);
+	//clk_disable(hclk_cpu_vcodec);
+	//clk_disable(aclk_ddr_vepu);
+#if 0
+	clk_disable_unprepare(pservice->hclk_vcodec);
+	clk_disable_unprepare(pservice->aclk_vcodec);
+#endif
+	wake_unlock(&pservice->wake_lock); /* allow suspend again once the VPU is off */
+	printk("done\n");
+}
+\r
+static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice) /* (re)arm the delayed power-off, VPU_POWER_OFF_DELAY from now */
+{
+	queue_delayed_work(system_nrt_wq, &pservice->power_off_work, VPU_POWER_OFF_DELAY);
+}
+\r
+static void vpu_power_off_work(struct work_struct *work_s) /* delayed-work handler: power the VPU down once it has been idle */
+{
+	struct delayed_work *dlwork = container_of(work_s, struct delayed_work, work);
+	struct vpu_service_info *pservice = container_of(dlwork, struct vpu_service_info, power_off_work);
+
+	if (mutex_trylock(&pservice->lock)) { /* trylock: never block the shared workqueue behind a long-running job */
+		vpu_service_power_off(pservice);
+		mutex_unlock(&pservice->lock);
+	} else {
+		/* Come back later if the device is busy... */
+		vpu_queue_power_off_work(pservice);
+	}
+}
+\r
+static void vpu_service_power_on(struct vpu_service_info *pservice) /* ungate power, take the wakelock, and (re)arm the delayed power-off */
+{
+	static ktime_t last;
+	ktime_t now = ktime_get();
+	if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) { /* rate-limit the rearm to once per second */
+		cancel_delayed_work_sync(&pservice->power_off_work);
+		vpu_queue_power_off_work(pservice);
+		last = now;
+	}
+	if (pservice->enabled)
+		return ;
+
+	pservice->enabled = true;
+	printk("vpu: power on\n");
+
+#if 0
+	clk_prepare_enable(pservice->aclk_vcodec);
+	clk_prepare_enable(pservice->hclk_vcodec);
+#endif
+	//clk_prepare_enable(hclk_cpu_vcodec);
+#if defined(CONFIG_ARCH_RK319X)
+	/// select aclk_vepu as vcodec clock source. 
+	#define BIT_VCODEC_SEL	(1<<7)
+	writel_relaxed(readl_relaxed(RK319X_GRF_BASE + GRF_SOC_CON1) | (BIT_VCODEC_SEL) | (BIT_VCODEC_SEL << 16), RK319X_GRF_BASE + GRF_SOC_CON1); /* write-enable mask in the upper half-word, per GRF convention -- confirm against TRM */
+#endif
+	udelay(10);
+#ifdef CONFIG_ARCH_RK29
+	pmu_set_power_domain(PD_VCODEC, true);
+#else
+	//clk_enable(pd_video);
+#endif
+	udelay(10);
+	//clk_enable(aclk_ddr_vepu);
+	wake_lock(&pservice->wake_lock); /* hold off suspend while the VPU is powered */
+}
+\r
+static inline bool reg_check_rmvb_wmv(vpu_reg *reg) /* codec id is reg[3] bits 31:28; ids 8 and 4 are presumably RV/WMV (per the name) -- confirm against the register spec */
+{
+	unsigned long type = (reg->reg[3] & 0xF0000000) >> 28;
+	return ((type == 8) || (type == 4));
+}
+\r
+static inline bool reg_check_interlace(vpu_reg *reg) /* true when reg[3] bit 23 is set (interlaced-stream flag, per the name) */
+{
+	unsigned long type = (reg->reg[3] & (1 << 23));
+	return (type > 0);
+}
+\r
+static vpu_reg *reg_init(struct vpu_service_info *pservice, vpu_session *session, void __user *src, unsigned long size)\r
+{\r
+ vpu_reg *reg = kmalloc(sizeof(vpu_reg)+pservice->reg_size, GFP_KERNEL);\r
+ if (NULL == reg) {\r
+ pr_err("error: kmalloc fail in reg_init\n");\r
+ return NULL;\r
+ }\r
+\r
+ if (size > pservice->reg_size) {\r
+ printk("warning: vpu reg size %lu is larger than hw reg size %lu\n", size, pservice->reg_size);\r
+ size = pservice->reg_size;\r
+ }\r
+ reg->session = session;\r
+ reg->type = session->type;\r
+ reg->size = size;\r
+ reg->freq = VPU_FREQ_DEFAULT;\r
+ reg->reg = (unsigned long *)®[1];\r
+ INIT_LIST_HEAD(®->session_link);\r
+ INIT_LIST_HEAD(®->status_link);\r
+\r
+ if (copy_from_user(®->reg[0], (void __user *)src, size)) {\r
+ pr_err("error: copy_from_user failed in reg_init\n");\r
+ kfree(reg);\r
+ return NULL;\r
+ }\r
+\r
+ mutex_lock(&pservice->lock);\r
+ list_add_tail(®->status_link, &pservice->waiting);\r
+ list_add_tail(®->session_link, &session->waiting);\r
+ mutex_unlock(&pservice->lock);\r
+\r
+ if (pservice->auto_freq) {\r
+ if (!soc_is_rk2928g()) {\r
+ if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {\r
+ if (reg_check_rmvb_wmv(reg)) {\r
+ reg->freq = VPU_FREQ_200M;\r
+ } else {\r
+ if (reg_check_interlace(reg)) {\r
+ reg->freq = VPU_FREQ_400M;\r
+ }\r
+ }\r
+ }\r
+ if (reg->type == VPU_PP) {\r
+ reg->freq = VPU_FREQ_400M;\r
+ }\r
+ }\r
+ }\r
+\r
+ return reg;\r
+}\r
+\r
+static void reg_deinit(struct vpu_service_info *pservice, vpu_reg *reg) /* unlink a register set from both list chains and free it; caller is expected to hold pservice->lock */
+{
+	list_del_init(&reg->session_link);
+	list_del_init(&reg->status_link);
+	if (reg == pservice->reg_codec) pservice->reg_codec = NULL; /* drop stale hardware-owner pointers so nothing dangles */
+	if (reg == pservice->reg_pproc) pservice->reg_pproc = NULL;
+	kfree(reg);
+}
+\r
+static void reg_from_wait_to_run(struct vpu_service_info *pservice, vpu_reg *reg) /* move a register set from the waiting queues to the running queues */
+{
+	list_del_init(&reg->status_link);
+	list_add_tail(&reg->status_link, &pservice->running); /* service-wide queue */
+
+	list_del_init(&reg->session_link);
+	list_add_tail(&reg->session_link, &reg->session->running); /* per-session queue */
+}
+\r
+/*
+ * reg_copy_from_hw - read back @count hardware registers into the
+ * register set's shadow buffer so the result can be returned to user
+ * space. The index is u32 to match @count; the old "int i" compared a
+ * signed index against an unsigned count on every iteration.
+ */
+static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
+{
+	u32 i;
+	u32 *dst = (u32 *)&reg->reg[0];
+	for (i = 0; i < count; i++)
+		*dst++ = *src++;
+}
+\r
+static void reg_from_run_to_done(struct vpu_service_info *pservice, vpu_reg *reg)\r
+{\r
+ int irq_reg = -1;\r
+ list_del_init(®->status_link);\r
+ list_add_tail(®->status_link, &pservice->done);\r
+\r
+ list_del_init(®->session_link);\r
+ list_add_tail(®->session_link, ®->session->done);\r
+\r
+ switch (reg->type) {\r
+ case VPU_ENC : {\r
+ pservice->reg_codec = NULL;\r
+ reg_copy_from_hw(reg, pservice->enc_dev.hwregs, pservice->hw_info->enc_reg_num);\r
+ irq_reg = ENC_INTERRUPT_REGISTER;\r
+ break;\r
+ }\r
+ case VPU_DEC : {\r
+ pservice->reg_codec = NULL;\r
+ reg_copy_from_hw(reg, pservice->dec_dev.hwregs, REG_NUM_9190_DEC);\r
+ irq_reg = DEC_INTERRUPT_REGISTER;\r
+ break;\r
+ }\r
+ case VPU_PP : {\r
+ pservice->reg_pproc = NULL;\r
+ reg_copy_from_hw(reg, pservice->dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_9190_PP);\r
+ pservice->dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;\r
+ break;\r
+ }\r
+ case VPU_DEC_PP : {\r
+ pservice->reg_codec = NULL;\r
+ pservice->reg_pproc = NULL;\r
+ reg_copy_from_hw(reg, pservice->dec_dev.hwregs, REG_NUM_9190_DEC_PP);\r
+ pservice->dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;\r
+ break;\r
+ }\r
+ default : {\r
+ pr_err("error: copy reg from hw with unknown type %d\n", reg->type);\r
+ break;\r
+ }\r
+ }\r
+\r
+ if (irq_reg != -1) {\r
+ reg->reg[irq_reg] = pservice->irq_status;\r
+ }\r
+\r
+ atomic_sub(1, ®->session->task_running);\r
+ atomic_sub(1, &pservice->total_running);\r
+ wake_up(®->session->wait);\r
+}\r
+\r
+static void vpu_service_set_freq(struct vpu_service_info *pservice, vpu_reg *reg)\r
+{\r
+ VPU_FREQ curr = atomic_read(&pservice->freq_status);\r
+ if (curr == reg->freq) {\r
+ return ;\r
+ }\r
+ atomic_set(&pservice->freq_status, reg->freq);\r
+ switch (reg->freq) {\r
+ case VPU_FREQ_200M : {\r
+ clk_set_rate(pservice->aclk_vcodec, 200*MHZ);\r
+ //printk("default: 200M\n");\r
+ } break;\r
+ case VPU_FREQ_266M : {\r
+ clk_set_rate(pservice->aclk_vcodec, 266*MHZ);\r
+ //printk("default: 266M\n");\r
+ } break;\r
+ case VPU_FREQ_300M : {\r
+ clk_set_rate(pservice->aclk_vcodec, 300*MHZ);\r
+ //printk("default: 300M\n");\r
+ } break;\r
+ case VPU_FREQ_400M : {\r
+ clk_set_rate(pservice->aclk_vcodec, 400*MHZ);\r
+ //printk("default: 400M\n");\r
+ } break;\r
+ default : {\r
+ if (soc_is_rk2928g()) {\r
+ clk_set_rate(pservice->aclk_vcodec, 400*MHZ);\r
+ } else {\r
+ clk_set_rate(pservice->aclk_vcodec, 300*MHZ);\r
+ }\r
+ //printk("default: 300M\n");\r
+ } break;\r
+ }\r
+}\r
+\r
+#if HEVC_SIM_ENABLE\r
+static void simulate_start(struct vpu_service_info *pservice);\r
+#endif\r
+static void reg_copy_to_hw(struct vpu_service_info *pservice, vpu_reg *reg)\r
+{\r
+ int i;\r
+ u32 *src = (u32 *)®->reg[0];\r
+ atomic_add(1, &pservice->total_running);\r
+ atomic_add(1, ®->session->task_running);\r
+ if (pservice->auto_freq) {\r
+ vpu_service_set_freq(pservice, reg);\r
+ }\r
+ switch (reg->type) {\r
+ case VPU_ENC : {\r
+ int enc_count = pservice->hw_info->enc_reg_num;\r
+ u32 *dst = (u32 *)pservice->enc_dev.hwregs;\r
+#if 0\r
+ if (pservice->bug_dec_addr) {\r
+#if !defined(CONFIG_ARCH_RK319X)\r
+ cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);\r
+#endif\r
+ cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);\r
+ cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);\r
+#if !defined(CONFIG_ARCH_RK319X)\r
+ cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);\r
+#endif\r
+ }\r
+#endif\r
+ pservice->reg_codec = reg;\r
+\r
+ dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;\r
+\r
+ for (i = 0; i < VPU_REG_EN_ENC; i++)\r
+ dst[i] = src[i];\r
+\r
+ for (i = VPU_REG_EN_ENC + 1; i < enc_count; i++)\r
+ dst[i] = src[i];\r
+\r
+ dsb();\r
+\r
+ dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;\r
+ dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC];\r
+\r
+#if VPU_SERVICE_SHOW_TIME\r
+ do_gettimeofday(&enc_start);\r
+#endif\r
+\r
+ } break;\r
+ case VPU_DEC : {\r
+ u32 *dst = (u32 *)pservice->dec_dev.hwregs;\r
+ pservice->reg_codec = reg;\r
+\r
+ for (i = REG_NUM_9190_DEC - 1; i > VPU_REG_DEC_GATE; i--)\r
+ dst[i] = src[i];\r
+\r
+ dsb();\r
+\r
+ dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;\r
+ dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];\r
+\r
+#if VPU_SERVICE_SHOW_TIME\r
+ do_gettimeofday(&dec_start);\r
+#endif\r
+\r
+ } break;\r
+ case VPU_PP : {\r
+ u32 *dst = (u32 *)pservice->dec_dev.hwregs + PP_INTERRUPT_REGISTER;\r
+ pservice->reg_pproc = reg;\r
+\r
+ dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;\r
+\r
+ for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_9190_PP; i++)\r
+ dst[i] = src[i];\r
+\r
+ dsb();\r
+\r
+ dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];\r
+\r
+#if VPU_SERVICE_SHOW_TIME\r
+ do_gettimeofday(&pp_start);\r
+#endif\r
+\r
+ } break;\r
+ case VPU_DEC_PP : {\r
+ u32 *dst = (u32 *)pservice->dec_dev.hwregs;\r
+ pservice->reg_codec = reg;\r
+ pservice->reg_pproc = reg;\r
+\r
+ for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_9190_DEC_PP; i++)\r
+ dst[i] = src[i];\r
+\r
+ dst[VPU_REG_EN_DEC_PP] = src[VPU_REG_EN_DEC_PP] | 0x2;\r
+ dsb();\r
+\r
+ dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;\r
+ dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;\r
+ dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];\r
+\r
+#if VPU_SERVICE_SHOW_TIME\r
+ do_gettimeofday(&dec_start);\r
+#endif\r
+\r
+ } break;\r
+ default : {\r
+ pr_err("error: unsupport session type %d", reg->type);\r
+ atomic_sub(1, &pservice->total_running);\r
+ atomic_sub(1, ®->session->task_running);\r
+ break;\r
+ }\r
+ }\r
+\r
+#if HEVC_SIM_ENABLE\r
+ if (pservice->hw_info->hw_id == HEVC_ID) {\r
+ simulate_start(pservice);\r
+ }\r
+#endif\r
+}\r
+\r
+static void try_set_reg(struct vpu_service_info *pservice)\r
+{\r
+ // first get reg from reg list\r
+ if (!list_empty(&pservice->waiting)) {\r
+ int can_set = 0;\r
+ vpu_reg *reg = list_entry(pservice->waiting.next, vpu_reg, status_link);\r
+\r
+ vpu_service_power_on(pservice);\r
+\r
+ switch (reg->type) {\r
+ case VPU_ENC : {\r
+ if ((NULL == pservice->reg_codec) && (NULL == pservice->reg_pproc))\r
+ can_set = 1;\r
+ } break;\r
+ case VPU_DEC : {\r
+ if (NULL == pservice->reg_codec)\r
+ can_set = 1;\r
+ if (pservice->auto_freq && (NULL != pservice->reg_pproc)) {\r
+ can_set = 0;\r
+ }\r
+ } break;\r
+ case VPU_PP : {\r
+ if (NULL == pservice->reg_codec) {\r
+ if (NULL == pservice->reg_pproc)\r
+ can_set = 1;\r
+ } else {\r
+ if ((VPU_DEC == pservice->reg_codec->type) && (NULL == pservice->reg_pproc))\r
+ can_set = 1;\r
+ // can not charge frequency when vpu is working\r
+ if (pservice->auto_freq) {\r
+ can_set = 0;\r
+ }\r
+ }\r
+ } break;\r
+ case VPU_DEC_PP : {\r
+ if ((NULL == pservice->reg_codec) && (NULL == pservice->reg_pproc))\r
+ can_set = 1;\r
+ } break;\r
+ default : {\r
+ printk("undefined reg type %d\n", reg->type);\r
+ } break;\r
+ }\r
+ if (can_set) {\r
+ reg_from_wait_to_run(pservice, reg);\r
+ reg_copy_to_hw(pservice, reg);\r
+ }\r
+ }\r
+}\r
+\r
+static int return_reg(struct vpu_service_info *pservice, vpu_reg *reg, u32 __user *dst)\r
+{\r
+ int ret = 0;\r
+ switch (reg->type) {\r
+ case VPU_ENC : {\r
+ if (copy_to_user(dst, ®->reg[0], pservice->hw_info->enc_io_size))\r
+ ret = -EFAULT;\r
+ break;\r
+ }\r
+ case VPU_DEC : {\r
+ if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_9190_DEC)))\r
+ ret = -EFAULT;\r
+ break;\r
+ }\r
+ case VPU_PP : {\r
+ if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_9190_PP)))\r
+ ret = -EFAULT;\r
+ break;\r
+ }\r
+ case VPU_DEC_PP : {\r
+ if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_9190_DEC_PP)))\r
+ ret = -EFAULT;\r
+ break;\r
+ }\r
+ default : {\r
+ ret = -EFAULT;\r
+ pr_err("error: copy reg to user with unknown type %d\n", reg->type);\r
+ break;\r
+ }\r
+ }\r
+ reg_deinit(pservice, reg);\r
+ return ret;\r
+}\r
+\r
+static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)\r
+{\r
+ struct vpu_service_info *pservice = container_of(filp->f_dentry->d_inode->i_cdev, struct vpu_service_info, cdev);\r
+ vpu_session *session = (vpu_session *)filp->private_data;\r
+ if (NULL == session) {\r
+ return -EINVAL;\r
+ }\r
+\r
+ switch (cmd) {\r
+ case VPU_IOC_SET_CLIENT_TYPE : {\r
+ session->type = (VPU_CLIENT_TYPE)arg;\r
+ break;\r
+ }\r
+ case VPU_IOC_GET_HW_FUSE_STATUS : {\r
+ vpu_request req;\r
+ if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {\r
+ pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");\r
+ return -EFAULT;\r
+ } else {\r
+ if (VPU_ENC != session->type) {\r
+ if (copy_to_user((void __user *)req.req, &pservice->dec_config, sizeof(VPUHwDecConfig_t))) {\r
+ pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);\r
+ return -EFAULT;\r
+ }\r
+ } else {\r
+ if (copy_to_user((void __user *)req.req, &pservice->enc_config, sizeof(VPUHwEncConfig_t))) {\r
+ pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);\r
+ return -EFAULT;\r
+ }\r
+ }\r
+ }\r
+\r
+ break;\r
+ }\r
+ case VPU_IOC_SET_REG : {\r
+ vpu_request req;\r
+ vpu_reg *reg;\r
+ if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {\r
+ pr_err("error: VPU_IOC_SET_REG copy_from_user failed\n");\r
+ return -EFAULT;\r
+ }\r
+ reg = reg_init(pservice, session, (void __user *)req.req, req.size);\r
+ if (NULL == reg) {\r
+ return -EFAULT;\r
+ } else {\r
+ mutex_lock(&pservice->lock);\r
+ try_set_reg(pservice);\r
+ mutex_unlock(&pservice->lock);\r
+ }\r
+\r
+ break;\r
+ }\r
+ case VPU_IOC_GET_REG : {\r
+ vpu_request req;\r
+ vpu_reg *reg;\r
+ if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {\r
+ pr_err("error: VPU_IOC_GET_REG copy_from_user failed\n");\r
+ return -EFAULT;\r
+ } else {\r
+ int ret = wait_event_timeout(session->wait, !list_empty(&session->done), VPU_TIMEOUT_DELAY);\r
+ if (!list_empty(&session->done)) {\r
+ if (ret < 0) {\r
+ pr_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session->pid, ret);\r
+ }\r
+ ret = 0;\r
+ } else {\r
+ if (unlikely(ret < 0)) {\r
+ pr_err("error: pid %d wait task ret %d\n", session->pid, ret);\r
+ } else if (0 == ret) {\r
+ pr_err("error: pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));\r
+ ret = -ETIMEDOUT;\r
+ }\r
+ }\r
+ if (ret < 0) {\r
+ int task_running = atomic_read(&session->task_running);\r
+ mutex_lock(&pservice->lock);\r
+ vpu_service_dump(pservice);\r
+ if (task_running) {\r
+ atomic_set(&session->task_running, 0);\r
+ atomic_sub(task_running, &pservice->total_running);\r
+ printk("%d task is running but not return, reset hardware...", task_running);\r
+ vpu_reset(pservice);\r
+ printk("done\n");\r
+ }\r
+ vpu_service_session_clear(pservice, session);\r
+ mutex_unlock(&pservice->lock);\r
+ return ret;\r
+ }\r
+ }\r
+ mutex_lock(&pservice->lock);\r
+ reg = list_entry(session->done.next, vpu_reg, session_link);\r
+ return_reg(pservice, reg, (u32 __user *)req.req);\r
+ mutex_unlock(&pservice->lock);\r
+ break;\r
+ }\r
+ default : {\r
+ pr_err("error: unknow vpu service ioctl cmd %x\n", cmd);\r
+ break;\r
+ }\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+/*
+ * vpu_service_check_hw - read the hardware ID register at @hw_addr and
+ * select the matching vpu_hw_set[] entry.
+ *
+ * Returns 0 when a known ID is found, -ENOMEM when the register window
+ * cannot be mapped, -EINVAL for an unknown ID. The old code dereferenced
+ * the ioremap_nocache() result unconditionally and would oops on a
+ * failed mapping.
+ */
+static int vpu_service_check_hw(vpu_service_info *p, unsigned long hw_addr)
+{
+	int ret = -EINVAL, i = 0;
+	volatile u32 *tmp = (volatile u32 *)ioremap_nocache(hw_addr, 0x4);
+	u32 enc_id;
+
+	if (tmp == NULL) {
+		pr_err("error: failed to map hw id register\n");
+		return -ENOMEM;
+	}
+	enc_id = *tmp;
+
+#if 0
+	/// temporary, hevc driver test.
+	if (strncmp(dev_name(p->dev), "hevc_service", strlen("hevc_service")) == 0) {
+		p->hw_info = &vpu_hw_set[2];
+		return 0;
+	}
+#endif
+
+	enc_id = (enc_id >> 16) & 0xFFFF; /* ID lives in the upper half-word */
+	pr_info("checking hw id %x\n", enc_id);
+	p->hw_info = NULL;
+	for (i = 0; i < ARRAY_SIZE(vpu_hw_set); i++) {
+		if (enc_id == vpu_hw_set[i].hw_id) {
+			p->hw_info = &vpu_hw_set[i];
+			ret = 0;
+			break;
+		}
+	}
+	iounmap((void *)tmp);
+	return ret;
+}
+\r
+static int vpu_service_open(struct inode *inode, struct file *filp) /* allocate a per-fd session and register it with the service */
+{
+	struct vpu_service_info *pservice = container_of(inode->i_cdev, struct vpu_service_info, cdev);
+	vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
+	if (NULL == session) {
+		pr_err("error: unable to allocate memory for vpu_session.");
+		return -ENOMEM;
+	}
+
+	session->type	= VPU_TYPE_BUTT; /* placeholder until VPU_IOC_SET_CLIENT_TYPE is issued */
+	session->pid	= current->pid;
+	INIT_LIST_HEAD(&session->waiting);
+	INIT_LIST_HEAD(&session->running);
+	INIT_LIST_HEAD(&session->done);
+	INIT_LIST_HEAD(&session->list_session);
+	init_waitqueue_head(&session->wait);
+	atomic_set(&session->task_running, 0);
+	mutex_lock(&pservice->lock);
+	list_add_tail(&session->list_session, &pservice->session);
+	filp->private_data = (void *)session;
+	mutex_unlock(&pservice->lock);
+
+	pr_debug("dev opened\n");
+	return nonseekable_open(inode, filp); /* device is not seekable */
+}
+\r
+static int vpu_service_release(struct inode *inode, struct file *filp) /* tear down the per-fd session, reclaiming any leftover register sets */
+{
+	struct vpu_service_info *pservice = container_of(inode->i_cdev, struct vpu_service_info, cdev);
+	int task_running;
+	vpu_session *session = (vpu_session *)filp->private_data;
+	if (NULL == session)
+		return -EINVAL;
+
+	task_running = atomic_read(&session->task_running);
+	if (task_running) { /* give in-flight jobs a moment to drain before their session is freed */
+		pr_err("error: vpu_service session %d still has %d task running when closing\n", session->pid, task_running);
+		msleep(50);
+	}
+	wake_up(&session->wait);
+
+	mutex_lock(&pservice->lock);
+	/* remove this filp from the asynchronously notified filp's */
+	list_del_init(&session->list_session);
+	vpu_service_session_clear(pservice, session); /* frees every reg still on the session's waiting/running/done lists */
+	kfree(session);
+	filp->private_data = NULL;
+	mutex_unlock(&pservice->lock);
+
+	pr_debug("dev closed\n");
+	return 0;
+}
+\r
+static const struct file_operations vpu_service_fops = {\r
+ .unlocked_ioctl = vpu_service_ioctl,\r
+ .open = vpu_service_open,\r
+ .release = vpu_service_release,\r
+ //.fasync = vpu_service_fasync,\r
+};\r
+\r
+static irqreturn_t vdpu_irq(int irq, void *dev_id);\r
+static irqreturn_t vdpu_isr(int irq, void *dev_id);\r
+static irqreturn_t vepu_irq(int irq, void *dev_id);\r
+static irqreturn_t vepu_isr(int irq, void *dev_id);\r
+static void get_hw_info(struct vpu_service_info *pservice);\r
+\r
+#if HEVC_SIM_ENABLE\r
+static void simulate_work(struct work_struct *work_s)\r
+{\r
+ struct delayed_work *dlwork = container_of(work_s, struct delayed_work, work);\r
+ struct vpu_service_info *pservice = container_of(dlwork, struct vpu_service_info, simulate_work);\r
+ vpu_device *dev = &pservice->dec_dev;\r
+\r
+ if (!list_empty(&pservice->running)) {\r
+ atomic_add(1, &dev->irq_count_codec);\r
+ vdpu_isr(0, (void*)pservice);\r
+ } else {\r
+ //simulate_start(pservice);\r
+ pr_err("empty running queue\n");\r
+ }\r
+}\r
+\r
+static void simulate_init(struct vpu_service_info *pservice)\r
+{\r
+ INIT_DELAYED_WORK(&pservice->simulate_work, simulate_work);\r
+}\r
+\r
+static void simulate_start(struct vpu_service_info *pservice)\r
+{\r
+ cancel_delayed_work_sync(&pservice->power_off_work);\r
+ queue_delayed_work(system_nrt_wq, &pservice->simulate_work, VPU_SIMULATE_DELAY);\r
+}\r
+#endif\r
+\r
#if HEVC_TEST_ENABLE
/* Self-test entry point, defined at the bottom of this file. */
static int hevc_test_case0(vpu_service_info *pservice);
#endif
+static int vcodec_probe(struct platform_device *pdev)\r
+{\r
+ int ret = 0;\r
+ struct resource *res = NULL;\r
+ struct device *dev = &pdev->dev;\r
+ void __iomem *regs = NULL;\r
+ struct device_node *np = pdev->dev.of_node;\r
+ struct vpu_service_info *pservice = devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);\r
+ char *prop = (char*)dev_name(dev);\r
+\r
+ pr_info("probe device %s\n", dev_name(dev));\r
+\r
+ of_property_read_string(np, "name", (const char**)&prop);\r
+ dev_set_name(dev, prop);\r
+\r
+ wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");\r
+ INIT_LIST_HEAD(&pservice->waiting);\r
+ INIT_LIST_HEAD(&pservice->running);\r
+ INIT_LIST_HEAD(&pservice->done);\r
+ INIT_LIST_HEAD(&pservice->session);\r
+ mutex_init(&pservice->lock);\r
+ pservice->reg_codec = NULL;\r
+ pservice->reg_pproc = NULL;\r
+ atomic_set(&pservice->total_running, 0);\r
+ pservice->enabled = false;\r
+\r
+ pservice->dev = dev;\r
+\r
+ vpu_get_clk(pservice);\r
+\r
+ INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);\r
+\r
+ vpu_service_power_on(pservice);\r
+\r
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);\r
+\r
+ regs = devm_ioremap_resource(pservice->dev, res);\r
+ if (IS_ERR(regs)) {\r
+ ret = PTR_ERR(regs);\r
+ goto err;\r
+ }\r
+\r
+ ret = vpu_service_check_hw(pservice, res->start);\r
+ if (ret < 0) {\r
+ pr_err("error: hw info check faild\n");\r
+ goto err;\r
+ }\r
+\r
+ /// define regs address.\r
+ pservice->dec_dev.iobaseaddr = res->start + pservice->hw_info->dec_offset;\r
+ pservice->dec_dev.iosize = pservice->hw_info->dec_io_size;\r
+\r
+ pservice->dec_dev.hwregs = (volatile u32 *)((u8 *)regs + pservice->hw_info->dec_offset);\r
+\r
+ pservice->reg_size = pservice->dec_dev.iosize;\r
+\r
+ if (pservice->hw_info->hw_id != HEVC_ID) {\r
+ pservice->enc_dev.iobaseaddr = res->start + pservice->hw_info->enc_offset;\r
+ pservice->enc_dev.iosize = pservice->hw_info->enc_io_size;\r
+\r
+ pservice->reg_size = pservice->reg_size > pservice->enc_dev.iosize ? pservice->reg_size : pservice->enc_dev.iosize;\r
+\r
+ pservice->enc_dev.hwregs = (volatile u32 *)((u8 *)regs + pservice->hw_info->enc_offset);\r
+\r
+ pservice->irq_enc = platform_get_irq_byname(pdev, "irq_enc");\r
+ if (pservice->irq_enc < 0) {\r
+ dev_err(pservice->dev, "cannot find IRQ encoder\n");\r
+ ret = -ENXIO;\r
+ goto err;\r
+ }\r
+\r
+ ret = devm_request_threaded_irq(pservice->dev, pservice->irq_enc, vepu_irq, vepu_isr, 0, dev_name(pservice->dev), (void *)pservice);\r
+ if (ret) {\r
+ dev_err(pservice->dev, "error: can't request vepu irq %d\n", pservice->irq_enc);\r
+ goto err;\r
+ }\r
+ }\r
+\r
+ pservice->irq_dec = platform_get_irq_byname(pdev, "irq_dec");\r
+ if (pservice->irq_dec < 0) {\r
+ dev_err(pservice->dev, "cannot find IRQ decoder\n");\r
+ ret = -ENXIO;\r
+ goto err;\r
+ }\r
+\r
+ /* get the IRQ line */\r
+ ret = devm_request_threaded_irq(pservice->dev, pservice->irq_dec, vdpu_irq, vdpu_isr, 0, dev_name(pservice->dev), (void *)pservice);\r
+ if (ret) {\r
+ dev_err(pservice->dev, "error: can't request vdpu irq %d\n", pservice->irq_dec);\r
+ goto err;\r
+ }\r
+\r
+ atomic_set(&pservice->dec_dev.irq_count_codec, 0);\r
+ atomic_set(&pservice->dec_dev.irq_count_pp, 0);\r
+ atomic_set(&pservice->enc_dev.irq_count_codec, 0);\r
+ atomic_set(&pservice->enc_dev.irq_count_pp, 0);\r
+\r
+ /// create device\r
+ ret = alloc_chrdev_region(&pservice->dev_t, 0, 1, dev_name(dev));\r
+ if (ret) {\r
+ dev_err(dev, "alloc dev_t failed\n");\r
+ goto err;\r
+ }\r
+\r
+ cdev_init(&pservice->cdev, &vpu_service_fops);\r
+\r
+ pservice->cdev.owner = THIS_MODULE;\r
+ pservice->cdev.ops = &vpu_service_fops;\r
+\r
+ ret = cdev_add(&pservice->cdev, pservice->dev_t, 1);\r
+\r
+ if (ret) {\r
+ dev_err(dev, "add dev_t failed\n");\r
+ goto err;\r
+ }\r
+\r
+ pservice->cls = class_create(THIS_MODULE, dev_name(dev));\r
+\r
+ if (IS_ERR(pservice->cls)) {\r
+ ret = PTR_ERR(pservice->cls);\r
+ dev_err(dev, "class_create err:%d\n", ret);\r
+ goto err;\r
+ }\r
+\r
+ pservice->child_dev = device_create(pservice->cls, dev, pservice->dev_t, NULL, dev_name(dev));\r
+\r
+ platform_set_drvdata(pdev, pservice);\r
+\r
+ if (pservice->hw_info->hw_id != HEVC_ID) {\r
+ get_hw_info(pservice);\r
+ }\r
+\r
+#ifdef CONFIG_DEBUG_FS\r
+ pservice->debugfs_dir = vcodec_debugfs_create_device_dir((char*)dev_name(dev), parent);\r
+ \r
+ if (pservice->debugfs_dir == NULL) {\r
+ pr_err("create debugfs dir %s failed\n", dev_name(dev));\r
+ }\r
+\r
+ pservice->debugfs_file_regs = debugfs_create_file("regs", 0664,\r
+ pservice->debugfs_dir, pservice,\r
+ &debug_vcodec_fops);\r
+#endif\r
+\r
+ vpu_service_power_off(pservice);\r
+ pr_info("init success\n");\r
+\r
+#if HEVC_SIM_ENABLE\r
+ if (pservice->hw_info->hw_id == HEVC_ID) {\r
+ simulate_init(pservice);\r
+ }\r
+#endif\r
+\r
+#if HEVC_TEST_ENABLE\r
+ hevc_test_case0(pservice);\r
+#endif\r
+\r
+ return 0;\r
+\r
+err:\r
+ pr_info("init failed\n");\r
+ vpu_service_power_off(pservice);\r
+ vpu_put_clk(pservice);\r
+ wake_lock_destroy(&pservice->wake_lock);\r
+\r
+ if (res) {\r
+ if (regs) {\r
+ devm_ioremap_release(&pdev->dev, res);\r
+ }\r
+ devm_release_mem_region(&pdev->dev, res->start, resource_size(res));\r
+ }\r
+\r
+ if (pservice->irq_enc > 0) {\r
+ free_irq(pservice->irq_enc, (void *)pservice);\r
+ }\r
+\r
+ if (pservice->irq_dec > 0) {\r
+ free_irq(pservice->irq_dec, (void *)pservice);\r
+ }\r
+\r
+ if (pservice->child_dev) {\r
+ device_destroy(pservice->cls, pservice->dev_t);\r
+ cdev_del(&pservice->cdev);\r
+ unregister_chrdev_region(pservice->dev_t, 1);\r
+ }\r
+\r
+ if (pservice->cls) {\r
+ class_destroy(pservice->cls);\r
+ }\r
+\r
+ return ret;\r
+}\r
+\r
+static int vcodec_remove(struct platform_device *pdev)\r
+{\r
+ struct vpu_service_info *pservice = platform_get_drvdata(pdev);\r
+ struct resource *res;\r
+\r
+ device_destroy(pservice->cls, pservice->dev_t);\r
+ class_destroy(pservice->cls);\r
+ cdev_del(&pservice->cdev);\r
+ unregister_chrdev_region(pservice->dev_t, 1);\r
+\r
+ free_irq(pservice->irq_enc, (void *)&pservice->enc_dev);\r
+ free_irq(pservice->irq_dec, (void *)&pservice->dec_dev);\r
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);\r
+ devm_ioremap_release(&pdev->dev, res);\r
+ devm_release_mem_region(&pdev->dev, res->start, resource_size(res));\r
+ vpu_put_clk(pservice);\r
+ wake_lock_destroy(&pservice->wake_lock);\r
+ \r
+#ifdef CONFIG_DEBUG_FS\r
+ if (pservice->debugfs_file_regs) {\r
+ debugfs_remove(pservice->debugfs_file_regs);\r
+ }\r
+\r
+ if (pservice->debugfs_dir) {\r
+ debugfs_remove(pservice->debugfs_dir);\r
+ }\r
+#endif\r
+\r
+ return 0;\r
+}\r
+\r
#if defined(CONFIG_OF)
/* Device-tree match table; currently only the HEVC service node binds. */
static const struct of_device_id vcodec_service_dt_ids[] = {
	{.compatible = "rockchip,hevc_service",},
	{},
};
#endif
+\r
/* Platform driver glue: matched either by name ("vcodec") or via DT. */
static struct platform_driver vcodec_driver = {
	.probe = vcodec_probe,
	.remove = vcodec_remove,
	.driver = {
		.name = "vcodec",
		.owner = THIS_MODULE,
#if defined(CONFIG_OF)
		.of_match_table = of_match_ptr(vcodec_service_dt_ids),
#endif
	},
};
+\r
/*
 * Read the Hantro-style synthesis-config and fuse registers of a combined
 * VPU (decoder + post-processor + encoder) and fill in
 * pservice->dec_config / pservice->enc_config.
 *
 * Must only be called for non-HEVC hardware (probe guards on hw_id) and
 * while the device is powered, since it reads hwregs[] directly.
 */
static void get_hw_info(struct vpu_service_info *pservice)
{
	VPUHwDecConfig_t *dec = &pservice->dec_config;
	VPUHwEncConfig_t *enc = &pservice->enc_config;
	u32 configReg = pservice->dec_dev.hwregs[VPU_DEC_HWCFG0];
	u32 asicID = pservice->dec_dev.hwregs[0];	/* hw id/revision word */

	/* 1st config register: per-codec capability bits. */
	dec->h264Support = (configReg >> DWL_H264_E) & 0x3U;
	dec->jpegSupport = (configReg >> DWL_JPEG_E) & 0x01U;
	if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
		dec->jpegSupport = JPEG_PROGRESSIVE;
	dec->mpeg4Support = (configReg >> DWL_MPEG4_E) & 0x3U;
	dec->vc1Support = (configReg >> DWL_VC1_E) & 0x3U;
	dec->mpeg2Support = (configReg >> DWL_MPEG2_E) & 0x01U;
	dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
	dec->refBufSupport = (configReg >> DWL_REF_BUFF_E) & 0x01U;
	dec->vp6Support = (configReg >> DWL_VP6_E) & 0x01U;
#if !defined(CONFIG_ARCH_RK319X)
	/* low 11 bits encode the max decode picture width in pixels */
	dec->maxDecPicWidth = configReg & 0x07FFU;
#else
	/* the width field is invalid on rk319x; hard-code 4K support */
	dec->maxDecPicWidth = 3840;
#endif

	/* 2nd Config register */
	configReg = pservice->dec_dev.hwregs[VPU_DEC_HWCFG1];
	if (dec->refBufSupport) {
		/* refBufSupport doubles as a capability bitmask: 2 = interlace,
		 * 4 = double buffering, 8 (below) = offset support */
		if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
			dec->refBufSupport |= 2;
		if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
			dec->refBufSupport |= 4;
	}
	dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
	dec->vp7Support = (configReg >> DWL_VP7_E) & 0x01U;
	dec->vp8Support = (configReg >> DWL_VP8_E) & 0x01U;
	dec->avsSupport = (configReg >> DWL_AVS_E) & 0x01U;

	/* JPEG extensions: only on 8190+ cores or the 6731 decoder */
	if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
		dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
	} else {
		dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
	}

	/* RealVideo: only on 9170+ cores or the 6731 decoder */
	if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
		dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
	} else {
		dec->rvSupport = RV_NOT_SUPPORTED;
	}

	dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;

	if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
		dec->refBufSupport |= 8; /* enable HW support for offset */
	}

#if !defined(CONFIG_ARCH_RK319X)
	/* Fuse registers are invalid on rk319x, so the whole fuse-based
	 * capability clamp below is skipped there. */
	{
		VPUHwFuseStatus_t hwFuseSts;
		/* Decoder fuse configuration */
		u32 fuseReg = pservice->dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];

		hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
		hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
		hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
		hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
		hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
		hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
		hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
		hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
		hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
		hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
		hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
		hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
		hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
		hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;

		/* check max. decoder output width */

		if (fuseReg & 0x8000U)
			hwFuseSts.maxDecPicWidthFuse = 1920;
		else if (fuseReg & 0x4000U)
			hwFuseSts.maxDecPicWidthFuse = 1280;
		else if (fuseReg & 0x2000U)
			hwFuseSts.maxDecPicWidthFuse = 720;
		else if (fuseReg & 0x1000U)
			hwFuseSts.maxDecPicWidthFuse = 352;
		else /* remove warning */
			hwFuseSts.maxDecPicWidthFuse = 352;

		hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;

		/* Pp configuration */
		configReg = pservice->dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];

		if ((configReg >> DWL_PP_E) & 0x01U) {
			dec->ppSupport = 1;
			dec->maxPpOutPicWidth = configReg & 0x07FFU;
			/*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
			dec->ppConfig = configReg;
		} else {
			dec->ppSupport = 0;
			dec->maxPpOutPicWidth = 0;
			dec->ppConfig = 0;
		}

		/* check the HW version: fuses only exist on 8190+/6731 cores */
		if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
			/* Pp configuration */
			configReg = pservice->dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];

			if ((configReg >> DWL_PP_E) & 0x01U) {
				/* Pp fuse configuration */
				u32 fuseRegPp = pservice->dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];

				if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
					hwFuseSts.ppSupportFuse = 1;
					/* check max. pp output width */
					if (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920;
					else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280;
					else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720;
					else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352;
					else hwFuseSts.maxPpOutPicWidthFuse = 352;
					hwFuseSts.ppConfigFuse = fuseRegPp;
				} else {
					hwFuseSts.ppSupportFuse = 0;
					hwFuseSts.maxPpOutPicWidthFuse = 0;
					hwFuseSts.ppConfigFuse = 0;
				}
			} else {
				hwFuseSts.ppSupportFuse = 0;
				hwFuseSts.maxPpOutPicWidthFuse = 0;
				hwFuseSts.ppConfigFuse = 0;
			}

			/* clamp every synthesis capability by its fuse bit */
			if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse)
				dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse;
			if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse)
				dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse;
			if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED;
			if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED;
			if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED;
			if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED;
			if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse)
				dec->jpegSupport = JPEG_BASELINE;
			if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED;
			if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED;
			if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED;
			if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED;
			if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED;
			if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED;

			/* check the pp config vs fuse status */
			/* NOTE(review): the masks below look suspicious -- the
			 * `>> 5` against a 0xF0000000 mask and the `&= 0xFD000000` /
			 * `&= 0xFE000000` (which zero all low bits rather than
			 * clearing a single feature bit) are inherited from the
			 * vendor/Hantro DWL code.  Presumably ~PP_DEINTERLACING /
			 * ~PP_ALPHA_BLENDING were intended; confirm against the
			 * original DWL sources before changing. */
			if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) {
				u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25);
				u32 alphaBlend = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24);
				u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25);
				u32 alphaBlendFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24);

				if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000;
				if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000;
			}
			if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED;
			if (!hwFuseSts.refBufSupportFuse) dec->refBufSupport = REF_BUF_NOT_SUPPORTED;
			if (!hwFuseSts.rvSupportFuse) dec->rvSupport = RV_NOT_SUPPORTED;
			if (!hwFuseSts.avsSupportFuse) dec->avsSupport = AVS_NOT_SUPPORTED;
			if (!hwFuseSts.mvcSupportFuse) dec->mvcSupport = MVC_NOT_SUPPORTED;
		}
	}
#endif
	/* Encoder capability word (encoder swreg63). */
	configReg = pservice->enc_dev.hwregs[63];
	enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
	enc->h264Enabled = (configReg >> 27) & 1;
	enc->mpeg4Enabled = (configReg >> 26) & 1;
	enc->jpegEnabled = (configReg >> 25) & 1;
	enc->vsEnabled = (configReg >> 24) & 1;
	enc->rgbEnabled = (configReg >> 28) & 1;
	enc->reg_size = pservice->reg_size;
	enc->reserv[0] = enc->reserv[1] = 0;

	/* Frequency scaling is only enabled on SoCs known to need it. */
	pservice->auto_freq = soc_is_rk2928g() || soc_is_rk2928l() || soc_is_rk2926();
	if (pservice->auto_freq) {
		printk("vpu_service set to auto frequency mode\n");
		atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
	}
	/* rk30xx parts need a decoder-address workaround elsewhere */
	pservice->bug_dec_addr = cpu_is_rk30xx();
}
+\r
/*
 * Decoder hard-IRQ (top half): acknowledges decoder and post-processor
 * interrupts, bumps per-unit counters for the threaded handler and stores
 * the raw status in pservice->irq_status.
 */
static irqreturn_t vdpu_irq(int irq, void *dev_id)
{
	struct vpu_service_info *pservice = (struct vpu_service_info*)dev_id;
	vpu_device *dev = &pservice->dec_dev;
	u32 irq_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);

	pr_debug("dec_irq\n");

	if (irq_status & DEC_INTERRUPT_BIT) {
		pr_debug("dec_isr dec %x\n", irq_status);
		/* 0x40001: interrupt raised while the core still reports busy;
		 * poll until the busy bit drops.  NOTE(review): unbounded loop
		 * in hard-irq context -- presumably the hardware guarantees
		 * termination; confirm. */
		if ((irq_status & 0x40001) == 0x40001)
		{
			do {
				irq_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
			} while ((irq_status & 0x40001) == 0x40001);
		}

		/* clear dec IRQ */
		if (pservice->hw_info->hw_id != HEVC_ID) {
			/* NOTE(review): '~' binds tighter than '|', so the mask is
			 * (~DEC_INTERRUPT_BIT) | DEC_BUFFER_EMPTY_BIT, which keeps the
			 * buffer-empty bit set instead of clearing it.  Possibly
			 * ~(DEC_INTERRUPT_BIT | DEC_BUFFER_EMPTY_BIT) was intended --
			 * confirm against the VPU datasheet before changing. */
			writel(irq_status & (~DEC_INTERRUPT_BIT|DEC_BUFFER_EMPTY_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER);
		} else {
			/* HEVC core: clearing the whole register acks everything */
			writel(0, dev->hwregs + DEC_INTERRUPT_REGISTER);
		}
		atomic_add(1, &dev->irq_count_codec);
	}

	/* Post-processor interrupt exists only on the combined VPU. */
	if (pservice->hw_info->hw_id != HEVC_ID) {
		irq_status = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
		if (irq_status & PP_INTERRUPT_BIT) {
			pr_debug("vdpu_isr pp %x\n", irq_status);
			/* clear pp IRQ */
			writel(irq_status & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
			atomic_add(1, &dev->irq_count_pp);
		}
	}

	pservice->irq_status = irq_status;

	return IRQ_WAKE_THREAD;
}
+\r
+static irqreturn_t vdpu_isr(int irq, void *dev_id)\r
+{\r
+ struct vpu_service_info *pservice = (struct vpu_service_info*)dev_id;\r
+ vpu_device *dev = &pservice->dec_dev;\r
+\r
+ mutex_lock(&pservice->lock);\r
+ if (atomic_read(&dev->irq_count_codec)) {\r
+#if VPU_SERVICE_SHOW_TIME\r
+ do_gettimeofday(&dec_end);\r
+ pr_info("dec task: %ld ms\n",\r
+ (dec_end.tv_sec - dec_start.tv_sec) * 1000 +\r
+ (dec_end.tv_usec - dec_start.tv_usec) / 1000);\r
+#endif\r
+ atomic_sub(1, &dev->irq_count_codec);\r
+ if (NULL == pservice->reg_codec) {\r
+ pr_err("error: dec isr with no task waiting\n");\r
+ } else {\r
+ reg_from_run_to_done(pservice, pservice->reg_codec);\r
+ }\r
+ }\r
+\r
+ if (atomic_read(&dev->irq_count_pp)) {\r
+\r
+#if VPU_SERVICE_SHOW_TIME\r
+ do_gettimeofday(&pp_end);\r
+ printk("pp task: %ld ms\n",\r
+ (pp_end.tv_sec - pp_start.tv_sec) * 1000 +\r
+ (pp_end.tv_usec - pp_start.tv_usec) / 1000);\r
+#endif\r
+\r
+ atomic_sub(1, &dev->irq_count_pp);\r
+ if (NULL == pservice->reg_pproc) {\r
+ pr_err("error: pp isr with no task waiting\n");\r
+ } else {\r
+ reg_from_run_to_done(pservice, pservice->reg_pproc);\r
+ }\r
+ }\r
+ try_set_reg(pservice);\r
+ mutex_unlock(&pservice->lock);\r
+ return IRQ_HANDLED;\r
+}\r
+\r
/*
 * Encoder hard-IRQ (top half): acknowledges the encode-done interrupt and
 * bumps the counter consumed by the threaded handler vepu_isr().
 */
static irqreturn_t vepu_irq(int irq, void *dev_id)
{
	struct vpu_service_info *pservice = (struct vpu_service_info*)dev_id;
	vpu_device *dev = &pservice->enc_dev;
	u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);

	pr_debug("vepu_irq irq status %x\n", irq_status);

#if VPU_SERVICE_SHOW_TIME
	do_gettimeofday(&enc_end);
	pr_info("enc task: %ld ms\n",
		(enc_end.tv_sec - enc_start.tv_sec) * 1000 +
		(enc_end.tv_usec - enc_start.tv_usec) / 1000);
#endif

	if (likely(irq_status & ENC_INTERRUPT_BIT)) {
		/* clear enc IRQ */
		writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
		atomic_add(1, &dev->irq_count_codec);
	}

	return IRQ_WAKE_THREAD;
}
+\r
+static irqreturn_t vepu_isr(int irq, void *dev_id)\r
+{\r
+ //struct vpu_device *dev = (struct vpu_device *) dev_id;\r
+ struct vpu_service_info *pservice = (struct vpu_service_info*)dev_id;\r
+ vpu_device *dev = &pservice->enc_dev;\r
+\r
+ mutex_lock(&pservice->lock);\r
+ if (atomic_read(&dev->irq_count_codec)) {\r
+ atomic_sub(1, &dev->irq_count_codec);\r
+ if (NULL == pservice->reg_codec) {\r
+ pr_err("error: enc isr with no task waiting\n");\r
+ } else {\r
+ reg_from_run_to_done(pservice, pservice->reg_codec);\r
+ }\r
+ }\r
+ try_set_reg(pservice);\r
+ mutex_unlock(&pservice->lock);\r
+ return IRQ_HANDLED;\r
+}\r
+\r
+static int __init vcodec_service_init(void)\r
+{\r
+ int ret;\r
+\r
+ if ((ret = platform_driver_register(&vcodec_driver)) != 0) {\r
+ pr_err("Platform device register failed (%d).\n", ret);\r
+ return ret;\r
+ }\r
+\r
+#ifdef CONFIG_DEBUG_FS\r
+ vcodec_debugfs_init();\r
+#endif\r
+\r
+ return ret;\r
+}\r
+\r
/* Module exit: tear down debugfs first, then unregister the driver. */
static void __exit vcodec_service_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	vcodec_debugfs_exit();
#endif

	platform_driver_unregister(&vcodec_driver);
}
+\r
+module_init(vcodec_service_init);\r
+module_exit(vcodec_service_exit);\r
+\r
+#ifdef CONFIG_DEBUG_FS\r
+#include <linux/seq_file.h>\r
+\r
+static int vcodec_debugfs_init()\r
+{\r
+ parent = debugfs_create_dir("vcodec", NULL);\r
+ if (!parent)\r
+ return -1;\r
+\r
+ return 0;\r
+}\r
+\r
+static void vcodec_debugfs_exit()\r
+{\r
+ debugfs_remove(parent);\r
+}\r
+\r
/*
 * Create one per-device subdirectory under the given debugfs parent.
 * Returns the new dentry or NULL (this kernel era returns NULL on error).
 * Note: the `parent` parameter shadows the file-scope `parent` dentry.
 */
static struct dentry* vcodec_debugfs_create_device_dir(char *dirname, struct dentry *parent)
{
	return debugfs_create_dir(dirname, parent);
}
+\r
/*
 * debugfs "regs" dump: prints every encoder/decoder hardware register and
 * the per-session register-set queues.
 *
 * Powers the hardware on before touching hwregs.  NOTE(review): there is
 * no explicit power-off here -- presumably the delayed power_off_work
 * turns the device back off; confirm the balance.
 */
static int debug_vcodec_show(struct seq_file *s, void *unused)
{
	struct vpu_service_info *pservice = s->private;
	unsigned int i, n;
	vpu_reg *reg, *reg_tmp;
	vpu_session *session, *session_tmp;

	mutex_lock(&pservice->lock);
	vpu_service_power_on(pservice);
	/* HEVC hardware has no encoder block, so skip its registers. */
	if (pservice->hw_info->hw_id != HEVC_ID) {
		seq_printf(s, "\nENC Registers:\n");
		n = pservice->enc_dev.iosize >> 2;
		for (i = 0; i < n; i++) {
			seq_printf(s, "\tswreg%d = %08X\n", i, readl(pservice->enc_dev.hwregs + i));
		}
	}
	seq_printf(s, "\nDEC Registers:\n");
	n = pservice->dec_dev.iosize >> 2;
	for (i = 0; i < n; i++) {
		seq_printf(s, "\tswreg%d = %08X\n", i, readl(pservice->dec_dev.hwregs + i));
	}

	seq_printf(s, "\nvpu service status:\n");
	/* _safe iteration is not strictly needed (nothing is removed); kept as-is */
	list_for_each_entry_safe(session, session_tmp, &pservice->session, list_session) {
		seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
		list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
			seq_printf(s, "waiting register set\n");
		}
		list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
			seq_printf(s, "running register set\n");
		}
		list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
			seq_printf(s, "done register set\n");
		}
	}
	mutex_unlock(&pservice->lock);

	return 0;
}
+\r
/* debugfs open hook: bind debug_vcodec_show to the per-device private data. */
static int debug_vcodec_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_vcodec_show, inode->i_private);
}
+\r
+#endif\r
+\r
+#if HEVC_TEST_ENABLE\r
+#include "hevc_test_inc/pps_00.h"\r
+#include "hevc_test_inc/register_00.h"\r
+#include "hevc_test_inc/rps_00.h"\r
+#include "hevc_test_inc/scaling_list_00.h"\r
+#include "hevc_test_inc/stream_00.h"\r
+\r
+#include "hevc_test_inc/pps_01.h"\r
+#include "hevc_test_inc/register_01.h"\r
+#include "hevc_test_inc/rps_01.h"\r
+#include "hevc_test_inc/scaling_list_01.h"\r
+#include "hevc_test_inc/stream_01.h"\r
+\r
+#include "hevc_test_inc/cabac.h"\r
+\r
+#define TEST_CNT 2\r
+static int hevc_test_case0(vpu_service_info *pservice)\r
+{\r
+ vpu_session session;\r
+ vpu_reg *reg; \r
+ unsigned long size = sizeof(register_00); // registers array length\r
+ int testidx = 0;\r
+ int ret = 0;\r
+\r
+ u8 *pps_tbl[TEST_CNT];\r
+ u8 *register_tbl[TEST_CNT];\r
+ u8 *rps_tbl[TEST_CNT];\r
+ u8 *scaling_list_tbl[TEST_CNT];\r
+ u8 *stream_tbl[TEST_CNT];\r
+\r
+ int stream_size[2];\r
+\r
+ u32 phy_pps;\r
+ u32 phy_rps;\r
+ u32 phy_scl;\r
+ u32 phy_str;\r
+ u32 phy_yuv;\r
+ u32 phy_cabac;\r
+\r
+ u8 *pps;\r
+ u8 *yuv;\r
+ int i;\r
+ \r
+ pps_tbl[0] = pps_00;\r
+ pps_tbl[1] = pps_01;\r
+\r
+ register_tbl[0] = register_00;\r
+ register_tbl[1] = register_01;\r
+ \r
+ rps_tbl[0] = rps_00;\r
+ rps_tbl[1] = rps_01;\r
+ \r
+ scaling_list_tbl[0] = scaling_list_00;\r
+ scaling_list_tbl[1] = scaling_list_01;\r
+\r
+ stream_tbl[0] = stream_00;\r
+ stream_tbl[1] = stream_01;\r
+\r
+ stream_size[0] = sizeof(stream_00);\r
+ stream_size[1] = sizeof(stream_01);\r
+\r
+ // create session\r
+ session.pid = current->pid;\r
+ session.type = VPU_DEC;\r
+ INIT_LIST_HEAD(&session.waiting);\r
+ INIT_LIST_HEAD(&session.running);\r
+ INIT_LIST_HEAD(&session.done);\r
+ INIT_LIST_HEAD(&session.list_session);\r
+ init_waitqueue_head(&session.wait);\r
+ atomic_set(&session.task_running, 0);\r
+ list_add_tail(&session.list_session, &pservice->session);\r
+\r
+ while (testidx < TEST_CNT) {\r
+ // create registers\r
+ reg = kmalloc(sizeof(vpu_reg)+pservice->reg_size, GFP_KERNEL);\r
+ if (NULL == reg) {\r
+ pr_err("error: kmalloc fail in reg_init\n");\r
+ return -1;\r
+ }\r
+\r
+ if (size > pservice->reg_size) {\r
+ printk("warning: vpu reg size %lu is larger than hw reg size %lu\n", size, pservice->reg_size);\r
+ size = pservice->reg_size;\r
+ }\r
+ reg->session = &session;\r
+ reg->type = session.type;\r
+ reg->size = size;\r
+ reg->freq = VPU_FREQ_DEFAULT;\r
+ reg->reg = (unsigned long *)®[1];\r
+ INIT_LIST_HEAD(®->session_link);\r
+ INIT_LIST_HEAD(®->status_link);\r
+\r
+ pps = kmalloc(sizeof(pps_00), GFP_KERNEL);\r
+ yuv = kzalloc(256*256*3/2, GFP_KERNEL);\r
+ memcpy(pps, pps_tbl[testidx], sizeof(pps_00));\r
+\r
+ // TODO: stuff registers\r
+ memcpy(®->reg[0], register_tbl[testidx], sizeof(register_00));\r
+\r
+ // TODO: replace reigster address\r
+ phy_pps = virt_to_phys(pps);\r
+ phy_rps = virt_to_phys(rps_tbl[testidx]);\r
+ phy_scl = virt_to_phys(scaling_list_tbl[testidx]);\r
+ phy_str = virt_to_phys(stream_tbl[testidx]);\r
+ phy_yuv = virt_to_phys(yuv);\r
+ phy_cabac = virt_to_phys(Cabac_table);\r
+\r
+ for (i=0; i<64; i++) {\r
+ u32 scaling_offset;\r
+ u32 tmp;\r
+\r
+ scaling_offset = (u32)pps[i*80+74];\r
+ scaling_offset += (u32)pps[i*80+75] << 8;\r
+ scaling_offset += (u32)pps[i*80+76] << 16;\r
+ scaling_offset += (u32)pps[i*80+77] << 24;\r
+\r
+ tmp = phy_scl + scaling_offset;\r
+\r
+ pps[i*80+74] = tmp & 0xff;\r
+ pps[i*80+75] = (tmp >> 8) & 0xff;\r
+ pps[i*80+76] = (tmp >> 16) & 0xff;\r
+ pps[i*80+77] = (tmp >> 24) & 0xff;\r
+ }\r
+\r
+ dmac_flush_range(&pps[0], &pps[sizeof(pps_00) - 1]);\r
+ outer_flush_range(phy_pps, phy_pps + sizeof(pps_00) - 1);\r
+\r
+ printk("%s %d, phy stream %08x, phy pps %08x, phy rps %08x\n", __func__, __LINE__, phy_str, phy_pps, phy_rps);\r
+\r
+ reg->reg[4] = phy_str;\r
+ reg->reg[5] = ((stream_size[testidx]+15)&(~15))+64;\r
+ reg->reg[6] = phy_cabac;\r
+ reg->reg[7] = phy_yuv;\r
+ reg->reg[42] = phy_pps;\r
+ reg->reg[43] = phy_rps;\r
+\r
+ mutex_lock(&pservice->lock);\r
+ list_add_tail(®->status_link, &pservice->waiting);\r
+ list_add_tail(®->session_link, &session.waiting);\r
+ mutex_unlock(&pservice->lock);\r
+\r
+ printk("%s %d %p\n", __func__, __LINE__, pservice);\r
+\r
+ // stuff hardware\r
+ try_set_reg(pservice);\r
+\r
+ // wait for result\r
+ ret = wait_event_timeout(session.wait, !list_empty(&session.done), VPU_TIMEOUT_DELAY);\r
+ if (!list_empty(&session.done)) {\r
+ if (ret < 0) {\r
+ pr_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session.pid, ret);\r
+ }\r
+ ret = 0;\r
+ } else {\r
+ if (unlikely(ret < 0)) {\r
+ pr_err("error: pid %d wait task ret %d\n", session.pid, ret);\r
+ } else if (0 == ret) {\r
+ pr_err("error: pid %d wait %d task done timeout\n", session.pid, atomic_read(&session.task_running));\r
+ ret = -ETIMEDOUT;\r
+ }\r
+ }\r
+ if (ret < 0) {\r
+ int task_running = atomic_read(&session.task_running);\r
+ int n;\r
+ mutex_lock(&pservice->lock);\r
+ vpu_service_dump(pservice);\r
+ if (task_running) {\r
+ atomic_set(&session.task_running, 0);\r
+ atomic_sub(task_running, &pservice->total_running);\r
+ printk("%d task is running but not return, reset hardware...", task_running);\r
+ vpu_reset(pservice);\r
+ printk("done\n");\r
+ }\r
+ vpu_service_session_clear(pservice, &session);\r
+ mutex_unlock(&pservice->lock);\r
+\r
+ printk("\nDEC Registers:\n");\r
+ n = pservice->dec_dev.iosize >> 2;\r
+ for (i=0; i<n; i++) {\r
+ printk("\tswreg%d = %08X\n", i, readl(pservice->dec_dev.hwregs + i));\r
+ }\r
+\r
+ pr_err("test index %d failed\n", testidx);\r
+ kfree(pps);\r
+ kfree(yuv);\r
+ break;\r
+ } else {\r
+ pr_info("test index %d success\n", testidx);\r
+\r
+ vpu_reg *reg = list_entry(session.done.next, vpu_reg, session_link);\r
+\r
+ for (i=0; i<68; i++) {\r
+ if (i % 4 == 0) {\r
+ printk("%02d: ", i);\r
+ }\r
+ printk("%08x ", reg->reg[i]);\r
+ if ((i+1) % 4 == 0) {\r
+ printk("\n");\r
+ }\r
+ }\r
+\r
+ testidx++;\r
+ }\r
+\r
+ reg_deinit(pservice, reg);\r
+ kfree(pps);\r
+ kfree(yuv);\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+#endif\r
+\r