1 /* arch/arm/mach-rk29/vpu.c
3 * Copyright (C) 2010 ROCKCHIP, Inc.
4 * author: chenhengming chm@rock-chips.com
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
/*
 * Logging prefix for every pr_*() call in this driver.
 * NOTE(review): this listing is incomplete -- the #else/#endif of the
 * conditional are not visible; the debug variant additionally prints
 * the calling function name via __func__.
 */
17 #ifdef CONFIG_RK29_VPU_DEBUG
19 #define pr_fmt(fmt) "VPU_SERVICE: %s: " fmt, __func__
21 #define pr_fmt(fmt) "VPU_SERVICE: " fmt
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
33 #include <linux/ioport.h>
34 #include <linux/miscdevice.h>
36 #include <linux/poll.h>
37 #include <linux/platform_device.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/timer.h>
42 #include <asm/uaccess.h>
44 #include <mach/irqs.h>
45 #include <plat/vpu_service.h>
/* Interrupt status register indices (word offsets) and status bits for the
 * decoder, post-processor and encoder register files. */
50 #define DEC_INTERRUPT_REGISTER 1
51 #define PP_INTERRUPT_REGISTER 60
52 #define ENC_INTERRUPT_REGISTER 1
54 #define DEC_INTERRUPT_BIT 0x100
55 #define PP_INTERRUPT_BIT 0x100
56 #define ENC_INTERRUPT_BIT 0x1
/* Number of 32-bit registers per hardware unit; the encoder register count
 * differs between the RK29 and RK30 variants of the IP. */
58 #define REG_NUM_DEC (60)
59 #define REG_NUM_PP (41)
60 #if defined(CONFIG_ARCH_RK29)
61 #define REG_NUM_ENC (96)
62 #elif defined(CONFIG_ARCH_RK30)
63 #define REG_NUM_ENC (164)
65 #define REG_NUM_DEC_PP (REG_NUM_DEC+REG_NUM_PP)
/* SIZE_REG(n): size in bytes of n 32-bit registers. */
66 #define SIZE_REG(reg) ((reg)*4)
68 #define DEC_IO_SIZE ((100 + 1) * 4) /* bytes */
69 #if defined(CONFIG_ARCH_RK29)
70 #define ENC_IO_SIZE (96 * 4) /* bytes */
71 #elif defined(CONFIG_ARCH_RK30)
72 #define ENC_IO_SIZE (164 * 4) /* bytes */
/* NOTE(review): REG_NUM_DEC_PP is defined twice in this listing (also at
 * original line 65) -- likely redundant; verify against the full file. */
74 #define REG_NUM_DEC_PP (REG_NUM_DEC+REG_NUM_PP)
/* Product IDs accepted by the HW-id probe (upper 16 bits of register 0). */
75 static const u16 dec_hw_ids[] = { 0x8190, 0x8170, 0x9170, 0x9190, 0x6731 };
76 #if defined(CONFIG_ARCH_RK29)
77 static const u16 enc_hw_ids[] = { 0x6280, 0x7280, 0x8270 };
/* Byte offset of the decoder block inside the vcodec MMIO window. */
78 #define DEC_PHY_OFFSET 0x200
79 #elif defined(CONFIG_ARCH_RK30)
80 static const u16 enc_hw_ids[] = { 0x6280, 0x7280, 0x8270, 0x8290, 0x4831 };
81 #define DEC_PHY_OFFSET 0x400
/* Reuse the RK29 symbol name on RK30 so the rest of the file is shared. */
82 #define RK29_VCODEC_PHYS RK30_VCODEC_PHYS
/* Register indices (word offsets) of the enable and clock-gate registers
 * for each hardware path, plus the gate bits to force clocks on. */
85 #define VPU_REG_EN_ENC 14
86 #define VPU_REG_ENC_GATE 2
87 #define VPU_REG_ENC_GATE_BIT (1<<4)
89 #define VPU_REG_EN_DEC 1
90 #define VPU_REG_DEC_GATE 2
91 #define VPU_REG_DEC_GATE_BIT (1<<10)
92 #define VPU_REG_EN_PP 0
93 #define VPU_REG_PP_GATE 1
94 #define VPU_REG_PP_GATE_BIT (1<<8)
95 #define VPU_REG_EN_DEC_PP 1
96 #define VPU_REG_DEC_PP_GATE 61
97 #define VPU_REG_DEC_PP_GATE_BIT (1<<8)
/* Per-open-file session state: which client type (enc/dec/pp) the fd is,
 * plus the three queues a register job moves through and a waitqueue the
 * owning process blocks on in VPU_IOC_GET_REG. */
101 * struct for process session which connect to vpu
103 * @author ChenHengming (2011-5-3)
105 typedef struct vpu_session {
106 VPU_CLIENT_TYPE type;
107 /* a linked list of data so we can access them for debugging */
108 struct list_head list_session;
109 /* a linked list of register data waiting for process */
110 struct list_head waiting;
111 /* a linked list of register data in processing */
112 struct list_head running;
113 /* a linked list of register data processed */
114 struct list_head done;
115 wait_queue_head_t wait;
117 atomic_t task_running;
/* One queued register job: a snapshot of the user-supplied register set,
 * linked both into its session and into the global service queues. */
121 * struct for process register set
123 * @author ChenHengming (2011-5-4)
/* NOTE(review): VPU_REG_NUM_ENC / VPU_REG_NUM_DEC_PP do not match the
 * REG_NUM_ENC / REG_NUM_DEC_PP macros visible above -- they are presumably
 * defined in lines missing from this listing; confirm in the full file. */
125 #define VPU_REG_NUM_MAX (((VPU_REG_NUM_ENC)>(VPU_REG_NUM_DEC_PP))?(VPU_REG_NUM_ENC):(VPU_REG_NUM_DEC_PP))
126 typedef struct vpu_reg {
127 VPU_CLIENT_TYPE type;
128 vpu_session *session;
129 struct list_head session_link; /* link to vpu service session */
130 struct list_head status_link; /* link to register set list */
132 unsigned long reg[VPU_REG_NUM_MAX];
/* One hardware unit (decoder+pp, or encoder): physical base and its
 * ioremapped register window. */
135 typedef struct vpu_device {
136 unsigned long iobaseaddr;
138 volatile u32 *hwregs;
/* Global driver state: power-off timer, job queues, open sessions, the
 * currently-running codec/pp jobs and cached HW capability info. */
143 typedef struct vpu_service_info {
145 struct timer_list timer; /* timer for power off */
146 struct list_head waiting; /* link to link_reg in struct vpu_reg */
147 struct list_head running; /* link to link_reg in struct vpu_reg */
148 struct list_head done; /* link to link_reg in struct vpu_reg */
149 struct list_head session; /* link to list_session in struct vpu_session */
150 atomic_t total_running;
155 VPUHwDecConfig_t dec_config;
156 VPUHwEncConfig_t enc_config;
/* ioctl argument carrier: user pointer + size (body not in this listing). */
159 typedef struct vpu_request
/* Clock handles and the two hardware units; 'service' is the single
 * global driver instance. */
165 static struct clk *pd_video;
166 static struct clk *clk_vpu; /* for power on notify */
167 static struct clk *aclk_vepu;
168 static struct clk *hclk_vepu;
169 static struct clk *aclk_ddr_vepu;
170 static struct clk *hclk_cpu_vcodec;
171 static vpu_service_info service;
172 static vpu_device dec_dev;
173 static vpu_device enc_dev;
175 #define POWER_OFF_DELAY 4*HZ /* 4s */
176 #define TIMEOUT_DELAY 2*HZ /* 2s */
/* Acquire handles for every clock the vcodec power sequence toggles.
 * NOTE(review): no IS_ERR() checks are visible on these clk_get() results;
 * failures would surface later as oopses in clk_enable() -- verify against
 * the full file. */
178 static void vpu_get_clk(void)
180 pd_video = clk_get(NULL, "pd_video");
181 clk_vpu = clk_get(NULL, "vpu");
182 aclk_vepu = clk_get(NULL, "aclk_vepu");
183 hclk_vepu = clk_get(NULL, "hclk_vepu");
184 aclk_ddr_vepu = clk_get(NULL, "aclk_ddr_vepu");
185 hclk_cpu_vcodec = clk_get(NULL, "hclk_cpu_vcodec");
/* Release the clock handles (counterpart of vpu_get_clk; additional
 * clk_put() calls are in lines missing from this listing). */
188 static void vpu_put_clk(void)
194 clk_put(aclk_ddr_vepu);
195 clk_put(hclk_cpu_vcodec);
/*
 * Hard-reset the video codec via CRU soft-reset lines (assert all, then
 * deassert in reverse order), then forget any in-flight jobs so the
 * scheduler state matches the now-idle hardware.  Per-SoC sequences:
 * RK29 gates the DDR AXI clock around the reset; RK30 parks the video
 * power domain with an idle request instead.
 */
198 static void vpu_reset(void)
200 #if defined(CONFIG_ARCH_RK29)
201 clk_disable(aclk_ddr_vepu);
202 cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
203 cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
204 cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
205 cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
207 cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
208 cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
209 cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
210 cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
211 clk_enable(aclk_ddr_vepu);
212 #elif defined(CONFIG_ARCH_RK30)
213 pmu_set_idle_request(IDLE_REQ_VIDEO, true);
214 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
215 cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
216 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
217 cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
219 cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
220 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
221 cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
222 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
223 pmu_set_idle_request(IDLE_REQ_VIDEO, false);
/* Hardware is idle after reset: drop the "currently running" bookkeeping. */
225 service.reg_codec = NULL;
226 service.reg_pproc = NULL;
227 service.reg_resev = NULL;
230 static void reg_deinit(vpu_reg *reg);
/* Free every register job still linked to this session (waiting, running
 * and done queues).  Caller is expected to hold service.lock -- see the
 * call site in vpu_service_release(). */
231 static void vpu_service_session_clear(vpu_session *session)
234 list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
237 list_for_each_entry_safe(reg, n, &session->running, session_link) {
240 list_for_each_entry_safe(reg, n, &session->done, session_link) {
/* Debug dump of the scheduler state to the kernel log: global counters,
 * the in-flight codec/pp job pointers, and every queued job per session. */
245 static void vpu_service_dump(void)
248 vpu_reg *reg, *reg_tmp;
249 vpu_session *session, *session_tmp;
251 running = atomic_read(&service.total_running);
252 printk("total_running %d\n", running);
254 printk("reg_codec 0x%.8x\n", (unsigned int)service.reg_codec);
255 printk("reg_pproc 0x%.8x\n", (unsigned int)service.reg_pproc);
256 printk("reg_resev 0x%.8x\n", (unsigned int)service.reg_resev);
/* Walk every open session and print the contents of its three queues. */
258 list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
259 printk("session pid %d type %d:\n", session->pid, session->type);
260 running = atomic_read(&session->task_running);
261 printk("task_running %d\n", running);
262 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
263 printk("waiting register set 0x%.8x\n", (unsigned int)reg);
265 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
266 printk("running register set 0x%.8x\n", (unsigned int)reg);
268 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
269 printk("done register set 0x%.8x\n", (unsigned int)reg);
/*
 * Gate all vcodec clocks and the power domain.  Idempotent: returns early
 * (unlocking) if already powered off.  If jobs are still counted as
 * running it logs an alert and delays briefly before cutting power.
 */
274 static void vpu_service_power_off(void)
278 mutex_lock(&service.lock);
279 if (!service.enabled) {
280 mutex_unlock(&service.lock);
284 service.enabled = false;
285 total_running = atomic_read(&service.total_running);
287 pr_alert("alert: power off when %d task running!!\n", total_running);
289 pr_alert("alert: delay 50 ms for running task\n");
293 printk("vpu: power off...");
294 #ifdef CONFIG_ARCH_RK29
295 pmu_set_power_domain(PD_VCODEC, false);
297 clk_disable(pd_video);
/* Disable in roughly reverse order of vpu_service_power_on(). */
300 clk_disable(hclk_cpu_vcodec);
301 clk_disable(aclk_ddr_vepu);
302 clk_disable(hclk_vepu);
303 clk_disable(aclk_vepu);
304 clk_disable(clk_vpu);
306 mutex_unlock(&service.lock);
/* Timer callback: auto power-off POWER_OFF_DELAY after the last activity. */
309 static void vpu_service_power_off_work_func(unsigned long data)
312 vpu_service_power_off();
/*
 * Ungate all vcodec clocks / power domain if currently off, and (re)arm
 * the auto power-off timer.  clk_vpu is toggled outside the lock purely
 * as an "about to power on" notification; its balancing clk_disable is
 * at the end of the function.
 */
315 static void vpu_service_power_on(void)
317 clk_enable(clk_vpu); /* notify vpu on without lock. */
319 mutex_lock(&service.lock);
320 if (!service.enabled) {
321 service.enabled = true;
322 printk("vpu: power on\n");
325 clk_enable(aclk_vepu);
326 clk_enable(hclk_vepu);
327 clk_enable(hclk_cpu_vcodec);
329 #ifdef CONFIG_ARCH_RK29
330 pmu_set_power_domain(PD_VCODEC, true);
332 clk_enable(pd_video);
335 clk_enable(aclk_ddr_vepu);
/* Push the idle power-off deadline out by POWER_OFF_DELAY. */
337 mod_timer(&service.timer, jiffies + POWER_OFF_DELAY);
338 mutex_unlock(&service.lock);
340 clk_disable(clk_vpu);
/*
 * Allocate a register job, snapshot the user-space register set into it
 * and queue it on both the global and per-session waiting lists.
 * Returns the new job, or NULL on allocation/copy failure (error paths
 * are in lines missing from this listing).
 *
 * Fixed here: "&reg" had been mangled into the mojibake "®" throughout
 * this function (HTML-entity corruption); restored to valid C.
 *
 * NOTE(review): no bound check on the user-supplied 'size' is visible
 * before copying into the fixed-size reg[VPU_REG_NUM_MAX] array -- if the
 * full file does not clamp size this is a kernel buffer overflow; verify
 * and clamp to sizeof(reg->reg) if absent.
 */
343 static vpu_reg *reg_init(vpu_session *session, void __user *src, unsigned long size)
345 vpu_reg *reg = kmalloc(sizeof(vpu_reg), GFP_KERNEL);
347 pr_err("error: kmalloc fail in reg_init\n");
351 reg->session = session;
352 reg->type = session->type;
354 INIT_LIST_HEAD(&reg->session_link);
355 INIT_LIST_HEAD(&reg->status_link);
357 if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
358 pr_err("error: copy_from_user failed in reg_init\n");
363 mutex_lock(&service.lock);
364 list_add_tail(&reg->status_link, &service.waiting);
365 list_add_tail(&reg->session_link, &session->waiting);
366 mutex_unlock(&service.lock);
/*
 * Unlink a job from both lists, clear any "currently running" pointer
 * that still refers to it, and free it (kfree is in a line missing from
 * this listing).  Fixed here: mojibake "®" restored to "&reg".
 */
371 static void reg_deinit(vpu_reg *reg)
373 list_del_init(&reg->session_link);
374 list_del_init(&reg->status_link);
375 if (reg == service.reg_codec) service.reg_codec = NULL;
376 if (reg == service.reg_pproc) service.reg_pproc = NULL;
/*
 * Move a job from the waiting queues (global + session) to the running
 * queues.  Caller holds service.lock (see try_set_reg).
 * Fixed here: mojibake "®" restored to "&reg".
 */
380 static void reg_from_wait_to_run(vpu_reg *reg)
382 list_del_init(&reg->status_link);
383 list_add_tail(&reg->status_link, &service.running);
385 list_del_init(&reg->session_link);
386 list_add_tail(&reg->session_link, &reg->session->running);
389 static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
392 u32 *dst = (u32 *)®->reg[0];
393 for (i = 0; i < count; i++)
/*
 * Complete a job: move it to the done queues, read the result registers
 * back from the hardware unit that ran it (per job type), drop the
 * running counters and wake the owning session's waiter.
 * Caller holds service.lock (see the ISR threads).
 * Fixed here: mojibake "®" restored to "&reg".
 */
397 static void reg_from_run_to_done(vpu_reg *reg)
399 list_del_init(&reg->status_link);
400 list_add_tail(&reg->status_link, &service.done);
402 list_del_init(&reg->session_link);
403 list_add_tail(&reg->session_link, &reg->session->done);
/* VPU_ENC: results live in the encoder register file. */
407 service.reg_codec = NULL;
408 reg_copy_from_hw(reg, enc_dev.hwregs, REG_NUM_ENC);
/* VPU_DEC: decoder registers only. */
412 service.reg_codec = NULL;
413 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC);
/* VPU_PP: pp registers start at the pp interrupt register; ack it. */
417 service.reg_pproc = NULL;
418 reg_copy_from_hw(reg, dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_PP);
419 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
/* VPU_DEC_PP: combined run occupied both units. */
423 service.reg_codec = NULL;
424 service.reg_pproc = NULL;
425 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC_PP);
426 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
430 pr_err("error: copy reg from hw with unknown type %d\n", reg->type);
434 atomic_sub(1, &reg->session->task_running);
435 atomic_sub(1, &service.total_running);
436 wake_up_interruptible_sync(&reg->session->wait);
/*
 * Program the hardware with a job's register set and kick it off.
 * Bumps the running counters first; on an unknown job type they are
 * rolled back.  The enable register of each unit is written LAST so the
 * hardware only starts after all other registers are in place; clock
 * gate bits are OR-ed in to keep the unit clocked during the run.
 * Fixed here: mojibake "®" restored to "&reg".
 */
439 void reg_copy_to_hw(vpu_reg *reg)
442 u32 *src = (u32 *)&reg->reg[0];
443 atomic_add(1, &service.total_running);
444 atomic_add(1, &reg->session->task_running);
447 u32 *dst = (u32 *)enc_dev.hwregs;
/* RK30 pulses a codec soft reset before every encode run. */
448 #if defined(CONFIG_ARCH_RK30)
449 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
450 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
451 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
452 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
454 service.reg_codec = reg;
/* Pre-write the enable register with the run bits masked off (0x6),
 * then fill everything around it, then write the real enable value. */
456 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
458 for (i = 0; i < VPU_REG_EN_ENC; i++)
461 for (i = VPU_REG_EN_ENC + 1; i < REG_NUM_ENC; i++)
466 dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
467 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC];
/* VPU_DEC: program top-down so the enable register (index 1) goes last. */
470 u32 *dst = (u32 *)dec_dev.hwregs;
471 service.reg_codec = reg;
473 for (i = REG_NUM_DEC - 1; i > VPU_REG_DEC_GATE; i--)
478 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
479 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
/* VPU_PP: pp register window starts at PP_INTERRUPT_REGISTER. */
482 u32 *dst = (u32 *)dec_dev.hwregs + PP_INTERRUPT_REGISTER;
483 service.reg_pproc = reg;
485 dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
487 for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_PP; i++)
492 dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
/* VPU_DEC_PP: combined run claims both units. */
495 u32 *dst = (u32 *)dec_dev.hwregs;
496 service.reg_codec = reg;
497 service.reg_pproc = reg;
499 for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_DEC_PP; i++)
/* Bit 0x2 selects pipelined pp mode in the shared enable register. */
502 dst[VPU_REG_EN_DEC_PP] = src[VPU_REG_EN_DEC_PP] | 0x2;
505 dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
506 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
507 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
/* Unknown type: undo the counters bumped above. */
510 pr_err("error: unsupport session type %d", reg->type);
511 atomic_sub(1, &service.total_running);
512 atomic_sub(1, &reg->session->task_running);
/*
 * Scheduler: if a job is waiting and the hardware unit(s) it needs are
 * free (per the reg_codec/reg_pproc occupancy pointers), dispatch it.
 * A VPU_DEC job may also run alongside a pure-PP job; VPU_DEC_PP needs
 * both units idle.  Also re-arms the auto power-off timer.
 */
518 static void try_set_reg(void)
520 // first get reg from reg list
521 mutex_lock(&service.lock);
522 if (!list_empty(&service.waiting)) {
524 vpu_reg *reg = list_entry(service.waiting.next, vpu_reg, status_link);
526 mod_timer(&service.timer, jiffies + POWER_OFF_DELAY);
/* Per-type availability checks (dispatch bodies are in lines missing
 * from this listing). */
529 if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
533 if (NULL == service.reg_codec)
537 if (NULL == service.reg_codec) {
538 if (NULL == service.reg_pproc)
541 if ((VPU_DEC == service.reg_codec->type) && (NULL == service.reg_pproc))
546 if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
550 printk("undefined reg type %d\n", reg->type);
554 reg_from_wait_to_run(reg);
558 mutex_unlock(&service.lock);
/*
 * Copy a completed job's register snapshot back to user space; the
 * number of registers copied depends on the job type.  Returns 0 on
 * success (error paths are in lines missing from this listing).
 * Fixed here: mojibake "®" restored to "&reg".
 */
561 static int return_reg(vpu_reg *reg, u32 __user *dst)
566 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_ENC)))
571 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_DEC)))
576 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_PP)))
581 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_DEC_PP)))
587 pr_err("error: copy reg to user with unknown type %d\n", reg->type);
/*
 * ioctl entry point.  Commands:
 *   VPU_IOC_SET_CLIENT_TYPE    - tag this fd as enc/dec/pp client
 *   VPU_IOC_GET_HW_FUSE_STATUS - copy cached dec/enc capability structs out
 *   VPU_IOC_SET_REG            - queue a register job and power on
 *   VPU_IOC_GET_REG            - wait (with timeout) for a finished job
 *                                and copy its registers back; on timeout
 *                                resets the hardware and clears the session
 */
595 static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
597 vpu_session *session = (vpu_session *)filp->private_data;
598 if (NULL == session) {
603 case VPU_IOC_SET_CLIENT_TYPE : {
604 session->type = (VPU_CLIENT_TYPE)arg;
607 case VPU_IOC_GET_HW_FUSE_STATUS : {
609 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
610 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n")
613 if (VPU_ENC != session->type) {
614 if (copy_to_user((void __user *)req.req, &service.dec_config, sizeof(VPUHwDecConfig_t))) {
615 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
619 if (copy_to_user((void __user *)req.req, &service.enc_config, sizeof(VPUHwEncConfig_t))) {
620 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
628 case VPU_IOC_SET_REG : {
631 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
632 pr_err("error: VPU_IOC_SET_REG copy_from_user failed\n");
/* req.size is user-controlled and flows into reg_init's copy_from_user. */
636 reg = reg_init(session, (void __user *)req.req, req.size);
640 vpu_service_power_on();
646 case VPU_IOC_GET_REG : {
649 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
650 pr_err("error: VPU_IOC_GET_REG copy_from_user failed\n");
/* Sleep until a job reaches this session's done list or TIMEOUT_DELAY. */
653 int ret = wait_event_interruptible_timeout(session->wait, !list_empty(&session->done), TIMEOUT_DELAY);
654 if (!list_empty(&session->done)) {
656 pr_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session->pid, ret);
660 if (unlikely(ret < 0)) {
661 pr_err("error: pid %d wait task ret %d\n", session->pid, ret);
662 } else if (0 == ret) {
663 pr_err("error: pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
/* Timeout recovery: forget the stuck tasks, reset HW, flush session. */
667 mutex_lock(&service.lock);
669 int task_running = atomic_read(&session->task_running);
672 atomic_set(&session->task_running, 0);
673 atomic_sub(task_running, &service.total_running);
674 printk("%d task is running but not return, reset hardware...", task_running);
678 vpu_service_session_clear(session);
679 mutex_unlock(&service.lock);
682 mutex_unlock(&service.lock);
684 mutex_lock(&service.lock);
685 reg = list_entry(session->done.next, vpu_reg, session_link);
686 return_reg(reg, (u32 __user *)req.req);
687 mutex_unlock(&service.lock);
691 pr_err("error: unknow vpu service ioctl cmd %x\n", cmd);
/*
 * Probe helper: read register 0 of a unit and match its product ID
 * (upper 16 bits) against the supported-ID table.
 * NOTE(review): the visible comparison indexes hwids[num] -- the
 * surrounding loop over 'num' is in lines missing from this listing.
 */
699 static int vpu_service_check_hw_id(struct vpu_device * dev, const u16 *hwids, size_t num)
701 u32 hwid = readl(dev->hwregs);
702 pr_info("HW ID = 0x%08x\n", hwid);
704 hwid = (hwid >> 16) & 0xFFFF; /* product version only */
707 if (hwid == hwids[num]) {
708 pr_info("Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
713 pr_info("No Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
/* Undo vpu_service_reserve_io: unmap and release both MMIO regions. */
717 static void vpu_service_release_io(void)
720 iounmap((void *)dec_dev.hwregs);
721 release_mem_region(dec_dev.iobaseaddr, dec_dev.iosize);
724 iounmap((void *)enc_dev.hwregs);
725 release_mem_region(enc_dev.iobaseaddr, enc_dev.iosize);
/*
 * Reserve and map the decoder and encoder MMIO windows, then verify the
 * hardware IDs.  On any failure the common exit path (missing from this
 * listing) calls vpu_service_release_io() and returns an error.
 */
728 static int vpu_service_reserve_io(void)
730 unsigned long iobaseaddr;
731 unsigned long iosize;
733 iobaseaddr = dec_dev.iobaseaddr;
734 iosize = dec_dev.iosize;
736 if (!request_mem_region(iobaseaddr, iosize, "vdpu_io")) {
737 pr_info("failed to reserve dec HW regs\n");
741 dec_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
743 if (dec_dev.hwregs == NULL) {
744 pr_info("failed to ioremap dec HW regs\n");
748 /* check for correct HW */
749 if (!vpu_service_check_hw_id(&dec_dev, dec_hw_ids, ARRAY_SIZE(dec_hw_ids))) {
753 iobaseaddr = enc_dev.iobaseaddr;
754 iosize = enc_dev.iosize;
756 if (!request_mem_region(iobaseaddr, iosize, "hx280enc")) {
757 pr_info("failed to reserve enc HW regs\n");
761 enc_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
763 if (enc_dev.hwregs == NULL) {
764 pr_info("failed to ioremap enc HW regs\n");
768 /* check for correct HW */
769 if (!vpu_service_check_hw_id(&enc_dev, enc_hw_ids, ARRAY_SIZE(enc_hw_ids))) {
775 vpu_service_release_io();
/*
 * open(): allocate a session for this fd, initialize its queues and
 * waitqueue, and register it on the global session list under the lock.
 */
779 static int vpu_service_open(struct inode *inode, struct file *filp)
781 vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
782 if (NULL == session) {
783 pr_err("error: unable to allocate memory for vpu_session.");
/* Type is unknown until VPU_IOC_SET_CLIENT_TYPE is issued. */
787 session->type = VPU_TYPE_BUTT;
788 session->pid = current->pid;
789 INIT_LIST_HEAD(&session->waiting);
790 INIT_LIST_HEAD(&session->running);
791 INIT_LIST_HEAD(&session->done);
792 INIT_LIST_HEAD(&session->list_session);
793 init_waitqueue_head(&session->wait);
794 atomic_set(&session->task_running, 0);
795 mutex_lock(&service.lock);
796 list_add_tail(&session->list_session, &service.session);
797 filp->private_data = (void *)session;
798 mutex_unlock(&service.lock);
800 pr_debug("dev opened\n");
801 return nonseekable_open(inode, filp);
/*
 * release(): complain if tasks are still counted as running, wake any
 * waiter, then unlink and flush the session's jobs under the lock.
 * (The kfree of the session itself is in a line missing from this
 * listing.)
 */
804 static int vpu_service_release(struct inode *inode, struct file *filp)
807 vpu_session *session = (vpu_session *)filp->private_data;
811 task_running = atomic_read(&session->task_running);
813 pr_err("error: vpu_service session %d still has %d task running when closing\n", session->pid, task_running);
816 wake_up_interruptible_sync(&session->wait);
818 mutex_lock(&service.lock);
819 /* remove this filp from the asynchronusly notified filp's */
820 //vpu_service_fasync(-1, filp, 0);
821 list_del_init(&session->list_session);
822 vpu_service_session_clear(session);
824 filp->private_data = NULL;
825 mutex_unlock(&service.lock);
827 pr_debug("dev closed\n");
/* Character-device operations for the /dev/vpu_service misc device. */
831 static const struct file_operations vpu_service_fops = {
832 .unlocked_ioctl = vpu_service_ioctl,
833 .open = vpu_service_open,
834 .release = vpu_service_release,
835 //.fasync = vpu_service_fasync,
838 static struct miscdevice vpu_service_misc_device = {
839 .minor = MISC_DYNAMIC_MINOR,
840 .name = "vpu_service",
841 .fops = &vpu_service_fops,
/* Power-management hooks: all of them cancel the auto-off timer and cut
 * power; suspend preserves the 'enabled' flag so resume can re-enable. */
844 static void vpu_service_shutdown(struct platform_device *pdev)
846 pr_cont("shutdown...");
847 del_timer(&service.timer);
848 vpu_service_power_off();
852 static int vpu_service_suspend(struct platform_device *pdev, pm_message_t state)
855 pr_info("suspend...");
856 del_timer(&service.timer);
857 enabled = service.enabled;
858 vpu_service_power_off();
859 service.enabled = enabled;
/* Resume: power_on() is a no-op unless 'enabled' is false, so clear it
 * first to force the clock/power-domain sequence to run. */
863 static int vpu_service_resume(struct platform_device *pdev)
865 pr_info("resume...");
866 if (service.enabled) {
867 service.enabled = false;
868 vpu_service_power_on();
/* Dummy platform device/driver pair so the PM callbacks above are wired
 * into the driver model. */
874 static struct platform_device vpu_service_device = {
875 .name = "vpu_service",
879 static struct platform_driver vpu_service_driver = {
881 .name = "vpu_service",
882 .owner = THIS_MODULE,
884 .shutdown = vpu_service_shutdown,
885 .suspend = vpu_service_suspend,
886 .resume = vpu_service_resume,
/*
 * Read the decoder/pp synthesis-config and fuse registers plus the
 * encoder config register, and cache the decoded capability flags in
 * service.dec_config / service.enc_config for VPU_IOC_GET_HW_FUSE_STATUS.
 * Mirrors the on2/Hantro DWL hardware-config decoding.  Hardware must be
 * powered on when this runs.
 */
889 static void get_hw_info(void)
891 VPUHwDecConfig_t *dec = &service.dec_config;
892 VPUHwEncConfig_t *enc = &service.enc_config;
893 u32 configReg = dec_dev.hwregs[VPU_DEC_HWCFG0];
894 u32 asicID = dec_dev.hwregs[0];
/* First synthesis-config register: codec support bits + max dec width. */
896 dec->h264Support = (configReg >> DWL_H264_E) & 0x3U;
897 dec->jpegSupport = (configReg >> DWL_JPEG_E) & 0x01U;
898 if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
899 dec->jpegSupport = JPEG_PROGRESSIVE;
900 dec->mpeg4Support = (configReg >> DWL_MPEG4_E) & 0x3U;
901 dec->vc1Support = (configReg >> DWL_VC1_E) & 0x3U;
902 dec->mpeg2Support = (configReg >> DWL_MPEG2_E) & 0x01U;
903 dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
904 dec->refBufSupport = (configReg >> DWL_REF_BUFF_E) & 0x01U;
905 dec->vp6Support = (configReg >> DWL_VP6_E) & 0x01U;
906 dec->maxDecPicWidth = configReg & 0x07FFU;
908 /* 2nd Config register */
909 configReg = dec_dev.hwregs[VPU_DEC_HWCFG1];
910 if (dec->refBufSupport) {
911 if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
912 dec->refBufSupport |= 2;
913 if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
914 dec->refBufSupport |= 4;
916 dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
917 dec->vp7Support = (configReg >> DWL_VP7_E) & 0x01U;
918 dec->vp8Support = (configReg >> DWL_VP8_E) & 0x01U;
919 dec->avsSupport = (configReg >> DWL_AVS_E) & 0x01U;
/* Features gated on the HW generation (product ID in asicID[31:16]). */
922 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
923 dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
925 dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
928 if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
929 dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
931 dec->rvSupport = RV_NOT_SUPPORTED;
934 dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;
936 if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
937 dec->refBufSupport |= 8; /* enable HW support for offset */
/* Fuse registers: what the chip is actually licensed/blown to support. */
941 VPUHwFuseStatus_t hwFuseSts;
942 /* Decoder fuse configuration */
943 u32 fuseReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
945 hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
946 hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
947 hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
948 hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
949 hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
950 hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
951 hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
952 hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
953 hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
954 hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
955 hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
956 hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
957 hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
958 hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;
960 /* check max. decoder output width */
962 if (fuseReg & 0x8000U)
963 hwFuseSts.maxDecPicWidthFuse = 1920;
964 else if (fuseReg & 0x4000U)
965 hwFuseSts.maxDecPicWidthFuse = 1280;
966 else if (fuseReg & 0x2000U)
967 hwFuseSts.maxDecPicWidthFuse = 720;
968 else if (fuseReg & 0x1000U)
969 hwFuseSts.maxDecPicWidthFuse = 352;
970 else /* remove warning */
971 hwFuseSts.maxDecPicWidthFuse = 352;
973 hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;
975 /* Pp configuration */
976 configReg = dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];
978 if ((configReg >> DWL_PP_E) & 0x01U) {
980 dec->maxPpOutPicWidth = configReg & 0x07FFU;
981 /*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
982 dec->ppConfig = configReg;
985 dec->maxPpOutPicWidth = 0;
989 /* check the HW versio */
990 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
991 /* Pp configuration */
992 configReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
994 if ((configReg >> DWL_PP_E) & 0x01U) {
995 /* Pp fuse configuration */
996 u32 fuseRegPp = dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];
998 if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
999 hwFuseSts.ppSupportFuse = 1;
1000 /* check max. pp output width */
1001 if (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920;
1002 else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280;
1003 else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720;
1004 else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352;
1005 else hwFuseSts.maxPpOutPicWidthFuse = 352;
1006 hwFuseSts.ppConfigFuse = fuseRegPp;
1008 hwFuseSts.ppSupportFuse = 0;
1009 hwFuseSts.maxPpOutPicWidthFuse = 0;
1010 hwFuseSts.ppConfigFuse = 0;
1013 hwFuseSts.ppSupportFuse = 0;
1014 hwFuseSts.maxPpOutPicWidthFuse = 0;
1015 hwFuseSts.ppConfigFuse = 0;
/* Clamp every synthesis capability by its fuse: a feature present in
 * the silicon but not fused on is reported as unsupported. */
1018 if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse)
1019 dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse;
1020 if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse)
1021 dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse;
1022 if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED;
1023 if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED;
1024 if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED;
1025 if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED;
1026 if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse)
1027 dec->jpegSupport = JPEG_BASELINE;
1028 if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED;
1029 if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED;
1030 if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED;
1031 if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED;
1032 if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED;
1033 if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED;
1035 /* check the pp config vs fuse status */
1036 if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) {
1037 u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25);
1038 u32 alphaBlend = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24);
1039 u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25);
1040 u32 alphaBlendFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24);
1042 if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000;
1043 if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000;
1045 if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED;
1046 if (!hwFuseSts.refBufSupportFuse) dec->refBufSupport = REF_BUF_NOT_SUPPORTED;
1047 if (!hwFuseSts.rvSupportFuse) dec->rvSupport = RV_NOT_SUPPORTED;
1048 if (!hwFuseSts.avsSupportFuse) dec->avsSupport = AVS_NOT_SUPPORTED;
1049 if (!hwFuseSts.mvcSupportFuse) dec->mvcSupport = MVC_NOT_SUPPORTED;
/* Encoder capability register (word 63 of the encoder register file). */
1052 configReg = enc_dev.hwregs[63];
1053 enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
1054 enc->h264Enabled = (configReg >> 27) & 1;
1055 enc->mpeg4Enabled = (configReg >> 26) & 1;
1056 enc->jpegEnabled = (configReg >> 25) & 1;
1057 enc->vsEnabled = (configReg >> 24) & 1;
1058 enc->rgbEnabled = (configReg >> 28) & 1;
1059 enc->busType = (configReg >> 20) & 15;
1060 enc->synthesisLanguage = (configReg >> 16) & 15;
1061 enc->busWidth = (configReg >> 12) & 15;
/*
 * Threaded half of the decoder/pp interrupt: for each pending event
 * (counted by the hard-irq half below) complete the corresponding job
 * under service.lock.  The try_set_reg() dispatch of the next job is in
 * lines missing from this listing.
 */
1064 static irqreturn_t vdpu_isr_thread(int irq, void *dev_id)
1066 vpu_device *dev = (vpu_device *) dev_id;
1068 pr_debug("vdpu_isr_thread dec %d pp %d\n", atomic_read(&dev->isr_codec), atomic_read(&dev->isr_pp));
1070 if (likely(atomic_read(&dev->isr_codec))) {
1071 atomic_sub(1, &dev->isr_codec);
1072 pr_debug("DEC IRQ thread proc!\n");
1073 mutex_lock(&service.lock);
1074 if (NULL == service.reg_codec) {
1075 pr_err("error: dec isr with no task waiting\n");
1077 reg_from_run_to_done(service.reg_codec);
1079 mutex_unlock(&service.lock);
1082 if (atomic_read(&dev->isr_pp)) {
1083 atomic_sub(1, &dev->isr_pp);
1084 pr_debug("PP IRQ thread proc!\n");
1085 mutex_lock(&service.lock);
1086 if (NULL == service.reg_pproc) {
1087 pr_err("error: pp isr with no task waiting\n");
1089 reg_from_run_to_done(service.reg_pproc);
1091 mutex_unlock(&service.lock);
/*
 * Hard-irq half for the decoder/pp: read both interrupt status
 * registers, ack the pending bits, count the events for the thread and
 * return IRQ_WAKE_THREAD.  On a bus-error pattern (0x40001) the status
 * is re-read in a loop before acking.
 */
1097 static irqreturn_t vdpu_isr(int irq, void *dev_id)
1099 vpu_device *dev = (vpu_device *) dev_id;
1100 u32 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1101 u32 irq_status_pp = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
1103 pr_debug("vdpu_isr dec %x pp %x\n", irq_status_dec, irq_status_pp);
1105 if (irq_status_dec & DEC_INTERRUPT_BIT) {
1106 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1107 if ((irq_status_dec & 0x40001) == 0x40001)
1110 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1111 } while ((irq_status_dec & 0x40001) == 0x40001);
1114 writel(irq_status_dec & (~DEC_INTERRUPT_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER);
1115 pr_debug("DEC IRQ received!\n");
1116 atomic_add(1, &dev->isr_codec);
1119 if (irq_status_pp & PP_INTERRUPT_BIT) {
/* NOTE(review): acks with ~DEC_INTERRUPT_BIT rather than
 * ~PP_INTERRUPT_BIT; both are 0x100 so the value is identical, but the
 * name should arguably be PP_INTERRUPT_BIT. */
1121 writel(irq_status_pp & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
1122 pr_debug("PP IRQ received!\n");
1123 atomic_add(1, &dev->isr_pp);
1125 return IRQ_WAKE_THREAD;
/* Threaded half of the encoder interrupt: complete the running encode
 * job (same pattern as the decoder thread above). */
1128 static irqreturn_t vepu_isr_thread(int irq, void *dev_id)
1130 struct vpu_device *dev = (struct vpu_device *) dev_id;
1132 pr_debug("enc_isr_thread\n");
1134 if (likely(atomic_read(&dev->isr_codec))) {
1135 atomic_sub(1, &dev->isr_codec);
1136 pr_debug("ENC IRQ thread proc!\n");
1137 mutex_lock(&service.lock);
1138 if (NULL == service.reg_codec) {
1139 pr_err("error: enc isr with no task waiting\n");
1141 reg_from_run_to_done(service.reg_codec);
1143 mutex_unlock(&service.lock);
/* Hard-irq half for the encoder: ack the status bit, count the event,
 * wake the thread. */
1149 static irqreturn_t vepu_isr(int irq, void *dev_id)
1151 struct vpu_device *dev = (struct vpu_device *) dev_id;
1152 u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);
1154 pr_debug("enc_isr\n");
1156 if (likely(irq_status & ENC_INTERRUPT_BIT)) {
1158 writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
1159 pr_debug("ENC IRQ received!\n");
1160 atomic_add(1, &dev->isr_codec);
1162 return IRQ_WAKE_THREAD;
1165 static int __init vpu_service_proc_init(void);
/*
 * Module init: set up the two hardware-unit descriptors and the global
 * service state, power on, map MMIO, request the two threaded IRQs,
 * register the misc device and platform device/driver, read the HW
 * capabilities (get_hw_info call is in a missing line), then power back
 * off until first use.  Error paths unwind in reverse order.
 */
1166 static int __init vpu_service_init(void)
1170 pr_debug("baseaddr = 0x%08x vdpu irq = %d vepu irq = %d\n", RK29_VCODEC_PHYS, IRQ_VDPU, IRQ_VEPU);
1172 dec_dev.iobaseaddr = RK29_VCODEC_PHYS + DEC_PHY_OFFSET;
1173 dec_dev.iosize = DEC_IO_SIZE;
1174 atomic_set(&dec_dev.isr_codec, 0);
1175 atomic_set(&dec_dev.isr_pp, 0);
1176 enc_dev.iobaseaddr = RK29_VCODEC_PHYS;
1177 enc_dev.iosize = ENC_IO_SIZE;
1178 atomic_set(&enc_dev.isr_codec, 0);
1179 atomic_set(&enc_dev.isr_pp, 0);
1181 INIT_LIST_HEAD(&service.waiting);
1182 INIT_LIST_HEAD(&service.running);
1183 INIT_LIST_HEAD(&service.done);
1184 INIT_LIST_HEAD(&service.session);
1185 mutex_init(&service.lock);
1186 service.reg_codec = NULL;
1187 service.reg_pproc = NULL;
1188 atomic_set(&service.total_running, 0);
1189 service.enabled = false;
/* Arm the auto power-off timer, then power on for probing. */
1192 init_timer(&service.timer);
1193 service.timer.expires = jiffies + POWER_OFF_DELAY;
1194 service.timer.function = vpu_service_power_off_work_func;
1195 vpu_service_power_on();
1197 ret = vpu_service_reserve_io();
1199 pr_err("error: reserve io failed\n");
1200 goto err_reserve_io;
1203 /* get the IRQ line */
1204 ret = request_threaded_irq(IRQ_VDPU, vdpu_isr, vdpu_isr_thread, IRQF_SHARED, "vdpu", (void *)&dec_dev);
1206 pr_err("error: can't request vdpu irq %d\n", IRQ_VDPU);
1207 goto err_req_vdpu_irq;
1210 ret = request_threaded_irq(IRQ_VEPU, vepu_isr, vepu_isr_thread, IRQF_SHARED, "vepu", (void *)&enc_dev);
1212 pr_err("error: can't request vepu irq %d\n", IRQ_VEPU);
1213 goto err_req_vepu_irq;
1216 ret = misc_register(&vpu_service_misc_device);
1218 pr_err("error: misc_register failed\n");
1222 platform_device_register(&vpu_service_device);
1223 platform_driver_probe(&vpu_service_driver, NULL);
/* Probe done: idle the hardware until the first job arrives. */
1225 del_timer(&service.timer);
1226 vpu_service_power_off();
1227 pr_info("init success\n");
1229 vpu_service_proc_init();
/* Error unwind labels (goto targets are in lines missing from this
 * listing). */
1233 free_irq(IRQ_VEPU, (void *)&enc_dev);
1235 free_irq(IRQ_VDPU, (void *)&dec_dev);
1237 pr_info("init failed\n");
1239 del_timer(&service.timer);
1240 vpu_service_power_off();
1241 vpu_service_release_io();
1243 pr_info("init failed\n");
/* Module exit: stop the power-off timer, power down, and unregister
 * everything init registered (reverse order). */
1247 static void __exit vpu_service_exit(void)
1249 del_timer(&service.timer);
1250 vpu_service_power_off();
1251 platform_device_unregister(&vpu_service_device);
1252 platform_driver_unregister(&vpu_service_driver);
1253 misc_deregister(&vpu_service_misc_device);
1254 free_irq(IRQ_VEPU, (void *)&enc_dev);
1255 free_irq(IRQ_VDPU, (void *)&dec_dev);
1259 module_init(vpu_service_init);
1260 module_exit(vpu_service_exit);
/* Optional /proc/vpu_service debug view: dumps both register files and
 * the per-session queue state.  Powers the hardware on first so the
 * register reads are valid. */
1262 #ifdef CONFIG_PROC_FS
1263 #include <linux/proc_fs.h>
1264 #include <linux/seq_file.h>
1266 static int proc_vpu_service_show(struct seq_file *s, void *v)
1269 vpu_reg *reg, *reg_tmp;
1270 vpu_session *session, *session_tmp;
1272 vpu_service_power_on();
1273 seq_printf(s, "\nENC Registers:\n");
1274 n = enc_dev.iosize >> 2;
1275 for (i = 0; i < n; i++) {
1276 seq_printf(s, "\tswreg%.3d = %08X\n", i, readl(enc_dev.hwregs + i));
1278 seq_printf(s, "\nDEC Registers:\n");
1279 n = dec_dev.iosize >> 2;
1280 for (i = 0; i < n; i++) {
1281 seq_printf(s, "\tswreg%.3d = %08X\n", i, readl(dec_dev.hwregs + i));
1284 seq_printf(s, "\nvpu service status:\n");
1285 mutex_lock(&service.lock);
1286 list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
1287 seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
1288 //seq_printf(s, "waiting reg set %d\n");
1289 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
1290 seq_printf(s, "waiting register set\n");
1292 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
1293 seq_printf(s, "running register set\n");
1295 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
1296 seq_printf(s, "done register set\n");
1299 mutex_unlock(&service.lock);
/* Standard single_open() seq-file boilerplate. */
1304 static int proc_vpu_service_open(struct inode *inode, struct file *file)
1306 return single_open(file, proc_vpu_service_show, NULL);
1309 static const struct file_operations proc_vpu_service_fops = {
1310 .open = proc_vpu_service_open,
1312 .llseek = seq_lseek,
1313 .release = single_release,
1316 static int __init vpu_service_proc_init(void)
1318 proc_create("vpu_service", 0, NULL, &proc_vpu_service_fops);
1322 #endif /* CONFIG_PROC_FS */