1 /* arch/arm/mach-rk29/vpu.c
3 * Copyright (C) 2010 ROCKCHIP, Inc.
4 * author: chenhengming chm@rock-chips.com
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #ifdef CONFIG_RK29_VPU_DEBUG
19 #define pr_fmt(fmt) "VPU_SERVICE: %s: " fmt, __func__
21 #define pr_fmt(fmt) "VPU_SERVICE: " fmt
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
33 #include <linux/ioport.h>
34 #include <linux/miscdevice.h>
36 #include <linux/poll.h>
37 #include <linux/platform_device.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/timer.h>
42 #include <asm/uaccess.h>
44 #include <mach/irqs.h>
45 #include <plat/vpu_service.h>
50 #define DEC_INTERRUPT_REGISTER 1
51 #define PP_INTERRUPT_REGISTER 60
52 #define ENC_INTERRUPT_REGISTER 1
54 #define DEC_INTERRUPT_BIT 0x100
55 #define PP_INTERRUPT_BIT 0x100
56 #define ENC_INTERRUPT_BIT 0x1
58 #define REG_NUM_DEC (60)
59 #define REG_NUM_PP (41)
60 #if defined(CONFIG_ARCH_RK29)
61 #define REG_NUM_ENC (96)
62 #elif defined(CONFIG_ARCH_RK30)
63 #define REG_NUM_ENC (164)
65 #define REG_NUM_DEC_PP (REG_NUM_DEC+REG_NUM_PP)
66 #define SIZE_REG(reg) ((reg)*4)
68 #define DEC_IO_SIZE ((100 + 1) * 4) /* bytes */
69 #if defined(CONFIG_ARCH_RK29)
70 #define ENC_IO_SIZE (96 * 4) /* bytes */
71 #elif defined(CONFIG_ARCH_RK30)
72 #define ENC_IO_SIZE (164 * 4) /* bytes */
74 #define REG_NUM_DEC_PP (REG_NUM_DEC+REG_NUM_PP)
75 static const u16 dec_hw_ids[] = { 0x8190, 0x8170, 0x9170, 0x9190, 0x6731 };
76 #if defined(CONFIG_ARCH_RK29)
77 static const u16 enc_hw_ids[] = { 0x6280, 0x7280, 0x8270 };
78 #define DEC_PHY_OFFSET 0x200
79 #elif defined(CONFIG_ARCH_RK30)
80 static const u16 enc_hw_ids[] = { 0x6280, 0x7280, 0x8270, 0x8290, 0x4831 };
81 #define DEC_PHY_OFFSET 0x400
82 #define RK29_VCODEC_PHYS RK30_VCODEC_PHYS
85 #define VPU_REG_EN_ENC 14
86 #define VPU_REG_ENC_GATE 2
87 #define VPU_REG_ENC_GATE_BIT (1<<4)
89 #define VPU_REG_EN_DEC 1
90 #define VPU_REG_DEC_GATE 2
91 #define VPU_REG_DEC_GATE_BIT (1<<10)
92 #define VPU_REG_EN_PP 0
93 #define VPU_REG_PP_GATE 1
94 #define VPU_REG_PP_GATE_BIT (1<<8)
95 #define VPU_REG_EN_DEC_PP 1
96 #define VPU_REG_DEC_PP_GATE 61
97 #define VPU_REG_DEC_PP_GATE_BIT (1<<8)
101 * struct for process session which connect to vpu
103 * @author ChenHengming (2011-5-3)
/*
 * Per-client session: one per open file handle on the vpu_service misc
 * device. Register sets owned by the session move waiting -> running ->
 * done; 'wait' is woken when a set completes (reg_from_run_to_done).
 * NOTE(review): later code reads session->pid, not visible in this view —
 * confirm the field exists in the full struct.
 */
105 typedef struct vpu_session {
106 VPU_CLIENT_TYPE type;
107 /* a linked list of data so we can access them for debugging */
108 struct list_head list_session;
109 /* a linked list of register data waiting for process */
110 struct list_head waiting;
111 /* a linked list of register data in processing */
112 struct list_head running;
113 /* a linked list of register data processed */
114 struct list_head done;
115 wait_queue_head_t wait;
/* number of register sets submitted by this session and not yet returned */
117 atomic_t task_running;
121 * struct for process register set
123 * @author ChenHengming (2011-5-4)
/*
 * One queued register set. reg[] is sized for the larger of the encoder
 * and the combined dec+pp layouts.
 * NOTE(review): VPU_REG_NUM_ENC / VPU_REG_NUM_DEC_PP differ from the
 * REG_NUM_* macros defined at the top of this file — presumably declared
 * in <plat/vpu_service.h>; confirm they agree with REG_NUM_*.
 */
125 #define VPU_REG_NUM_MAX (((VPU_REG_NUM_ENC)>(VPU_REG_NUM_DEC_PP))?(VPU_REG_NUM_ENC):(VPU_REG_NUM_DEC_PP))
126 typedef struct vpu_reg {
127 VPU_CLIENT_TYPE type;
128 vpu_session *session;
129 struct list_head session_link; /* link to vpu service session */
130 struct list_head status_link; /* link to register set list */
132 unsigned long reg[VPU_REG_NUM_MAX];
/* One hardware unit (decoder or encoder): physical base and mapped regs. */
135 typedef struct vpu_device {
136 unsigned long iobaseaddr;
/* ioremap'd register window; volatile because accessed as MMIO */
138 volatile u32 *hwregs;
/*
 * Global driver state. 'lock' (elided in this view) guards the reg/session
 * lists; 'lock_power' guards the enabled flag and clock state.
 */
141 typedef struct vpu_service_info {
143 spinlock_t lock_power;
144 struct timer_list timer; /* timer for power off */
145 struct list_head waiting; /* link to link_reg in struct vpu_reg */
146 struct list_head running; /* link to link_reg in struct vpu_reg */
147 struct list_head done; /* link to link_reg in struct vpu_reg */
148 struct list_head session; /* link to list_session in struct vpu_session */
149 atomic_t total_running;
/* hardware capability snapshots filled in by get_hw_info() */
154 VPUHwDecConfig_t dec_config;
155 VPUHwEncConfig_t enc_config;
/* ioctl argument carrier: user pointer + size (fields elided here) */
158 typedef struct vpu_request
/* Clock handles obtained in vpu_get_clk(); shared by power on/off paths. */
164 static struct clk *pd_video;
165 static struct clk *clk_vpu; /* for power on notify */
166 static struct clk *aclk_vepu;
167 static struct clk *hclk_vepu;
168 static struct clk *aclk_ddr_vepu;
169 static struct clk *hclk_cpu_vcodec;
170 static vpu_service_info service;
171 static vpu_device dec_dev;
172 static vpu_device enc_dev;
/* NOTE(review): both macros should be parenthesized ((4*HZ)) to be safe
 * inside larger expressions; current uses happen to be benign. */
174 #define POWER_OFF_DELAY 4*HZ /* 4s */
175 #define TIMEOUT_DELAY 2*HZ /* 2s */
/*
 * Look up all clocks the VPU needs. NOTE(review): no IS_ERR() checks are
 * visible on any clk_get() result — a missing clock would only fail later
 * in clk_enable(); confirm whether the elided lines validate them.
 */
177 static void vpu_get_clk(void)
179 pd_video = clk_get(NULL, "pd_video");
180 clk_vpu = clk_get(NULL, "vpu");
181 aclk_vepu = clk_get(NULL, "aclk_vepu");
182 hclk_vepu = clk_get(NULL, "hclk_vepu");
183 aclk_ddr_vepu = clk_get(NULL, "aclk_ddr_vepu");
184 hclk_cpu_vcodec = clk_get(NULL, "hclk_cpu_vcodec");
/*
 * Release clock references from vpu_get_clk(). Only two clk_put() calls
 * are visible here; the puts for pd_video/clk_vpu/aclk_vepu/hclk_vepu are
 * presumably in the elided lines — confirm all six are released.
 */
187 static void vpu_put_clk(void)
193 clk_put(aclk_ddr_vepu);
194 clk_put(hclk_cpu_vcodec);
/*
 * Hard-reset the video codec block (per-SoC reset lines), then drop any
 * in-flight register set ownership. Resets are asserted and de-asserted
 * in mirror order; RK30 additionally idles the video power domain around
 * the reset. Caller is expected to hold service.lock since reg_codec/
 * reg_pproc/reg_resev are cleared here — TODO confirm at call sites.
 */
197 static void vpu_reset(void)
199 #if defined(CONFIG_ARCH_RK29)
200 clk_disable(aclk_ddr_vepu);
201 cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
202 cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
203 cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
204 cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
206 cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
207 cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
208 cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
209 cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
210 clk_enable(aclk_ddr_vepu);
211 #elif defined(CONFIG_ARCH_RK30)
212 pmu_set_idle_request(IDLE_REQ_VIDEO, true);
213 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
214 cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
215 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
216 cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
218 cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
219 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
220 cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
221 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
222 pmu_set_idle_request(IDLE_REQ_VIDEO, false);
/* forget whatever was running: the hardware state is gone after reset */
224 service.reg_codec = NULL;
225 service.reg_pproc = NULL;
226 service.reg_resev = NULL;
/* forward declaration: reg_deinit defined below */
229 static void reg_deinit(vpu_reg *reg);
/*
 * Free every register set still owned by a session (waiting, running and
 * done lists). The loop bodies are elided in this view — presumably each
 * calls reg_deinit(reg); confirm. Caller must hold service.lock (see
 * vpu_service_release / ioctl timeout path).
 */
230 static void vpu_service_session_clear(vpu_session *session)
233 list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
236 list_for_each_entry_safe(reg, n, &session->running, session_link) {
239 list_for_each_entry_safe(reg, n, &session->done, session_link) {
/*
 * Debug helper: printk the global counters, current hardware owners and
 * every session's queued register sets. Pointer values are printed as
 * unsigned int casts (32-bit assumption on this platform).
 */
244 static void vpu_service_dump(void)
247 vpu_reg *reg, *reg_tmp;
248 vpu_session *session, *session_tmp;
250 running = atomic_read(&service.total_running);
251 printk("total_running %d\n", running);
253 printk("reg_codec 0x%.8x\n", (unsigned int)service.reg_codec);
254 printk("reg_pproc 0x%.8x\n", (unsigned int)service.reg_pproc);
255 printk("reg_resev 0x%.8x\n", (unsigned int)service.reg_resev);
257 list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
258 printk("session pid %d type %d:\n", session->pid, session->type);
259 running = atomic_read(&session->task_running);
260 printk("task_running %d\n", running);
261 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
262 printk("waiting register set 0x%.8x\n", (unsigned int)reg);
264 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
265 printk("running register set 0x%.8x\n", (unsigned int)reg);
267 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
268 printk("done register set 0x%.8x\n", (unsigned int)reg);
/*
 * Gate all VPU clocks and the power domain. Idempotent: returns early
 * (unlocking) if already off. If tasks are still running it warns and
 * (per the alert text) delays ~50ms for them — the delay call itself is
 * in elided lines. Runs under lock_power (bh-disabled: also called from
 * the power-off timer).
 */
273 static void vpu_service_power_off(void)
277 spin_lock_bh(&service.lock_power);
278 if (!service.enabled) {
279 spin_unlock_bh(&service.lock_power);
283 service.enabled = false;
284 total_running = atomic_read(&service.total_running);
286 pr_alert("alert: power off when %d task running!!\n", total_running);
288 pr_alert("alert: delay 50 ms for running task\n");
292 printk("vpu: power off...");
293 #ifdef CONFIG_ARCH_RK29
294 pmu_set_power_domain(PD_VCODEC, false);
296 clk_disable(pd_video);
/* disable in reverse order of vpu_service_power_on() */
299 clk_disable(hclk_cpu_vcodec);
300 clk_disable(aclk_ddr_vepu);
301 clk_disable(hclk_vepu);
302 clk_disable(aclk_vepu);
303 clk_disable(clk_vpu);
305 spin_unlock_bh(&service.lock_power);
/* Timer callback (service.timer): power down after POWER_OFF_DELAY idle. */
308 static void vpu_service_power_off_work_func(unsigned long data)
311 vpu_service_power_off();
/*
 * Push the auto-power-off deadline back by POWER_OFF_DELAY while the
 * hardware is in use. Logs an error if called with power already off
 * (callers must power on first).
 */
314 static void vpu_service_power_maintain(void)
316 if (service.enabled) {
317 mod_timer(&service.timer, jiffies + POWER_OFF_DELAY);
319 pr_err("error: maintain power when power is off!\n");
/*
 * Ungate clocks and the power domain if currently off, then (re)arm the
 * power-off timer. clk_vpu is enabled before taking lock_power purely as
 * an "on" notification (see comment on line 325); it is dropped again on
 * the already-enabled path (line 351) so the net count stays balanced.
 */
323 static void vpu_service_power_on(void)
325 clk_enable(clk_vpu); /* notify vpu on without lock. */
327 spin_lock_bh(&service.lock_power);
328 if (!service.enabled) {
329 service.enabled = true;
330 printk("vpu: power on\n");
333 clk_enable(aclk_vepu);
334 clk_enable(hclk_vepu);
335 clk_enable(hclk_cpu_vcodec);
337 #ifdef CONFIG_ARCH_RK29
338 pmu_set_power_domain(PD_VCODEC, true);
340 clk_enable(pd_video);
343 clk_enable(aclk_ddr_vepu);
344 mod_timer(&service.timer, jiffies + POWER_OFF_DELAY);
345 spin_unlock_bh(&service.lock_power);
/* already on: just refresh the idle timer and drop the notify ref */
347 spin_unlock_bh(&service.lock_power);
348 vpu_service_power_maintain();
351 clk_disable(clk_vpu);
/*
 * Allocate a vpu_reg, copy 'size' bytes of register values from user
 * space, and queue it on both the global and the session waiting lists
 * under service.lock.
 * NOTE(review): no bound of 'size' against sizeof(reg->reg)
 * (VPU_REG_NUM_MAX * sizeof(unsigned long)) is visible here, and size
 * comes straight from the VPU_IOC_SET_REG request — unless an elided
 * line clamps it, this is a user-triggerable heap overflow. Verify.
 */
354 static vpu_reg *reg_init(vpu_session *session, void __user *src, unsigned long size)
357 vpu_reg *reg = kmalloc(sizeof(vpu_reg), GFP_KERNEL);
359 pr_err("error: kmalloc fail in reg_init\n");
363 reg->session = session;
364 reg->type = session->type;
366 INIT_LIST_HEAD(&reg->session_link);
367 INIT_LIST_HEAD(&reg->status_link);
369 if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
370 pr_err("error: copy_from_user failed in reg_init\n");
375 spin_lock_irqsave(&service.lock, flag);
376 list_add_tail(&reg->status_link, &service.waiting);
377 list_add_tail(&reg->session_link, &session->waiting);
378 spin_unlock_irqrestore(&service.lock, flag);
/*
 * Unlink a register set from both lists, clear any hardware-owner pointer
 * referencing it, and (in elided lines, presumably) kfree it. Caller must
 * hold service.lock.
 */
383 static void reg_deinit(vpu_reg *reg)
385 list_del_init(&reg->session_link);
386 list_del_init(&reg->status_link);
387 if (reg == service.reg_codec) service.reg_codec = NULL;
388 if (reg == service.reg_pproc) service.reg_pproc = NULL;
/*
 * Move a register set from the waiting lists (global + session) to the
 * corresponding running lists. Caller must hold service.lock.
 */
392 static void reg_from_wait_to_run(vpu_reg *reg)
394 list_del_init(&reg->status_link);
395 list_add_tail(&reg->status_link, &service.running);
397 list_del_init(&reg->session_link);
398 list_add_tail(&reg->session_link, &reg->session->running);
/*
 * Read back 'count' 32-bit registers from MMIO into reg->reg[]. Note the
 * destination is accessed as u32* even though reg[] is unsigned long[]
 * (32-bit platform assumption). Loop body is elided; presumably
 * dst[i] = src[i].
 */
401 static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
404 u32 *dst = (u32 *)&reg->reg[0];
405 for (i = 0; i < count; i++)
/*
 * Completion path (called from the ISRs under service.lock): move the set
 * to the done lists, snapshot the hardware registers per client type,
 * release hardware ownership, drop the running counters and wake the
 * session's waiter. For PP and DEC_PP the PP interrupt register is also
 * cleared in hardware.
 */
409 static void reg_from_run_to_done(vpu_reg *reg)
411 list_del_init(&reg->status_link);
412 list_add_tail(&reg->status_link, &service.done);
414 list_del_init(&reg->session_link);
415 list_add_tail(&reg->session_link, &reg->session->done);
/* per-type readback; switch/case lines are elided in this view */
419 service.reg_codec = NULL;
420 reg_copy_from_hw(reg, enc_dev.hwregs, REG_NUM_ENC);
424 service.reg_codec = NULL;
425 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC);
429 service.reg_pproc = NULL;
430 reg_copy_from_hw(reg, dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_PP);
431 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
435 service.reg_codec = NULL;
436 service.reg_pproc = NULL;
437 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC_PP);
438 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
442 pr_err("error: copy reg from hw with unknown type %d\n", reg->type);
446 atomic_sub(1, &reg->session->task_running);
447 atomic_sub(1, &service.total_running);
448 wake_up_interruptible_sync(&reg->session->wait);
/*
 * Program the hardware from reg->reg[] and kick it off. For every client
 * type the clock-gate register is OR'd with its gate bit and the enable
 * register is written LAST so the job only starts once all other
 * registers are loaded. Bumps the running counters up-front; the unknown-
 * type branch (line 522) un-bumps them. On RK30 the encoder path soft-
 * resets the codec first (lines 461-464).
 * NOTE(review): non-static although it looks file-internal — confirm no
 * external users before adding 'static'.
 */
451 void reg_copy_to_hw(vpu_reg *reg)
454 u32 *src = (u32 *)&reg->reg[0];
455 atomic_add(1, &service.total_running);
456 atomic_add(1, &reg->session->task_running);
459 u32 *dst = (u32 *)enc_dev.hwregs;
460 #if defined(CONFIG_ARCH_RK30)
461 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
462 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
463 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
464 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
466 service.reg_codec = reg;
/* pre-load the enable reg with only the mode bits (0x6), enable later */
468 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
470 for (i = 0; i < VPU_REG_EN_ENC; i++)
473 for (i = VPU_REG_EN_ENC + 1; i < REG_NUM_ENC; i++)
478 dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
479 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC];
482 u32 *dst = (u32 *)dec_dev.hwregs;
483 service.reg_codec = reg;
/* decoder: write high registers down to the gate reg, then gate+enable */
485 for (i = REG_NUM_DEC - 1; i > VPU_REG_DEC_GATE; i--)
490 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
491 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
/* post-processor registers live at an offset inside the decoder window */
494 u32 *dst = (u32 *)dec_dev.hwregs + PP_INTERRUPT_REGISTER;
495 service.reg_pproc = reg;
497 dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
499 for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_PP; i++)
504 dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
/* combined dec+pp job: owns both hardware slots at once */
507 u32 *dst = (u32 *)dec_dev.hwregs;
508 service.reg_codec = reg;
509 service.reg_pproc = reg;
511 for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_DEC_PP; i++)
514 dst[VPU_REG_EN_DEC_PP] = src[VPU_REG_EN_DEC_PP] | 0x2;
517 dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
518 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
519 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
522 pr_err("error: unsupport session type %d", reg->type);
523 atomic_sub(1, &service.total_running);
524 atomic_sub(1, &reg->session->task_running);
/*
 * Scheduler: under service.lock, take the head of the global waiting list
 * and dispatch it if the hardware slot(s) it needs are free — encoder and
 * decoder share reg_codec; PP can run beside a pure decode (line 554) but
 * DEC_PP needs both slots free. Elided lines hold the switch/case
 * structure and the reg_copy_to_hw() call after reg_from_wait_to_run().
 */
530 static void try_set_reg(void)
533 // first get reg from reg list
534 spin_lock_irqsave(&service.lock, flag);
535 if (!list_empty(&service.waiting)) {
537 vpu_reg *reg = list_entry(service.waiting.next, vpu_reg, status_link);
/* keep the power-off timer pushed back while work is pending */
539 vpu_service_power_maintain();
542 if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
546 if (NULL == service.reg_codec)
550 if (NULL == service.reg_codec) {
551 if (NULL == service.reg_pproc)
554 if ((VPU_DEC == service.reg_codec->type) && (NULL == service.reg_pproc))
559 if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
563 printk("undefined reg type %d\n", reg->type);
567 reg_from_wait_to_run(reg);
571 spin_unlock_irqrestore(&service.lock, flag);
/*
 * Copy a completed register set back to user space, sized by client type
 * (switch/case lines elided). Returns nonzero (presumably -EFAULT) on
 * copy failure or unknown type.
 */
574 static int return_reg(vpu_reg *reg, u32 __user *dst)
579 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_ENC)))
584 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_DEC)))
589 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_PP)))
594 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_DEC_PP)))
600 pr_err("error: copy reg to user with unknown type %d\n", reg->type);
/*
 * Main ioctl entry.
 *  SET_CLIENT_TYPE: record the session's hardware role from 'arg'.
 *  GET_HW_FUSE_STATUS: copy the dec or enc capability struct to the user
 *    pointer in the request (dec for every non-encoder type).
 *  SET_REG: build a vpu_reg from user memory (see reg_init's size
 *    caveat), power on, and (elided) try_set_reg().
 *  GET_REG: wait up to TIMEOUT_DELAY for a completed set; on timeout,
 *    reclaim counters, reset the hardware and clear the session; on
 *    success return the head of session->done under service.lock.
 */
608 static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
610 vpu_session *session = (vpu_session *)filp->private_data;
611 if (NULL == session) {
616 case VPU_IOC_SET_CLIENT_TYPE : {
617 session->type = (VPU_CLIENT_TYPE)arg;
620 case VPU_IOC_GET_HW_FUSE_STATUS : {
622 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
623 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
626 if (VPU_ENC != session->type) {
627 if (copy_to_user((void __user *)req.req, &service.dec_config, sizeof(VPUHwDecConfig_t))) {
628 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
632 if (copy_to_user((void __user *)req.req, &service.enc_config, sizeof(VPUHwEncConfig_t))) {
633 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
641 case VPU_IOC_SET_REG : {
644 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
645 pr_err("error: VPU_IOC_SET_REG copy_from_user failed\n");
649 reg = reg_init(session, (void __user *)req.req, req.size);
653 vpu_service_power_on();
659 case VPU_IOC_GET_REG : {
663 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
664 pr_err("error: VPU_IOC_GET_REG copy_from_user failed\n");
667 int ret = wait_event_interruptible_timeout(session->wait, !list_empty(&session->done), TIMEOUT_DELAY);
668 if (!list_empty(&session->done)) {
670 pr_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session->pid, ret);
674 if (unlikely(ret < 0)) {
675 pr_err("error: pid %d wait task ret %d\n", session->pid, ret);
676 } else if (0 == ret) {
677 pr_err("error: pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
/* timeout recovery: reclaim counters and hard-reset the hardware */
681 spin_lock_irqsave(&service.lock, flag);
683 int task_running = atomic_read(&session->task_running);
686 atomic_set(&session->task_running, 0);
687 atomic_sub(task_running, &service.total_running);
688 printk("%d task is running but not return, reset hardware...", task_running);
692 vpu_service_session_clear(session);
693 spin_unlock_irqrestore(&service.lock, flag);
696 spin_unlock_irqrestore(&service.lock, flag);
698 spin_lock_irqsave(&service.lock, flag);
699 reg = list_entry(session->done.next, vpu_reg, session_link);
700 return_reg(reg, (u32 __user *)req.req);
701 spin_unlock_irqrestore(&service.lock, flag);
705 pr_err("error: unknow vpu service ioctl cmd %x\n", cmd);
/*
 * Compare the unit's hardware ID (upper 16 bits of register 0) against a
 * table of known products. Returns nonzero on match, 0 otherwise. The
 * loop header is elided; line 721 indexes hwids[num], so 'num' is
 * presumably decremented as the loop counter.
 */
713 static int vpu_service_check_hw_id(struct vpu_device * dev, const u16 *hwids, size_t num)
715 u32 hwid = readl(dev->hwregs);
716 pr_info("HW ID = 0x%08x\n", hwid);
718 hwid = (hwid >> 16) & 0xFFFF; /* product version only */
721 if (hwid == hwids[num]) {
722 pr_info("Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
727 pr_info("No Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
/*
 * Undo vpu_service_reserve_io for both units: iounmap then release the
 * reserved physical region. Elided lines presumably null-check hwregs
 * before each iounmap.
 */
731 static void vpu_service_release_io(void)
734 iounmap((void *)dec_dev.hwregs);
735 release_mem_region(dec_dev.iobaseaddr, dec_dev.iosize);
738 iounmap((void *)enc_dev.hwregs);
739 release_mem_region(enc_dev.iobaseaddr, enc_dev.iosize);
/*
 * Reserve and map the decoder then encoder register windows and verify
 * each unit's hardware ID. Error paths (elided gotos) fall through to
 * vpu_service_release_io() at line 789. Returns 0 on success, negative
 * on failure (exact codes in elided lines).
 */
742 static int vpu_service_reserve_io(void)
744 unsigned long iobaseaddr;
745 unsigned long iosize;
747 iobaseaddr = dec_dev.iobaseaddr;
748 iosize = dec_dev.iosize;
750 if (!request_mem_region(iobaseaddr, iosize, "vdpu_io")) {
751 pr_info("failed to reserve dec HW regs\n");
755 dec_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
757 if (dec_dev.hwregs == NULL) {
758 pr_info("failed to ioremap dec HW regs\n");
762 /* check for correct HW */
763 if (!vpu_service_check_hw_id(&dec_dev, dec_hw_ids, ARRAY_SIZE(dec_hw_ids))) {
767 iobaseaddr = enc_dev.iobaseaddr;
768 iosize = enc_dev.iosize;
770 if (!request_mem_region(iobaseaddr, iosize, "hx280enc")) {
771 pr_info("failed to reserve enc HW regs\n");
775 enc_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
777 if (enc_dev.hwregs == NULL) {
778 pr_info("failed to ioremap enc HW regs\n");
782 /* check for correct HW */
783 if (!vpu_service_check_hw_id(&enc_dev, enc_hw_ids, ARRAY_SIZE(enc_hw_ids))) {
789 vpu_service_release_io();
/*
 * file_operations.open: allocate and initialize a session, link it into
 * the global session list under service.lock, and stash it in
 * filp->private_data. Type starts as VPU_TYPE_BUTT until the client
 * issues VPU_IOC_SET_CLIENT_TYPE.
 */
793 static int vpu_service_open(struct inode *inode, struct file *filp)
796 vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
797 if (NULL == session) {
798 pr_err("error: unable to allocate memory for vpu_session.");
802 session->type = VPU_TYPE_BUTT;
803 session->pid = current->pid;
804 INIT_LIST_HEAD(&session->waiting);
805 INIT_LIST_HEAD(&session->running);
806 INIT_LIST_HEAD(&session->done);
807 INIT_LIST_HEAD(&session->list_session);
808 init_waitqueue_head(&session->wait);
809 atomic_set(&session->task_running, 0);
810 spin_lock_irqsave(&service.lock, flag);
811 list_add_tail(&session->list_session, &service.session);
812 filp->private_data = (void *)session;
813 spin_unlock_irqrestore(&service.lock, flag);
815 pr_debug("dev opened\n");
816 return nonseekable_open(inode, filp);
/*
 * file_operations.release: warn if the session still has tasks in flight
 * (the elided lines around 829-832 presumably wait for them), then unlink
 * and clear the session under service.lock and free it (kfree likely in
 * the elided line after 838).
 */
819 static int vpu_service_release(struct inode *inode, struct file *filp)
823 vpu_session *session = (vpu_session *)filp->private_data;
827 task_running = atomic_read(&session->task_running);
829 pr_err("error: vpu_service session %d still has %d task running when closing\n", session->pid, task_running);
832 wake_up_interruptible_sync(&session->wait);
834 spin_lock_irqsave(&service.lock, flag);
835 /* remove this filp from the asynchronusly notified filp's */
836 //vpu_service_fasync(-1, filp, 0);
837 list_del_init(&session->list_session);
838 vpu_service_session_clear(session);
840 filp->private_data = NULL;
841 spin_unlock_irqrestore(&service.lock, flag);
843 pr_debug("dev closed\n");
/* Character-device entry points; fasync support is stubbed out. */
847 static const struct file_operations vpu_service_fops = {
848 .unlocked_ioctl = vpu_service_ioctl,
849 .open = vpu_service_open,
850 .release = vpu_service_release,
851 //.fasync = vpu_service_fasync,
/* Registered as /dev/vpu_service with a dynamically assigned minor. */
854 static struct miscdevice vpu_service_misc_device = {
855 .minor = MISC_DYNAMIC_MINOR,
856 .name = "vpu_service",
857 .fops = &vpu_service_fops,
/* Platform shutdown: cancel the idle timer and force power off. */
860 static void vpu_service_shutdown(struct platform_device *pdev)
862 pr_cont("shutdown...");
863 del_timer(&service.timer);
864 vpu_service_power_off();
/*
 * Suspend: power off but remember whether we were enabled, restoring the
 * flag afterwards so vpu_service_resume() knows to re-power the block.
 */
868 static int vpu_service_suspend(struct platform_device *pdev, pm_message_t state)
871 pr_info("suspend...");
872 del_timer(&service.timer);
873 enabled = service.enabled;
874 vpu_service_power_off();
875 service.enabled = enabled;
/*
 * Resume: if we were enabled before suspend, clear the flag first so
 * vpu_service_power_on() takes its full power-up path instead of the
 * already-on shortcut.
 */
879 static int vpu_service_resume(struct platform_device *pdev)
881 pr_info("resume...");
882 if (service.enabled) {
883 service.enabled = false;
884 vpu_service_power_on();
/* Self-registered platform device (no board-file entry needed). */
890 static struct platform_device vpu_service_device = {
891 .name = "vpu_service",
/* Matching platform driver providing PM and shutdown hooks. */
895 static struct platform_driver vpu_service_driver = {
897 .name = "vpu_service",
898 .owner = THIS_MODULE,
900 .shutdown = vpu_service_shutdown,
901 .suspend = vpu_service_suspend,
902 .resume = vpu_service_resume,
/*
 * Probe decoder/pp/encoder capability registers and fuse registers
 * (Hantro-style synthesis config), then mask each advertised capability
 * by its fuse bit. Fills service.dec_config and service.enc_config for
 * VPU_IOC_GET_HW_FUSE_STATUS. Hardware must be powered on when called.
 */
905 static void get_hw_info(void)
907 VPUHwDecConfig_t *dec = &service.dec_config;
908 VPUHwEncConfig_t *enc = &service.enc_config;
909 u32 configReg = dec_dev.hwregs[VPU_DEC_HWCFG0];
910 u32 asicID = dec_dev.hwregs[0];
/* --- decoder synthesis config, word 0 --- */
912 dec->h264Support = (configReg >> DWL_H264_E) & 0x3U;
913 dec->jpegSupport = (configReg >> DWL_JPEG_E) & 0x01U;
914 if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
915 dec->jpegSupport = JPEG_PROGRESSIVE;
916 dec->mpeg4Support = (configReg >> DWL_MPEG4_E) & 0x3U;
917 dec->vc1Support = (configReg >> DWL_VC1_E) & 0x3U;
918 dec->mpeg2Support = (configReg >> DWL_MPEG2_E) & 0x01U;
919 dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
920 dec->refBufSupport = (configReg >> DWL_REF_BUFF_E) & 0x01U;
921 dec->vp6Support = (configReg >> DWL_VP6_E) & 0x01U;
922 dec->maxDecPicWidth = configReg & 0x07FFU;
924 /* 2nd Config register */
925 configReg = dec_dev.hwregs[VPU_DEC_HWCFG1];
926 if (dec->refBufSupport) {
927 if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
928 dec->refBufSupport |= 2;
929 if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
930 dec->refBufSupport |= 4;
932 dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
933 dec->vp7Support = (configReg >> DWL_VP7_E) & 0x01U;
934 dec->vp8Support = (configReg >> DWL_VP8_E) & 0x01U;
935 dec->avsSupport = (configReg >> DWL_AVS_E) & 0x01U;
/* JPEG ext / RV only on 8190+ (or the 6731 product) */
938 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
939 dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
941 dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
944 if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
945 dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
947 dec->rvSupport = RV_NOT_SUPPORTED;
950 dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;
952 if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
953 dec->refBufSupport |= 8; /* enable HW support for offset */
/* --- decoder fuse register --- */
957 VPUHwFuseStatus_t hwFuseSts;
958 /* Decoder fuse configuration */
959 u32 fuseReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
961 hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
962 hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
963 hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
964 hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
965 hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
966 hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
967 hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
968 hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
969 hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
970 hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
971 hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
972 hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
973 hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
974 hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;
976 /* check max. decoder output width */
978 if (fuseReg & 0x8000U)
979 hwFuseSts.maxDecPicWidthFuse = 1920;
980 else if (fuseReg & 0x4000U)
981 hwFuseSts.maxDecPicWidthFuse = 1280;
982 else if (fuseReg & 0x2000U)
983 hwFuseSts.maxDecPicWidthFuse = 720;
984 else if (fuseReg & 0x1000U)
985 hwFuseSts.maxDecPicWidthFuse = 352;
986 else /* remove warning */
987 hwFuseSts.maxDecPicWidthFuse = 352;
989 hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;
991 /* Pp configuration */
992 configReg = dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];
994 if ((configReg >> DWL_PP_E) & 0x01U) {
996 dec->maxPpOutPicWidth = configReg & 0x07FFU;
997 /*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
998 dec->ppConfig = configReg;
1001 dec->maxPpOutPicWidth = 0;
1005 /* check the HW versio */
1006 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
1007 /* Pp configuration */
1008 configReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
1010 if ((configReg >> DWL_PP_E) & 0x01U) {
1011 /* Pp fuse configuration */
1012 u32 fuseRegPp = dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];
1014 if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
1015 hwFuseSts.ppSupportFuse = 1;
1016 /* check max. pp output width */
1017 if (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920;
1018 else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280;
1019 else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720;
1020 else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352;
1021 else hwFuseSts.maxPpOutPicWidthFuse = 352;
1022 hwFuseSts.ppConfigFuse = fuseRegPp;
1024 hwFuseSts.ppSupportFuse = 0;
1025 hwFuseSts.maxPpOutPicWidthFuse = 0;
1026 hwFuseSts.ppConfigFuse = 0;
1029 hwFuseSts.ppSupportFuse = 0;
1030 hwFuseSts.maxPpOutPicWidthFuse = 0;
1031 hwFuseSts.ppConfigFuse = 0;
/* --- clamp advertised capabilities by fuses --- */
1034 if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse)
1035 dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse;
1036 if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse)
1037 dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse;
1038 if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED;
1039 if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED;
1040 if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED;
1041 if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED;
1042 if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse)
1043 dec->jpegSupport = JPEG_BASELINE;
1044 if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED;
1045 if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED;
1046 if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED;
1047 if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED;
1048 if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED;
1049 if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED;
1051 /* check the pp config vs fuse status */
1052 if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) {
1053 u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25);
1054 u32 alphaBlend = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24);
1055 u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25);
1056 u32 alphaBlendFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24);
/* NOTE(review): these masks keep only bits 24-25 region and zero the
 * low 24 bits of ppConfig — looks suspicious (expected a single-bit
 * clear, e.g. &= ~PP_DEINTERLACING). Matches vendor/hantro source;
 * verify against the original DWL code before changing. */
1058 if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000;
1059 if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000;
1061 if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED;
1062 if (!hwFuseSts.refBufSupportFuse) dec->refBufSupport = REF_BUF_NOT_SUPPORTED;
1063 if (!hwFuseSts.rvSupportFuse) dec->rvSupport = RV_NOT_SUPPORTED;
1064 if (!hwFuseSts.avsSupportFuse) dec->avsSupport = AVS_NOT_SUPPORTED;
1065 if (!hwFuseSts.mvcSupportFuse) dec->mvcSupport = MVC_NOT_SUPPORTED;
/* --- encoder synthesis config (encoder swreg 63) --- */
1068 configReg = enc_dev.hwregs[63];
1069 enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
1070 enc->h264Enabled = (configReg >> 27) & 1;
1071 enc->mpeg4Enabled = (configReg >> 26) & 1;
1072 enc->jpegEnabled = (configReg >> 25) & 1;
1073 enc->vsEnabled = (configReg >> 24) & 1;
1074 enc->rgbEnabled = (configReg >> 28) & 1;
1075 enc->busType = (configReg >> 20) & 15;
1076 enc->synthesisLanguage = (configReg >> 16) & 15;
1077 enc->busWidth = (configReg >> 12) & 15;
/*
 * Shared decoder/post-processor interrupt handler. Reads both status
 * registers; for a decode IRQ it spins (elided do/while around 1090-1094)
 * while status 0x40001 indicates the unit is still busy, acks the IRQ bit
 * and completes service.reg_codec; a PP IRQ completes service.reg_pproc.
 */
1080 static irqreturn_t vdpu_isr(int irq, void *dev_id)
1082 vpu_device *dev = (vpu_device *) dev_id;
1083 u32 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1084 u32 irq_status_pp = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
1086 pr_debug("vdpu_isr dec %x pp %x\n", irq_status_dec, irq_status_pp);
1088 if (irq_status_dec & DEC_INTERRUPT_BIT) {
1089 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1090 if ((irq_status_dec & 0x40001) == 0x40001)
1093 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1094 } while ((irq_status_dec & 0x40001) == 0x40001);
/* acknowledge: write status back with the IRQ bit cleared */
1097 writel(irq_status_dec & (~DEC_INTERRUPT_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER);
1098 pr_debug("DEC IRQ received!\n");
1099 spin_lock(&service.lock);
1100 if (NULL == service.reg_codec) {
1101 pr_err("error: dec isr with no task waiting\n");
1103 reg_from_run_to_done(service.reg_codec);
1105 spin_unlock(&service.lock);
1108 if (irq_status_pp & PP_INTERRUPT_BIT) {
/* NOTE(review): ack below masks with ~DEC_INTERRUPT_BIT; correct only
 * because DEC_INTERRUPT_BIT and PP_INTERRUPT_BIT are both 0x100 (see
 * defines at top of file) — should name PP_INTERRUPT_BIT. */
1110 writel(irq_status_pp & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
1111 pr_debug("PP IRQ received!\n");
1112 spin_lock(&service.lock);
1113 if (NULL == service.reg_pproc) {
1114 pr_err("error: pp isr with no task waiting\n");
1116 reg_from_run_to_done(service.reg_pproc);
1118 spin_unlock(&service.lock);
/*
 * Encoder interrupt handler: ack the IRQ bit in the enable/status
 * register and complete service.reg_codec under service.lock.
 */
1124 static irqreturn_t vepu_isr(int irq, void *dev_id)
1126 struct vpu_device *dev = (struct vpu_device *) dev_id;
1127 u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);
1129 pr_debug("enc_isr\n");
1131 if (likely(irq_status & ENC_INTERRUPT_BIT)) {
1133 writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
1134 pr_debug("ENC IRQ received!\n");
1135 spin_lock(&service.lock);
1136 if (NULL == service.reg_codec) {
1137 pr_err("error: enc isr with no task waiting\n");
1139 reg_from_run_to_done(service.reg_codec);
1141 spin_unlock(&service.lock);
/* forward declaration: proc interface is defined at end of file */
1147 static int __init vpu_service_proc_init(void);
/*
 * Module init: set up service state and timer, power on, map hardware,
 * install both IRQs, register the misc device, the platform device and
 * driver, probe hw capabilities (get_hw_info, presumably in elided lines
 * near 1204) and finally power back off until first use. Error paths
 * unwind in reverse via the labels around lines 1211-1218.
 */
1148 static int __init vpu_service_init(void)
1152 pr_debug("baseaddr = 0x%08x vdpu irq = %d vepu irq = %d\n", RK29_VCODEC_PHYS, IRQ_VDPU, IRQ_VEPU);
1154 dec_dev.iobaseaddr = RK29_VCODEC_PHYS + DEC_PHY_OFFSET;
1155 dec_dev.iosize = DEC_IO_SIZE;
1156 enc_dev.iobaseaddr = RK29_VCODEC_PHYS;
1157 enc_dev.iosize = ENC_IO_SIZE;
1159 INIT_LIST_HEAD(&service.waiting);
1160 INIT_LIST_HEAD(&service.running);
1161 INIT_LIST_HEAD(&service.done);
1162 INIT_LIST_HEAD(&service.session);
1163 spin_lock_init(&service.lock);
1164 spin_lock_init(&service.lock_power);
1165 service.reg_codec = NULL;
1166 service.reg_pproc = NULL;
1167 atomic_set(&service.total_running, 0);
1168 service.enabled = false;
1171 init_timer(&service.timer);
1172 service.timer.expires = jiffies + POWER_OFF_DELAY;
1173 service.timer.function = vpu_service_power_off_work_func;
1174 vpu_service_power_on();
1176 ret = vpu_service_reserve_io();
1178 pr_err("error: reserve io failed\n");
1179 goto err_reserve_io;
1182 /* get the IRQ line */
1183 ret = request_irq(IRQ_VDPU, vdpu_isr, IRQF_SHARED, "vdpu", (void *)&dec_dev);
1185 pr_err("error: can't request vdpu irq %d\n", IRQ_VDPU);
1186 goto err_req_vdpu_irq;
1189 ret = request_irq(IRQ_VEPU, vepu_isr, IRQF_SHARED, "vepu", (void *)&enc_dev);
1191 pr_err("error: can't request vepu irq %d\n", IRQ_VEPU);
1192 goto err_req_vepu_irq;
1195 ret = misc_register(&vpu_service_misc_device);
1197 pr_err("error: misc_register failed\n");
1201 platform_device_register(&vpu_service_device);
1202 platform_driver_probe(&vpu_service_driver, NULL);
1204 vpu_service_power_off();
1205 pr_info("init success\n");
1207 vpu_service_proc_init();
/* error unwind (labels elided): free IRQs, release io, power off */
1211 free_irq(IRQ_VEPU, (void *)&enc_dev);
1213 free_irq(IRQ_VDPU, (void *)&dec_dev);
1215 pr_info("init failed\n");
1217 vpu_service_power_off();
1218 vpu_service_release_io();
1220 pr_info("init failed\n");
/*
 * Module exit: tear down in reverse of init — timer, power, platform
 * device/driver, misc device, IRQs (io release/clk_put presumably in
 * elided trailing lines).
 */
1224 static void __exit vpu_service_exit(void)
1226 del_timer(&service.timer);
1227 vpu_service_power_off();
1228 platform_device_unregister(&vpu_service_device);
1229 platform_driver_unregister(&vpu_service_driver);
1230 misc_deregister(&vpu_service_misc_device);
1231 free_irq(IRQ_VEPU, (void *)&enc_dev);
1232 free_irq(IRQ_VDPU, (void *)&dec_dev);
1236 module_init(vpu_service_init);
1237 module_exit(vpu_service_exit);
1239 #ifdef CONFIG_PROC_FS
1240 #include <linux/proc_fs.h>
1241 #include <linux/seq_file.h>
/*
 * /proc/vpu_service seq_file body: force power on, dump every encoder and
 * decoder register, then list each session's queued register sets under
 * service.lock.
 * NOTE(review): powers on but no matching power-off/maintain call is
 * visible here — the idle timer armed by power_on presumably handles it.
 */
1243 static int proc_vpu_service_show(struct seq_file *s, void *v)
1247 vpu_reg *reg, *reg_tmp;
1248 vpu_session *session, *session_tmp;
1250 vpu_service_power_on();
1251 seq_printf(s, "\nENC Registers:\n");
1252 n = enc_dev.iosize >> 2;
1253 for (i = 0; i < n; i++) {
1254 seq_printf(s, "\tswreg%d = %08X\n", i, readl(enc_dev.hwregs + i));
1256 seq_printf(s, "\nDEC Registers:\n");
1257 n = dec_dev.iosize >> 2;
1258 for (i = 0; i < n; i++) {
1259 seq_printf(s, "\tswreg%d = %08X\n", i, readl(dec_dev.hwregs + i));
1262 seq_printf(s, "\nvpu service status:\n");
1263 spin_lock_irqsave(&service.lock, flag);
1264 list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
1265 seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
1266 //seq_printf(s, "waiting reg set %d\n");
1267 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
1268 seq_printf(s, "waiting register set\n");
1270 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
1271 seq_printf(s, "running register set\n");
1273 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
1274 seq_printf(s, "done register set\n");
1277 spin_unlock_irqrestore(&service.lock, flag);
/* proc open: standard single_open wrapper around the show function. */
1282 static int proc_vpu_service_open(struct inode *inode, struct file *file)
1284 return single_open(file, proc_vpu_service_show, NULL);
/* proc file ops; .read (seq_read) is presumably in the elided line. */
1287 static const struct file_operations proc_vpu_service_fops = {
1288 .open = proc_vpu_service_open,
1290 .llseek = seq_lseek,
1291 .release = single_release,
/*
 * Create /proc/vpu_service (read-only mode 0). Return value of
 * proc_create is not checked — failure just means no proc entry.
 */
1294 static int __init vpu_service_proc_init(void)
1296 proc_create("vpu_service", 0, NULL, &proc_vpu_service_fops);
1300 #endif /* CONFIG_PROC_FS */