1 /* arch/arm/mach-rk29/vpu.c
3 * Copyright (C) 2010 ROCKCHIP, Inc.
4 * author: chenhengming chm@rock-chips.com
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #ifdef CONFIG_RK29_VPU_DEBUG
19 #define pr_fmt(fmt) "VPU_SERVICE: %s: " fmt, __func__
21 #define pr_fmt(fmt) "VPU_SERVICE: " fmt
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
33 #include <linux/ioport.h>
34 #include <linux/miscdevice.h>
36 #include <linux/poll.h>
37 #include <linux/platform_device.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/wakelock.h>
42 #include <asm/uaccess.h>
44 #include <mach/irqs.h>
48 #include <plat/vpu_service.h>
52 VPU_DEC_ID_9190 = 0x6731,
58 VPU_DEC_TYPE_9190 = 0,
59 VPU_ENC_TYPE_8270 = 0x100,
63 typedef enum VPU_FREQ {
74 unsigned long hw_addr;
75 unsigned long enc_offset;
76 unsigned long enc_reg_num;
77 unsigned long enc_io_size;
78 unsigned long dec_offset;
79 unsigned long dec_reg_num;
80 unsigned long dec_io_size;
#define VPU_SERVICE_SHOW_TIME 0	/* set to 1 to log per-task hardware run time */
#if VPU_SERVICE_SHOW_TIME
/* timestamps taken around each encode / decode / post-process hardware run */
static struct timeval enc_start, enc_end;
static struct timeval dec_start, dec_end;
static struct timeval pp_start, pp_end;
#define MHZ (1000*1000)
/* physical base address of the VCODEC register block */
#define VCODEC_PHYS (0x10104000)
#define REG_NUM_9190_DEC (60)	/* decoder register count */
#define REG_NUM_9190_PP (41)	/* post-processor register count */
#define REG_NUM_9190_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP)
#define REG_NUM_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP)
#define REG_NUM_ENC_8270 (96)
#define REG_SIZE_ENC_8270 (0x200)
#define REG_NUM_ENC_4831 (164)
#define REG_SIZE_ENC_4831 (0x400)
/* register count -> byte size (registers are 32-bit) */
#define SIZE_REG(reg) ((reg)*4)
/*
 * Register-file layout per hardware revision; matched against the probed
 * hardware ID in vpu_service_check_hw().
 * NOTE(review): per-element braces are outside the visible chunk.
 */
VPU_HW_INFO_E vpu_hw_set[] = {
	.hw_id = VPU_ID_8270,
	.hw_addr = VCODEC_PHYS,
	.enc_reg_num = REG_NUM_ENC_8270,
	.enc_io_size = REG_NUM_ENC_8270 * 4,
	.dec_offset = REG_SIZE_ENC_8270,	/* decoder registers follow the encoder block */
	.dec_reg_num = REG_NUM_9190_DEC_PP,
	.dec_io_size = REG_NUM_9190_DEC_PP * 4,
	.hw_id = VPU_ID_4831,
	.hw_addr = VCODEC_PHYS,
	.enc_reg_num = REG_NUM_ENC_4831,
	.enc_io_size = REG_NUM_ENC_4831 * 4,
	.dec_offset = REG_SIZE_ENC_4831,
	.dec_reg_num = REG_NUM_9190_DEC_PP,
	.dec_io_size = REG_NUM_9190_DEC_PP * 4,
/* Interrupt status/ack register index and bit within each hardware unit. */
#define DEC_INTERRUPT_REGISTER 1
#define PP_INTERRUPT_REGISTER 60
#define ENC_INTERRUPT_REGISTER 1
#define DEC_INTERRUPT_BIT 0x100
#define PP_INTERRUPT_BIT 0x100
#define ENC_INTERRUPT_BIT 0x1
/* Enable ("start") and clock-gate register indices / bits per unit. */
#define VPU_REG_EN_ENC 14
#define VPU_REG_ENC_GATE 2
#define VPU_REG_ENC_GATE_BIT (1<<4)
#define VPU_REG_EN_DEC 1
#define VPU_REG_DEC_GATE 2
#define VPU_REG_DEC_GATE_BIT (1<<10)
#define VPU_REG_EN_PP 0
#define VPU_REG_PP_GATE 1
#define VPU_REG_PP_GATE_BIT (1<<8)
#define VPU_REG_EN_DEC_PP 1
#define VPU_REG_DEC_PP_GATE 61
#define VPU_REG_DEC_PP_GATE_BIT (1<<8)
155 * struct for process session which connect to vpu
157 * @author ChenHengming (2011-5-3)
/* Per-open-file client state: one session per opener of /dev/vpu_service. */
typedef struct vpu_session {
	VPU_CLIENT_TYPE type;	/* client kind, set via VPU_IOC_SET_CLIENT_TYPE */
	/* a linked list of data so we can access them for debugging */
	struct list_head list_session;
	/* a linked list of register data waiting for process */
	struct list_head waiting;
	/* a linked list of register data in processing */
	struct list_head running;
	/* a linked list of register data processed */
	struct list_head done;
	wait_queue_head_t wait;	/* woken when one of this session's tasks completes */
	atomic_t task_running;	/* number of this session's tasks on hardware */
175 * struct for process register set
177 * @author ChenHengming (2011-5-4)
/* One submitted register set; the register copy is allocated right after it. */
typedef struct vpu_reg {
	VPU_CLIENT_TYPE type;	/* copied from the owning session at reg_init() */
	vpu_session *session;	/* back-pointer to the submitting session */
	struct list_head session_link;		/* link to vpu service session */
	struct list_head status_link;		/* link to register set list */
/* One memory-mapped hardware unit (encoder side or decoder/pp side). */
typedef struct vpu_device {
	atomic_t irq_count_codec;	/* codec irqs seen but not yet handled by the thread */
	atomic_t irq_count_pp;	/* pp irqs seen but not yet handled by the thread */
	unsigned long iobaseaddr;	/* physical base of the register window */
	volatile u32 *hwregs;	/* ioremapped registers; NULL when unmapped */
/* Global driver state; a single instance ("service") exists. */
typedef struct vpu_service_info {
	struct wake_lock wake_lock;	/* held while the hardware is powered */
	struct delayed_work power_off_work;	/* delayed automatic power-off */
	struct list_head waiting;	/* link to link_reg in struct vpu_reg */
	struct list_head running;	/* link to link_reg in struct vpu_reg */
	struct list_head done;	/* link to link_reg in struct vpu_reg */
	struct list_head session;	/* link to list_session in struct vpu_session */
	atomic_t total_running;	/* register sets currently on hardware */
	VPUHwDecConfig_t dec_config;	/* decoder capabilities, filled by get_hw_info() */
	VPUHwEncConfig_t enc_config;	/* encoder capabilities, filled by get_hw_info() */
	VPU_HW_INFO_E *hw_info;	/* matched entry of vpu_hw_set[] */
	unsigned long reg_size;	/* byte size of one userspace register set */
	atomic_t freq_status;	/* last VPU_FREQ applied in auto-frequency mode */
218 typedef struct vpu_request
/* Clock handles looked up in vpu_get_clk(). */
static struct clk *pd_video;
static struct clk *aclk_vepu;
static struct clk *hclk_vepu;
static struct clk *aclk_ddr_vepu;
static struct clk *hclk_cpu_vcodec;
static vpu_service_info service;	/* the single global driver instance */
static vpu_device dec_dev;	/* decoder/post-processor register window */
static vpu_device enc_dev;	/* encoder register window */
/* Delay before powering the VPU down after the last task (in jiffies). */
#define VPU_POWER_OFF_DELAY	(4 * HZ)	/* 4s */
/* Timeout waiting for a submitted register set to complete (in jiffies). */
#define VPU_TIMEOUT_DELAY	(2 * HZ)	/* 2s */
/*
 * Look up every clock the VPU needs; failures are logged.
 * NOTE(review): the closing braces of the error branches are on lines not
 * visible in this chunk.
 */
static void vpu_get_clk(void)
	pd_video = clk_get(NULL, "pd_video");
	if (IS_ERR(pd_video)) {
		pr_err("failed on clk_get pd_video\n");
	aclk_vepu = clk_get(NULL, "aclk_vepu");
	if (IS_ERR(aclk_vepu)) {
		pr_err("failed on clk_get aclk_vepu\n");
	hclk_vepu = clk_get(NULL, "hclk_vepu");
	if (IS_ERR(hclk_vepu)) {
		pr_err("failed on clk_get hclk_vepu\n");
	aclk_ddr_vepu = clk_get(NULL, "aclk_ddr_vepu");
	if (IS_ERR(aclk_ddr_vepu)) {
		pr_err("failed on clk_get aclk_ddr_vepu\n");
	hclk_cpu_vcodec = clk_get(NULL, "hclk_cpu_vcodec");
	if (IS_ERR(hclk_cpu_vcodec)) {
		pr_err("failed on clk_get hclk_cpu_vcodec\n");
/*
 * Drop the clock references taken in vpu_get_clk().
 * NOTE(review): only two clk_put() calls are visible in this chunk; the
 * remaining clocks are presumably released on the elided lines — confirm.
 */
static void vpu_put_clk(void)
	clk_put(aclk_ddr_vepu);
	clk_put(hclk_cpu_vcodec);
/*
 * Hard-reset the video codec through the CRU soft-reset lines (arch-specific
 * sequence), then forget any register set that was on the hardware.
 */
static void vpu_reset(void)
#if defined(CONFIG_ARCH_RK29)
	clk_disable(aclk_ddr_vepu);
	cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
	cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
	cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
	cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
	cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
	cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
	cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
	cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
	clk_enable(aclk_ddr_vepu);
#elif defined(CONFIG_ARCH_RK30)
	/* RK30: idle the video power domain around the reset pulse */
	pmu_set_idle_request(IDLE_REQ_VIDEO, true);
	cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
	cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
	cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
	cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
	cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
	cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
	cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
	cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
	pmu_set_idle_request(IDLE_REQ_VIDEO, false);
	/* any in-flight register sets are lost after a reset */
	service.reg_codec = NULL;
	service.reg_pproc = NULL;
	service.reg_resev = NULL;
static void reg_deinit(vpu_reg *reg);
/*
 * Free every register set still owned by @session (waiting, running, done).
 * Caller holds service.lock; reg_deinit() calls are on elided lines.
 */
static void vpu_service_session_clear(vpu_session *session)
	list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
	list_for_each_entry_safe(reg, n, &session->running, session_link) {
	list_for_each_entry_safe(reg, n, &session->done, session_link) {
/* Dump the whole service state (sessions and their register lists) to dmesg. */
static void vpu_service_dump(void)
	vpu_reg *reg, *reg_tmp;
	vpu_session *session, *session_tmp;
	running = atomic_read(&service.total_running);
	printk("total_running %d\n", running);
	printk("reg_codec 0x%.8x\n", (unsigned int)service.reg_codec);
	printk("reg_pproc 0x%.8x\n", (unsigned int)service.reg_pproc);
	printk("reg_resev 0x%.8x\n", (unsigned int)service.reg_resev);
	list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
		printk("session pid %d type %d:\n", session->pid, session->type);
		running = atomic_read(&session->task_running);
		printk("task_running %d\n", running);
		list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
			printk("waiting register set 0x%.8x\n", (unsigned int)reg);
		list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
			printk("running register set 0x%.8x\n", (unsigned int)reg);
		list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
			printk("done register set 0x%.8x\n", (unsigned int)reg);
/*
 * Gate all clocks / the power domain off and release the wake lock.
 * No-op when already disabled; warns (and briefly delays, on elided lines)
 * if tasks are still running. Caller holds service.lock.
 */
static void vpu_service_power_off(void)
	if (!service.enabled) {
	service.enabled = false;
	total_running = atomic_read(&service.total_running);
	pr_alert("alert: power off when %d task running!!\n", total_running);
	pr_alert("alert: delay 50 ms for running task\n");
	printk("vpu: power off...");
#ifdef CONFIG_ARCH_RK29
	pmu_set_power_domain(PD_VCODEC, false);
	clk_disable(pd_video);
	clk_disable(hclk_cpu_vcodec);
	clk_disable(aclk_ddr_vepu);
	clk_disable(hclk_vepu);
	clk_disable(aclk_vepu);
	wake_unlock(&service.wake_lock);
/* (Re)arm the delayed automatic power-off. */
static inline void vpu_queue_power_off_work(void)
	queue_delayed_work(system_nrt_wq, &service.power_off_work, VPU_POWER_OFF_DELAY);
/*
 * Delayed-work callback: power off now, or requeue if the lock is contended.
 * NOTE(review): the requeue belongs to the trylock-failure branch; the
 * intervening else line is not visible in this chunk.
 */
static void vpu_power_off_work(struct work_struct *work)
	if (mutex_trylock(&service.lock)) {
		vpu_service_power_off();
		mutex_unlock(&service.lock);
		/* Come back later if the device is busy... */
		vpu_queue_power_off_work();
/*
 * Ungate clocks / the power domain, take the wake lock, and (re)arm the
 * delayed power-off; returns early (elided) when already enabled.
 */
static void vpu_service_power_on(void)
	ktime_t now = ktime_get();
	/* rate-limit re-arming of the power-off work to once per second */
	if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
		cancel_delayed_work_sync(&service.power_off_work);
		vpu_queue_power_off_work();
	service.enabled = true;
	printk("vpu: power on\n");
	clk_enable(aclk_vepu);
	clk_enable(hclk_vepu);
	clk_enable(hclk_cpu_vcodec);
#ifdef CONFIG_ARCH_RK29
	pmu_set_power_domain(PD_VCODEC, true);
	clk_enable(pd_video);
	clk_enable(aclk_ddr_vepu);
	wake_lock(&service.wake_lock);
421 static inline bool reg_check_rmvb_wmv(vpu_reg *reg)
423 unsigned long type = (reg->reg[3] & 0xF0000000) >> 28;
424 return ((type == 8) || (type == 4));
/*
 * Tests the interlace flag, reg[3] bit 23; the return statement is on a
 * line not visible in this chunk.
 */
static inline bool reg_check_interlace(vpu_reg *reg)
	unsigned long type = (reg->reg[3] & (1 << 23));
/*
 * Allocate a vpu_reg plus space for the register copy, fill it from
 * userspace, queue it on the service/session waiting lists, and pick a
 * clock rate in auto-frequency mode. Error-path returns are on elided lines.
 */
static vpu_reg *reg_init(vpu_session *session, void __user *src, unsigned long size)
	vpu_reg *reg = kmalloc(sizeof(vpu_reg)+service.reg_size, GFP_KERNEL);
	pr_err("error: kmalloc fail in reg_init\n");
	reg->session = session;
	reg->type = session->type;
	reg->freq = VPU_FREQ_DEFAULT;
	reg->reg = (unsigned long *)&reg[1];	/* register copy lives right after the header */
	INIT_LIST_HEAD(&reg->session_link);
	INIT_LIST_HEAD(&reg->status_link);
	if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
		pr_err("error: copy_from_user failed in reg_init\n");
	mutex_lock(&service.lock);
	list_add_tail(&reg->status_link, &service.waiting);
	list_add_tail(&reg->session_link, &session->waiting);
	mutex_unlock(&service.lock);
	if (service.auto_freq) {
		if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
			if (reg_check_rmvb_wmv(reg)) {
				reg->freq = VPU_FREQ_200M;	/* lighter formats run at a lower clock */
			if (reg_check_interlace(reg)) {
				reg->freq = VPU_FREQ_400M;
		if (reg->type == VPU_PP) {
			reg->freq = VPU_FREQ_400M;
/* Unlink a register set from all lists and clear service pointers to it. */
static void reg_deinit(vpu_reg *reg)
	list_del_init(&reg->session_link);
	list_del_init(&reg->status_link);
	if (reg == service.reg_codec) service.reg_codec = NULL;
	if (reg == service.reg_pproc) service.reg_pproc = NULL;
/* Move a register set from the waiting lists onto the running lists. */
static void reg_from_wait_to_run(vpu_reg *reg)
	list_del_init(&reg->status_link);
	list_add_tail(&reg->status_link, &service.running);
	list_del_init(&reg->session_link);
	list_add_tail(&reg->session_link, &reg->session->running);
/* Read back @count hardware registers into the register set's buffer;
 * the loop body (dst[i] = src[i]) is on an elided line. */
static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
	u32 *dst = (u32 *)&reg->reg[0];
	for (i = 0; i < count; i++)
/*
 * Task completion: move the set to the done lists, read results back from
 * the right hardware unit by client type, clear the ack register, drop the
 * running counters and wake the owning session.
 * NOTE(review): the switch/case scaffolding per type is on elided lines.
 */
static void reg_from_run_to_done(vpu_reg *reg)
	list_del_init(&reg->status_link);
	list_add_tail(&reg->status_link, &service.done);
	list_del_init(&reg->session_link);
	list_add_tail(&reg->session_link, &reg->session->done);
	service.reg_codec = NULL;
	reg_copy_from_hw(reg, enc_dev.hwregs, service.hw_info->enc_reg_num);
	service.reg_codec = NULL;
	reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_9190_DEC);
	service.reg_pproc = NULL;
	reg_copy_from_hw(reg, dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_9190_PP);
	dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;	/* clear the pp interrupt source */
	service.reg_codec = NULL;
	service.reg_pproc = NULL;
	reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_9190_DEC_PP);
	dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
	pr_err("error: copy reg from hw with unknown type %d\n", reg->type);
	atomic_sub(1, &reg->session->task_running);
	atomic_sub(1, &service.total_running);
	wake_up_interruptible_sync(&reg->session->wait);
/*
 * Switch aclk_vepu to the rate requested by @reg; no-op when the rate is
 * already current. Switch statement scaffolding is on elided lines.
 */
static void vpu_service_set_freq(vpu_reg *reg)
	VPU_FREQ curr = atomic_read(&service.freq_status);
	if (curr == reg->freq) {
	atomic_set(&service.freq_status, reg->freq);
	case VPU_FREQ_200M : {
		clk_set_rate(aclk_vepu, 200*MHZ);
		//printk("default: 200M\n");
	case VPU_FREQ_266M : {
		clk_set_rate(aclk_vepu, 266*MHZ);
		//printk("default: 266M\n");
	case VPU_FREQ_300M : {
		clk_set_rate(aclk_vepu, 300*MHZ);
		//printk("default: 300M\n");
	case VPU_FREQ_400M : {
		clk_set_rate(aclk_vepu, 400*MHZ);
		//printk("default: 400M\n");
	/* fallback rate for all other requests */
	clk_set_rate(aclk_vepu, 300*MHZ);
	//printk("default: 300M\n");
/*
 * Program a register set into the hardware and start the run, per client
 * type. Gate bits are forced on, and the enable ("start") register is
 * written last so the unit only kicks off once fully programmed.
 * NOTE(review): the switch/case scaffolding and loop bodies are elided.
 */
static void reg_copy_to_hw(vpu_reg *reg)
	u32 *src = (u32 *)&reg->reg[0];
	atomic_add(1, &service.total_running);
	atomic_add(1, &reg->session->task_running);
	if (service.auto_freq) {
		vpu_service_set_freq(reg);
	int enc_count = service.hw_info->enc_reg_num;
	u32 *dst = (u32 *)enc_dev.hwregs;
#if defined(CONFIG_ARCH_RK30)
	/* RK30 encoder needs a soft-reset pulse before each run */
	cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
	cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
	cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
	cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
	service.reg_codec = reg;
	/* write the enable register first without its start bits */
	dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
	for (i = 0; i < VPU_REG_EN_ENC; i++)
	for (i = VPU_REG_EN_ENC + 1; i < enc_count; i++)
	dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
	dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC];	/* start the encoder */
#if VPU_SERVICE_SHOW_TIME
	do_gettimeofday(&enc_start);
	u32 *dst = (u32 *)dec_dev.hwregs;
	service.reg_codec = reg;
	for (i = REG_NUM_9190_DEC - 1; i > VPU_REG_DEC_GATE; i--)
	dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
	dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];	/* start the decoder */
#if VPU_SERVICE_SHOW_TIME
	do_gettimeofday(&dec_start);
	u32 *dst = (u32 *)dec_dev.hwregs + PP_INTERRUPT_REGISTER;
	service.reg_pproc = reg;
	dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
	for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_9190_PP; i++)
	dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];	/* start the post-processor */
#if VPU_SERVICE_SHOW_TIME
	do_gettimeofday(&pp_start);
	u32 *dst = (u32 *)dec_dev.hwregs;
	service.reg_codec = reg;
	service.reg_pproc = reg;
	for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_9190_DEC_PP; i++)
	dst[VPU_REG_EN_DEC_PP] = src[VPU_REG_EN_DEC_PP] | 0x2;
	dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
	dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
	dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];	/* start combined dec+pp */
#if VPU_SERVICE_SHOW_TIME
	do_gettimeofday(&dec_start);
	pr_err("error: unsupport session type %d", reg->type);
	atomic_sub(1, &service.total_running);
	atomic_sub(1, &reg->session->task_running);
/*
 * Take the first waiting register set and start it if the hardware unit(s)
 * it needs (codec and/or pp) are idle. Per-type eligibility branches are
 * partly on elided lines. Caller holds service.lock.
 */
static void try_set_reg(void)
	// first get reg from reg list
	if (!list_empty(&service.waiting)) {
		vpu_reg *reg = list_entry(service.waiting.next, vpu_reg, status_link);
		vpu_service_power_on();
		if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
		if (NULL == service.reg_codec)
		if (service.auto_freq && (NULL != service.reg_pproc)) {
		if (NULL == service.reg_codec) {
			if (NULL == service.reg_pproc)
		if ((VPU_DEC == service.reg_codec->type) && (NULL == service.reg_pproc))
		// can not charge frequency when vpu is working
		if (service.auto_freq) {
		if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
		printk("undefined reg type %d\n", reg->type);
		reg_from_wait_to_run(reg);
/*
 * Copy a finished register set back to userspace; the copy size depends on
 * the client type (case labels are on elided lines).
 */
static int return_reg(vpu_reg *reg, u32 __user *dst)
	if (copy_to_user(dst, &reg->reg[0], service.hw_info->enc_io_size))
	if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_9190_DEC)))
	if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_9190_PP)))
	if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_9190_DEC_PP)))
	pr_err("error: copy reg to user with unknown type %d\n", reg->type);
/*
 * ioctl entry point: set the client type, report fused hardware
 * capabilities, submit a register set (SET_REG), or wait for and return a
 * finished one (GET_REG, with hardware reset on timeout).
 * NOTE(review): many brace/return lines of this switch are elided.
 */
static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	vpu_session *session = (vpu_session *)filp->private_data;
	if (NULL == session) {
	case VPU_IOC_SET_CLIENT_TYPE : {
		session->type = (VPU_CLIENT_TYPE)arg;
	case VPU_IOC_GET_HW_FUSE_STATUS : {
		if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
			pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
		if (VPU_ENC != session->type) {
			if (copy_to_user((void __user *)req.req, &service.dec_config, sizeof(VPUHwDecConfig_t))) {
				pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
			if (copy_to_user((void __user *)req.req, &service.enc_config, sizeof(VPUHwEncConfig_t))) {
				pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
	case VPU_IOC_SET_REG : {
		if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
			pr_err("error: VPU_IOC_SET_REG copy_from_user failed\n");
		reg = reg_init(session, (void __user *)req.req, req.size);
		mutex_lock(&service.lock);
		mutex_unlock(&service.lock);
	case VPU_IOC_GET_REG : {
		if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
			pr_err("error: VPU_IOC_GET_REG copy_from_user failed\n");
		int ret = wait_event_interruptible_timeout(session->wait, !list_empty(&session->done), VPU_TIMEOUT_DELAY);
		if (!list_empty(&session->done)) {
			pr_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session->pid, ret);
		if (unlikely(ret < 0)) {
			pr_err("error: pid %d wait task ret %d\n", session->pid, ret);
		} else if (0 == ret) {
			pr_err("error: pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
		/* timeout path: drop the session's stuck tasks and reset hardware */
		int task_running = atomic_read(&session->task_running);
		mutex_lock(&service.lock);
		atomic_set(&session->task_running, 0);
		atomic_sub(task_running, &service.total_running);
		printk("%d task is running but not return, reset hardware...", task_running);
		vpu_service_session_clear(session);
		mutex_unlock(&service.lock);
		mutex_lock(&service.lock);
		reg = list_entry(session->done.next, vpu_reg, session_link);
		return_reg(reg, (u32 __user *)req.req);
		mutex_unlock(&service.lock);
	pr_err("error: unknow vpu service ioctl cmd %x\n", cmd);
/*
 * Probe the hardware ID register at @hw_addr and match it against
 * vpu_hw_set[]; on a match, record the layout in @p->hw_info.
 * Returns -EINVAL when no entry matches (success-path return elided).
 */
static int vpu_service_check_hw(vpu_service_info *p, unsigned long hw_addr)
	int ret = -EINVAL, i = 0;
	volatile u32 *tmp = (volatile u32 *)ioremap_nocache(hw_addr, 0x4);
	enc_id = (enc_id >> 16) & 0xFFFF;	/* the ID lives in the top halfword */
	pr_info("checking hw id %x\n", enc_id);
	for (i = 0; i < ARRAY_SIZE(vpu_hw_set); i++) {
		if (enc_id == vpu_hw_set[i].hw_id) {
			p->hw_info = &vpu_hw_set[i];
	iounmap((void *)tmp);
/* Unmap and release both register windows; safe when only partly set up. */
static void vpu_service_release_io(void)
	if (dec_dev.hwregs) {
		iounmap((void *)dec_dev.hwregs);
		dec_dev.hwregs = NULL;
	if (dec_dev.iobaseaddr) {
		release_mem_region(dec_dev.iobaseaddr, dec_dev.iosize);
		dec_dev.iobaseaddr = 0;
	if (enc_dev.hwregs) {
		iounmap((void *)enc_dev.hwregs);
		enc_dev.hwregs = NULL;
	if (enc_dev.iobaseaddr) {
		release_mem_region(enc_dev.iobaseaddr, enc_dev.iosize);
		enc_dev.iobaseaddr = 0;
/*
 * Request and ioremap the decoder then encoder register regions.
 * Error-path cleanup/returns are on elided lines.
 */
static int vpu_service_reserve_io(void)
	unsigned long iobaseaddr;
	unsigned long iosize;
	iobaseaddr = dec_dev.iobaseaddr;
	iosize = dec_dev.iosize;
	if (!request_mem_region(iobaseaddr, iosize, "vdpu_io")) {
		pr_info("failed to reserve dec HW regs\n");
	dec_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
	if (dec_dev.hwregs == NULL) {
		pr_info("failed to ioremap dec HW regs\n");
	iobaseaddr = enc_dev.iobaseaddr;
	iosize = enc_dev.iosize;
	if (!request_mem_region(iobaseaddr, iosize, "vepu_io")) {
		pr_info("failed to reserve enc HW regs\n");
	enc_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
	if (enc_dev.hwregs == NULL) {
		pr_info("failed to ioremap enc HW regs\n");
/* Allocate and register a fresh session for this opener; device is non-seekable. */
static int vpu_service_open(struct inode *inode, struct file *filp)
	vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
	if (NULL == session) {
		pr_err("error: unable to allocate memory for vpu_session.");
	session->type = VPU_TYPE_BUTT;	/* no real type until VPU_IOC_SET_CLIENT_TYPE */
	session->pid = current->pid;
	INIT_LIST_HEAD(&session->waiting);
	INIT_LIST_HEAD(&session->running);
	INIT_LIST_HEAD(&session->done);
	INIT_LIST_HEAD(&session->list_session);
	init_waitqueue_head(&session->wait);
	atomic_set(&session->task_running, 0);
	mutex_lock(&service.lock);
	list_add_tail(&session->list_session, &service.session);
	filp->private_data = (void *)session;
	mutex_unlock(&service.lock);
	pr_debug("dev opened\n");
	return nonseekable_open(inode, filp);
/*
 * Close path: warn about tasks still on hardware, wake any waiter, then
 * unlink the session, free its register sets, and free it (kfree elided).
 */
static int vpu_service_release(struct inode *inode, struct file *filp)
	vpu_session *session = (vpu_session *)filp->private_data;
	task_running = atomic_read(&session->task_running);
	pr_err("error: vpu_service session %d still has %d task running when closing\n", session->pid, task_running);
	wake_up_interruptible_sync(&session->wait);
	mutex_lock(&service.lock);
	/* remove this filp from the asynchronusly notified filp's */
	list_del_init(&session->list_session);
	vpu_service_session_clear(session);
	filp->private_data = NULL;
	mutex_unlock(&service.lock);
	pr_debug("dev closed\n");
/* Character-device plumbing: file ops, misc device, platform device/driver.
 * NOTE(review): the closing "};" lines are outside the visible chunk. */
static const struct file_operations vpu_service_fops = {
	.unlocked_ioctl = vpu_service_ioctl,
	.open = vpu_service_open,
	.release = vpu_service_release,
	//.fasync = vpu_service_fasync,
static struct miscdevice vpu_service_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vpu_service",
	.fops = &vpu_service_fops,
static struct platform_device vpu_service_device = {
	.name = "vpu_service",
static struct platform_driver vpu_service_driver = {
	.name = "vpu_service",
	.owner = THIS_MODULE,
/*
 * Read the decoder/encoder synthesis-config and fuse registers and fill
 * service.dec_config / service.enc_config; features whose fuses are blown
 * are masked out. Finally enables auto-frequency mode on supporting SoCs.
 */
static void get_hw_info(void)
	VPUHwDecConfig_t *dec = &service.dec_config;
	VPUHwEncConfig_t *enc = &service.enc_config;
	u32 configReg = dec_dev.hwregs[VPU_DEC_HWCFG0];
	u32 asicID = dec_dev.hwregs[0];
	dec->h264Support = (configReg >> DWL_H264_E) & 0x3U;
	dec->jpegSupport = (configReg >> DWL_JPEG_E) & 0x01U;
	if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
		dec->jpegSupport = JPEG_PROGRESSIVE;
	dec->mpeg4Support = (configReg >> DWL_MPEG4_E) & 0x3U;
	dec->vc1Support = (configReg >> DWL_VC1_E) & 0x3U;
	dec->mpeg2Support = (configReg >> DWL_MPEG2_E) & 0x01U;
	dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
	dec->refBufSupport = (configReg >> DWL_REF_BUFF_E) & 0x01U;
	dec->vp6Support = (configReg >> DWL_VP6_E) & 0x01U;
	dec->maxDecPicWidth = configReg & 0x07FFU;
	/* 2nd Config register */
	configReg = dec_dev.hwregs[VPU_DEC_HWCFG1];
	if (dec->refBufSupport) {
		if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
			dec->refBufSupport |= 2;
		if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
			dec->refBufSupport |= 4;
	dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
	dec->vp7Support = (configReg >> DWL_VP7_E) & 0x01U;
	dec->vp8Support = (configReg >> DWL_VP8_E) & 0x01U;
	dec->avsSupport = (configReg >> DWL_AVS_E) & 0x01U;
	/* JPEG xtensions */
	if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
		dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
		dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
	if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
		dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
		dec->rvSupport = RV_NOT_SUPPORTED;
	dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;
	if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
		dec->refBufSupport |= 8; /* enable HW support for offset */
	VPUHwFuseStatus_t hwFuseSts;
	/* Decoder fuse configuration */
	u32 fuseReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
	hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
	hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
	hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
	hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
	hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
	hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
	hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
	hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
	hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
	hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
	hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
	hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
	hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
	hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;
	/* check max. decoder output width */
	if (fuseReg & 0x8000U)
		hwFuseSts.maxDecPicWidthFuse = 1920;
	else if (fuseReg & 0x4000U)
		hwFuseSts.maxDecPicWidthFuse = 1280;
	else if (fuseReg & 0x2000U)
		hwFuseSts.maxDecPicWidthFuse = 720;
	else if (fuseReg & 0x1000U)
		hwFuseSts.maxDecPicWidthFuse = 352;
	else /* remove warning */
		hwFuseSts.maxDecPicWidthFuse = 352;
	hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;
	/* Pp configuration */
	configReg = dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];
	if ((configReg >> DWL_PP_E) & 0x01U) {
		dec->maxPpOutPicWidth = configReg & 0x07FFU;
		/*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
		dec->ppConfig = configReg;
		dec->maxPpOutPicWidth = 0;
	/* check the HW versio */
	if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
		/* Pp configuration */
		configReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
		if ((configReg >> DWL_PP_E) & 0x01U) {
			/* Pp fuse configuration */
			u32 fuseRegPp = dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];
			if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
				hwFuseSts.ppSupportFuse = 1;
				/* check max. pp output width */
				if (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920;
				else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280;
				else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720;
				else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352;
				else hwFuseSts.maxPpOutPicWidthFuse = 352;
				hwFuseSts.ppConfigFuse = fuseRegPp;
				hwFuseSts.ppSupportFuse = 0;
				hwFuseSts.maxPpOutPicWidthFuse = 0;
				hwFuseSts.ppConfigFuse = 0;
		hwFuseSts.ppSupportFuse = 0;
		hwFuseSts.maxPpOutPicWidthFuse = 0;
		hwFuseSts.ppConfigFuse = 0;
	/* clamp capabilities to what the fuses actually allow */
	if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse)
		dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse;
	if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse)
		dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse;
	if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED;
	if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED;
	if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED;
	if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED;
	if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse)
		dec->jpegSupport = JPEG_BASELINE;
	if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED;
	if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED;
	if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED;
	if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED;
	if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED;
	if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED;
	/* check the pp config vs fuse status */
	if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) {
		u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25);
		u32 alphaBlend = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24);
		u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25);
		u32 alphaBlendFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24);
		if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000;
		if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000;
	if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED;
	if (!hwFuseSts.refBufSupportFuse) dec->refBufSupport = REF_BUF_NOT_SUPPORTED;
	if (!hwFuseSts.rvSupportFuse) dec->rvSupport = RV_NOT_SUPPORTED;
	if (!hwFuseSts.avsSupportFuse) dec->avsSupport = AVS_NOT_SUPPORTED;
	if (!hwFuseSts.mvcSupportFuse) dec->mvcSupport = MVC_NOT_SUPPORTED;
	/* encoder synthesis configuration lives in encoder register 63 */
	configReg = enc_dev.hwregs[63];
	enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
	enc->h264Enabled = (configReg >> 27) & 1;
	enc->mpeg4Enabled = (configReg >> 26) & 1;
	enc->jpegEnabled = (configReg >> 25) & 1;
	enc->vsEnabled = (configReg >> 24) & 1;
	enc->rgbEnabled = (configReg >> 28) & 1;
	//enc->busType = (configReg >> 20) & 15;
	//enc->synthesisLanguage = (configReg >> 16) & 15;
	//enc->busWidth = (configReg >> 12) & 15;
	enc->reg_size = service.reg_size;
	enc->reserv[0] = enc->reserv[1] = 0;
	/* these SoCs support dynamic aclk_vepu scaling */
	service.auto_freq = soc_is_rk2928g() || soc_is_rk2928l() || soc_is_rk2926();
	if (service.auto_freq) {
		printk("vpu_service set to auto frequency mode\n");
		atomic_set(&service.freq_status, VPU_FREQ_BUT);
1209 static irqreturn_t vdpu_irq(int irq, void *dev_id)
1211 vpu_device *dev = (vpu_device *) dev_id;
1212 u32 irq_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1214 pr_debug("vdpu_irq\n");
1216 if (irq_status & DEC_INTERRUPT_BIT) {
1217 pr_debug("vdpu_isr dec %x\n", irq_status);
1218 if ((irq_status & 0x40001) == 0x40001)
1221 irq_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1222 } while ((irq_status & 0x40001) == 0x40001);
1225 writel(irq_status & (~DEC_INTERRUPT_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER);
1226 atomic_add(1, &dev->irq_count_codec);
1229 irq_status = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
1230 if (irq_status & PP_INTERRUPT_BIT) {
1231 pr_debug("vdpu_isr pp %x\n", irq_status);
1233 writel(irq_status & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
1234 atomic_add(1, &dev->irq_count_pp);
1237 return IRQ_WAKE_THREAD;
/*
 * Threaded half of the decoder/pp IRQ: under service.lock, retire the
 * register set that was running on each signalled unit and schedule the
 * next waiting one (try_set_reg calls are on elided lines).
 */
static irqreturn_t vdpu_isr(int irq, void *dev_id)
	vpu_device *dev = (vpu_device *) dev_id;
	mutex_lock(&service.lock);
	if (atomic_read(&dev->irq_count_codec)) {
#if VPU_SERVICE_SHOW_TIME
		do_gettimeofday(&dec_end);
		printk("dec task: %ld ms\n",
			(dec_end.tv_sec - dec_start.tv_sec) * 1000 +
			(dec_end.tv_usec - dec_start.tv_usec) / 1000);
		atomic_sub(1, &dev->irq_count_codec);
		if (NULL == service.reg_codec) {
			pr_err("error: dec isr with no task waiting\n");
			reg_from_run_to_done(service.reg_codec);
	if (atomic_read(&dev->irq_count_pp)) {
#if VPU_SERVICE_SHOW_TIME
		do_gettimeofday(&pp_end);
		printk("pp task: %ld ms\n",
			(pp_end.tv_sec - pp_start.tv_sec) * 1000 +
			(pp_end.tv_usec - pp_start.tv_usec) / 1000);
		atomic_sub(1, &dev->irq_count_pp);
		if (NULL == service.reg_pproc) {
			pr_err("error: pp isr with no task waiting\n");
			reg_from_run_to_done(service.reg_pproc);
	mutex_unlock(&service.lock);
/*
 * vepu_irq - hard irq handler for the encoder core.
 *
 * Acknowledges the encoder interrupt by writing the status back with
 * ENC_INTERRUPT_BIT cleared, counts the event, and defers completion to
 * the threaded handler (vepu_isr) via IRQ_WAKE_THREAD.
 */
static irqreturn_t vepu_irq(int irq, void *dev_id)
	struct vpu_device *dev = (struct vpu_device *) dev_id;
	u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);

	pr_debug("vepu_irq irq status %x\n", irq_status);

#if VPU_SERVICE_SHOW_TIME
	/* optional profiling: wall-clock duration of the encode task */
	do_gettimeofday(&enc_end);
	printk("enc task: %ld ms\n",
		(enc_end.tv_sec - enc_start.tv_sec) * 1000 +
		(enc_end.tv_usec - enc_start.tv_usec) / 1000);
	if (likely(irq_status & ENC_INTERRUPT_BIT)) {
		/* ack: write status back with the encoder irq bit cleared */
		writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
		atomic_add(1, &dev->irq_count_codec);

	return IRQ_WAKE_THREAD;
/*
 * vepu_isr - threaded half of the encoder interrupt.
 *
 * Takes service.lock, consumes the event counted by vepu_irq and
 * completes the owning encode task (service.reg_codec) via
 * reg_from_run_to_done(). A NULL owner is logged as an error (the
 * else-arm structure is elided in this excerpt).
 */
static irqreturn_t vepu_isr(int irq, void *dev_id)
	struct vpu_device *dev = (struct vpu_device *) dev_id;

	mutex_lock(&service.lock);
	if (atomic_read(&dev->irq_count_codec)) {
		atomic_sub(1, &dev->irq_count_codec);
		if (NULL == service.reg_codec) {
			pr_err("error: enc isr with no task waiting\n");
			reg_from_run_to_done(service.reg_codec);

	mutex_unlock(&service.lock);
static int __init vpu_service_proc_init(void);
/*
 * Module init: build the shared service state, probe the VPU hardware id,
 * map the decoder and encoder register windows, install both interrupt
 * handlers and register the misc device plus platform device/driver.
 *
 * On failure, unwinds in reverse acquisition order through the err_*
 * labels (the labels and the `if (ret != 0)` guards around the pr_err/
 * goto pairs are elided in this excerpt).
 */
static int __init vpu_service_init(void)
	pr_debug("baseaddr = 0x%08x vdpu irq = %d vepu irq = %d\n", VCODEC_PHYS, IRQ_VDPU, IRQ_VEPU);

	wake_lock_init(&service.wake_lock, WAKE_LOCK_SUSPEND, "vpu");
	INIT_LIST_HEAD(&service.waiting);
	INIT_LIST_HEAD(&service.running);
	INIT_LIST_HEAD(&service.done);
	INIT_LIST_HEAD(&service.session);
	mutex_init(&service.lock);
	service.reg_codec = NULL;
	service.reg_pproc = NULL;
	atomic_set(&service.total_running, 0);
	/* block starts powered off; vpu_service_power_on() flips this */
	service.enabled = false;
	INIT_DELAYED_WORK(&service.power_off_work, vpu_power_off_work);

	/* the hw id probe needs the power domain up */
	vpu_service_power_on();
	ret = vpu_service_check_hw(&service, VCODEC_PHYS);
		pr_err("error: hw info check faild\n");	/* sic: "faild" preserved — runtime string */
		goto err_hw_id_check;

	/* decoder core register window */
	atomic_set(&dec_dev.irq_count_codec, 0);
	atomic_set(&dec_dev.irq_count_pp, 0);
	dec_dev.iobaseaddr = service.hw_info->hw_addr + service.hw_info->dec_offset;
	dec_dev.iosize = service.hw_info->dec_io_size;
	/* encoder core register window */
	atomic_set(&enc_dev.irq_count_codec, 0);
	atomic_set(&enc_dev.irq_count_pp, 0);
	enc_dev.iobaseaddr = service.hw_info->hw_addr + service.hw_info->enc_offset;
	enc_dev.iosize = service.hw_info->enc_io_size;;	/* NOTE(review): stray second ';' — harmless but should be dropped */
	service.reg_size = max(dec_dev.iosize, enc_dev.iosize);

	ret = vpu_service_reserve_io();
		pr_err("error: reserve io failed\n");
		goto err_reserve_io;

	/* get the IRQ line */
	ret = request_threaded_irq(IRQ_VDPU, vdpu_irq, vdpu_isr, IRQF_SHARED, "vdpu", (void *)&dec_dev);
		pr_err("error: can't request vdpu irq %d\n", IRQ_VDPU);
		goto err_req_vdpu_irq;

	ret = request_threaded_irq(IRQ_VEPU, vepu_irq, vepu_isr, IRQF_SHARED, "vepu", (void *)&enc_dev);
		pr_err("error: can't request vepu irq %d\n", IRQ_VEPU);
		goto err_req_vepu_irq;

	ret = misc_register(&vpu_service_misc_device);
		pr_err("error: misc_register failed\n");

	platform_device_register(&vpu_service_device);
	platform_driver_probe(&vpu_service_driver, NULL);
	/* stay idle until the first client ioctl powers the block on */
	vpu_service_power_off();
	pr_info("init success\n");

	vpu_service_proc_init();

	/* error unwind in reverse order (err_* labels elided in this excerpt) */
	free_irq(IRQ_VEPU, (void *)&enc_dev);
	free_irq(IRQ_VDPU, (void *)&dec_dev);
	pr_info("init failed\n");
	vpu_service_release_io();
	vpu_service_power_off();
	wake_lock_destroy(&service.wake_lock);
	pr_info("init failed\n");
static void __exit vpu_service_proc_release(void);
/*
 * Module exit: tear everything down in roughly the reverse order of
 * vpu_service_init() — proc entry, power, platform device/driver, misc
 * device, interrupt handlers, io mapping, and finally the wakelock.
 */
static void __exit vpu_service_exit(void)
	vpu_service_proc_release();
	vpu_service_power_off();
	platform_device_unregister(&vpu_service_device);
	platform_driver_unregister(&vpu_service_driver);
	misc_deregister(&vpu_service_misc_device);
	free_irq(IRQ_VEPU, (void *)&enc_dev);
	free_irq(IRQ_VDPU, (void *)&dec_dev);
	vpu_service_release_io();
	wake_lock_destroy(&service.wake_lock);
/* standard module entry/exit hooks */
module_init(vpu_service_init);
module_exit(vpu_service_exit);
1429 #ifdef CONFIG_PROC_FS
1430 #include <linux/proc_fs.h>
1431 #include <linux/seq_file.h>
/*
 * seq_file show handler for /proc/vpu_service.
 *
 * Dumps every encoder then decoder hardware register, followed by the
 * per-session task queues (waiting/running/done). Powers the block on
 * first so the register reads do not hit a gated power domain, and holds
 * service.lock for the whole dump so the lists cannot change underneath.
 * (Loop-variable declarations and closing braces are elided in this
 * excerpt.)
 */
static int proc_vpu_service_show(struct seq_file *s, void *v)
	vpu_reg *reg, *reg_tmp;
	vpu_session *session, *session_tmp;

	mutex_lock(&service.lock);
	vpu_service_power_on();
	seq_printf(s, "\nENC Registers:\n");
	/* iosize >> 2: register count, one 32-bit word per register */
	n = enc_dev.iosize >> 2;
	for (i = 0; i < n; i++) {
		seq_printf(s, "\tswreg%d = %08X\n", i, readl(enc_dev.hwregs + i));
	seq_printf(s, "\nDEC Registers:\n");
	n = dec_dev.iosize >> 2;
	for (i = 0; i < n; i++) {
		seq_printf(s, "\tswreg%d = %08X\n", i, readl(dec_dev.hwregs + i));

	seq_printf(s, "\nvpu service status:\n");
	list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
		seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
		//seq_printf(s, "waiting reg set %d\n");
		list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
			seq_printf(s, "waiting register set\n");
		list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
			seq_printf(s, "running register set\n");
		list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
			seq_printf(s, "done register set\n");

	mutex_unlock(&service.lock);
/* open handler: bind the single_open() seq_file to proc_vpu_service_show */
static int proc_vpu_service_open(struct inode *inode, struct file *file)
	return single_open(file, proc_vpu_service_show, NULL);
1476 static const struct file_operations proc_vpu_service_fops = {
1477 .open = proc_vpu_service_open,
1479 .llseek = seq_lseek,
1480 .release = single_release,
1483 static int __init vpu_service_proc_init(void)
1485 proc_create("vpu_service", 0, NULL, &proc_vpu_service_fops);
/* Remove /proc/vpu_service at module unload (counterpart of proc_init). */
static void __exit vpu_service_proc_release(void)
	remove_proc_entry("vpu_service", NULL);
1493 #endif /* CONFIG_PROC_FS */