1 /* arch/arm/mach-rk29/vpu.c
3 * Copyright (C) 2010 ROCKCHIP, Inc.
4 * author: chenhengming chm@rock-chips.com
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #ifdef CONFIG_RK29_VPU_DEBUG
19 #define pr_fmt(fmt) "VPU_SERVICE: %s: " fmt, __func__
21 #define pr_fmt(fmt) "VPU_SERVICE: " fmt
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
33 #include <linux/ioport.h>
34 #include <linux/miscdevice.h>
36 #include <linux/poll.h>
37 #include <linux/platform_device.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/wakelock.h>
41 #include <linux/rockchip/cpu.h>
42 #include <linux/rockchip/cru.h>
44 #include <asm/uaccess.h>
46 #include <mach/irqs.h>
53 #if defined(CONFIG_ARCH_RK319X)
57 #include "vpu_service.h"
/* Hardware ID / client-type enumerator fragments (enum bodies are only
 * partially visible in this chunk). 0x6731 matches the 9190 decoder ID
 * checked against asicID in get_hw_info() below. */
60 VPU_DEC_ID_9190 = 0x6731,
66 VPU_DEC_TYPE_9190 = 0,
67 VPU_ENC_TYPE_8270 = 0x100,
/* Clock-rate selections consumed by vpu_service_set_freq(). */
71 typedef enum VPU_FREQ {
/* Per-variant hardware description fields: physical base address plus the
 * encoder/decoder register-bank offsets, register counts and IO sizes.
 * Instantiated in vpu_hw_set[] below. */
82 unsigned long hw_addr;
83 unsigned long enc_offset;
84 unsigned long enc_reg_num;
85 unsigned long enc_io_size;
86 unsigned long dec_offset;
87 unsigned long dec_reg_num;
88 unsigned long dec_io_size;
/* Compile-time switch for wall-clock timing of enc/dec/pp jobs. */
91 #define VPU_SERVICE_SHOW_TIME 0
93 #if VPU_SERVICE_SHOW_TIME
94 static struct timeval enc_start, enc_end;
95 static struct timeval dec_start, dec_end;
96 static struct timeval pp_start, pp_end;
99 #define MHZ (1000*1000)
/* Physical base of the vcodec register block; SoC dependent. */
101 #if defined(CONFIG_ARCH_RK319X)
102 #define VCODEC_PHYS RK319X_VCODEC_PHYS
104 #define VCODEC_PHYS (0x10104000)
/* Register counts for the 9190 decoder and post-processor banks; the
 * combined DEC_PP count is their sum. */
107 #define REG_NUM_9190_DEC (60)
108 #define REG_NUM_9190_PP (41)
109 #define REG_NUM_9190_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP)
111 #define REG_NUM_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP)
/* Encoder register counts / byte sizes per encoder hardware revision. */
113 #define REG_NUM_ENC_8270 (96)
114 #define REG_SIZE_ENC_8270 (0x200)
115 #define REG_NUM_ENC_4831 (164)
116 #define REG_SIZE_ENC_4831 (0x400)
/* Convert a register count to a byte size (4 bytes per 32-bit register). */
118 #define SIZE_REG(reg) ((reg)*4)
/* Table of supported VPU hardware variants, keyed by hw_id.
 * vpu_service_check_hw() matches the probed chip ID against this table
 * and points service.hw_info at the winning entry. */
120 VPU_HW_INFO_E vpu_hw_set[] = {
/* 8270 variant: encoder bank first, decoder bank at REG_SIZE_ENC_8270. */
122 .hw_id = VPU_ID_8270,
123 .hw_addr = VCODEC_PHYS,
125 .enc_reg_num = REG_NUM_ENC_8270,
126 .enc_io_size = REG_NUM_ENC_8270 * 4,
127 .dec_offset = REG_SIZE_ENC_8270,
128 .dec_reg_num = REG_NUM_9190_DEC_PP,
129 .dec_io_size = REG_NUM_9190_DEC_PP * 4,
/* 4831 variant: larger encoder bank, same decoder/pp register layout. */
132 .hw_id = VPU_ID_4831,
133 .hw_addr = VCODEC_PHYS,
135 .enc_reg_num = REG_NUM_ENC_4831,
136 .enc_io_size = REG_NUM_ENC_4831 * 4,
137 .dec_offset = REG_SIZE_ENC_4831,
138 .dec_reg_num = REG_NUM_9190_DEC_PP,
139 .dec_io_size = REG_NUM_9190_DEC_PP * 4,
/* Word indices of the interrupt status registers inside each bank. */
144 #define DEC_INTERRUPT_REGISTER 1
145 #define PP_INTERRUPT_REGISTER 60
146 #define ENC_INTERRUPT_REGISTER 1
/* Bits within the interrupt status registers. */
148 #define DEC_INTERRUPT_BIT 0x100
149 #define DEC_BUFFER_EMPTY_BIT 0x4000
150 #define PP_INTERRUPT_BIT 0x100
151 #define ENC_INTERRUPT_BIT 0x1
/* Enable-register indices and clock-gate bits for the encoder, decoder,
 * post-processor and combined dec+pp paths; used by reg_copy_to_hw()
 * to write the gate register before kicking the enable register. */
153 #define VPU_REG_EN_ENC 14
154 #define VPU_REG_ENC_GATE 2
155 #define VPU_REG_ENC_GATE_BIT (1<<4)
157 #define VPU_REG_EN_DEC 1
158 #define VPU_REG_DEC_GATE 2
159 #define VPU_REG_DEC_GATE_BIT (1<<10)
160 #define VPU_REG_EN_PP 0
161 #define VPU_REG_PP_GATE 1
162 #define VPU_REG_PP_GATE_BIT (1<<8)
163 #define VPU_REG_EN_DEC_PP 1
164 #define VPU_REG_DEC_PP_GATE 61
165 #define VPU_REG_DEC_PP_GATE_BIT (1<<8)
/* Per-open-file session state: one vpu_session per userspace client. */
168 * struct for process session which connect to vpu
170 * @author ChenHengming (2011-5-3)
172 typedef struct vpu_session {
173 VPU_CLIENT_TYPE type;
174 /* a linked list of data so we can access them for debugging */
175 struct list_head list_session;
176 /* a linked list of register data waiting for process */
177 struct list_head waiting;
178 /* a linked list of register data in processing */
179 struct list_head running;
180 /* a linked list of register data processed */
181 struct list_head done;
/* Clients sleeping in VPU_IOC_GET_REG wait here; woken by the ISR path. */
182 wait_queue_head_t wait;
/* Number of this session's jobs currently on the hardware. */
184 atomic_t task_running;
188 * struct for process register set
190 * @author ChenHengming (2011-5-4)
/* One queued register set (job). The register payload is allocated
 * immediately after the struct (see reg_init: reg->reg = &reg[1]). */
192 typedef struct vpu_reg {
193 VPU_CLIENT_TYPE type;
195 vpu_session *session;
196 struct list_head session_link; /* link to vpu service session */
197 struct list_head status_link; /* link to register set list */
/* One memory-mapped hardware block (decoder or encoder bank). */
202 typedef struct vpu_device {
/* Interrupt counters incremented in the hard-IRQ handler and drained
 * in the threaded handler (see vdpu_irq / vdpu_isr). */
203 atomic_t irq_count_codec;
204 atomic_t irq_count_pp;
205 unsigned long iobaseaddr;
207 volatile u32 *hwregs;
/* Global driver state: job queues, power management and HW capability
 * info. Lists/fields are protected by service.lock (used throughout). */
210 typedef struct vpu_service_info {
211 struct wake_lock wake_lock;
212 struct delayed_work power_off_work;
214 struct list_head waiting; /* link to link_reg in struct vpu_reg */
215 struct list_head running; /* link to link_reg in struct vpu_reg */
216 struct list_head done; /* link to link_reg in struct vpu_reg */
217 struct list_head session; /* link to list_session in struct vpu_session */
218 atomic_t total_running;
223 VPUHwDecConfig_t dec_config;
224 VPUHwEncConfig_t enc_config;
225 VPU_HW_INFO_E *hw_info;
226 unsigned long reg_size;
/* Currently selected VPU_FREQ, cached to avoid redundant clk_set_rate. */
229 atomic_t freq_status;
/* Userspace ioctl argument: a pointer + size pair (body not fully
 * visible in this chunk). */
232 typedef struct vpu_request
/* Clocks and the two hardware banks; dec_dev also hosts the pp bank. */
238 //static struct clk *pd_video;
239 static struct clk *aclk_vepu;
240 static struct clk *hclk_vepu;
241 //static struct clk *aclk_ddr_vepu;
242 //static struct clk *hclk_cpu_vcodec;
243 static vpu_service_info service;
244 static vpu_device dec_dev;
245 static vpu_device enc_dev;
246 static unsigned int irq_vdpu = IRQ_VDPU;
247 static unsigned int irq_vepu = IRQ_VEPU;
/* NOTE(review): unparenthesized macro bodies — fine in the current
 * `jiffies + VPU_POWER_OFF_DELAY` style uses, but fragile. */
249 #define VPU_POWER_OFF_DELAY 4*HZ /* 4s */
250 #define VPU_TIMEOUT_DELAY 2*HZ /* 2s */
/* Look up the clocks the VPU needs ("clk_vepu", "g_h_vepu"); failures are
 * only logged. The pd_video / ddr / cpu_vcodec clocks are commented out. */
252 static void vpu_get_clk(void)
254 /*pd_video = clk_get(NULL, "pd_video");
255 if (IS_ERR(pd_video)) {
256 pr_err("failed on clk_get pd_video\n");
258 aclk_vepu = clk_get(NULL, "clk_vepu");
259 if (IS_ERR(aclk_vepu)) {
260 pr_err("failed on clk_get aclk_vepu\n");
262 hclk_vepu = clk_get(NULL, "g_h_vepu");
263 if (IS_ERR(hclk_vepu)) {
264 pr_err("failed on clk_get hclk_vepu\n");
/* NOTE(review): the two lookups below appear to still execute even though
 * their clk pointers are commented out above — confirm against full file. */
267 aclk_ddr_vepu = clk_get(NULL, "aclk_ddr_vepu");
268 if (IS_ERR(aclk_ddr_vepu)) {
269 ;//pr_err("failed on clk_get aclk_ddr_vepu\n");
271 hclk_cpu_vcodec = clk_get(NULL, "hclk_cpu_vcodec");
272 if (IS_ERR(hclk_cpu_vcodec)) {
273 ;//pr_err("failed on clk_get hclk_cpu_vcodec\n");
/* Release clock references taken in vpu_get_clk(); body partially
 * elided here, the ddr/cpu_vcodec puts are commented out to match. */
278 static void vpu_put_clk(void)
283 //clk_put(aclk_ddr_vepu);
284 //clk_put(hclk_cpu_vcodec);
/* Hard-reset the VPU after a stuck job: assert the SoC soft resets,
 * then release them in the exact reverse order, and drop any register
 * sets the service still considered in-flight. SoC-specific sequences. */
287 static void vpu_reset(void)
289 #if defined(CONFIG_ARCH_RK29)
290 clk_disable(aclk_ddr_vepu);
291 cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
292 cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
293 cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
294 cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
296 cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
297 cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
298 cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
299 cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
300 clk_enable(aclk_ddr_vepu);
301 #elif defined(CONFIG_ARCH_RK30)
/* RK30 additionally idles the video power domain around the resets. */
302 pmu_set_idle_request(IDLE_REQ_VIDEO, true);
303 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
304 cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
305 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
306 cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
308 cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
309 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
310 cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
311 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
312 pmu_set_idle_request(IDLE_REQ_VIDEO, false);
/* Forget whatever was on the hardware — it is gone after the reset. */
314 service.reg_codec = NULL;
315 service.reg_pproc = NULL;
316 service.reg_resev = NULL;
/* Forward declaration: reg_deinit frees a register set (defined below). */
319 static void reg_deinit(vpu_reg *reg);
/* Free every register set still linked on a session's waiting/running/done
 * lists; used on session close and on timeout cleanup. Caller is expected
 * to hold service.lock (all call sites visible here do). */
320 static void vpu_service_session_clear(vpu_session *session)
323 list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
326 list_for_each_entry_safe(reg, n, &session->running, session_link) {
329 list_for_each_entry_safe(reg, n, &session->done, session_link) {
/* Debug dump: print global running count, the current codec/pp register
 * sets, then walk every session and print its queued register sets. */
334 static void vpu_service_dump(void)
337 vpu_reg *reg, *reg_tmp;
338 vpu_session *session, *session_tmp;
340 running = atomic_read(&service.total_running);
341 printk("total_running %d\n", running);
/* NOTE(review): casting pointers to unsigned int for %x truncates on
 * 64-bit; %p would be the portable form. */
343 printk("reg_codec 0x%.8x\n", (unsigned int)service.reg_codec);
344 printk("reg_pproc 0x%.8x\n", (unsigned int)service.reg_pproc);
345 printk("reg_resev 0x%.8x\n", (unsigned int)service.reg_resev);
347 list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
348 printk("session pid %d type %d:\n", session->pid, session->type);
349 running = atomic_read(&session->task_running);
350 printk("task_running %d\n", running);
351 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
352 printk("waiting register set 0x%.8x\n", (unsigned int)reg);
354 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
355 printk("running register set 0x%.8x\n", (unsigned int)reg);
357 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
358 printk("done register set 0x%.8x\n", (unsigned int)reg);
/* Power the VPU down: no-op if already off; warns (and, per the alert
 * below, delays) if tasks are still running, then gates clocks, drops the
 * power domain on RK29, and releases the wake lock. Called with
 * service.lock held (see vpu_power_off_work). */
363 static void vpu_service_power_off(void)
366 if (!service.enabled) {
370 service.enabled = false;
371 total_running = atomic_read(&service.total_running);
373 pr_alert("alert: power off when %d task running!!\n", total_running);
375 pr_alert("alert: delay 50 ms for running task\n");
379 printk("vpu: power off...");
380 #ifdef CONFIG_ARCH_RK29
381 pmu_set_power_domain(PD_VCODEC, false);
383 //clk_disable(pd_video);
386 //clk_disable(hclk_cpu_vcodec);
387 //clk_disable(aclk_ddr_vepu);
/* Reverse order of the enables in vpu_service_power_on(). */
388 clk_disable_unprepare(hclk_vepu);
389 clk_disable_unprepare(aclk_vepu);
390 wake_unlock(&service.wake_lock);
/* Schedule the deferred power-off VPU_POWER_OFF_DELAY (4 s) from now. */
394 static inline void vpu_queue_power_off_work(void)
396 queue_delayed_work(system_nrt_wq, &service.power_off_work, VPU_POWER_OFF_DELAY);
/* Delayed-work callback: power off if the lock is free, otherwise the
 * device is busy — re-arm the work and try again later. */
399 static void vpu_power_off_work(struct work_struct *work)
401 if (mutex_trylock(&service.lock)) {
402 vpu_service_power_off();
403 mutex_unlock(&service.lock);
405 /* Come back later if the device is busy... */
406 vpu_queue_power_off_work();
/* Power the VPU up (idempotent): push the pending power-off out at most
 * once per second, then if currently disabled enable clocks, select the
 * vcodec clock source on RK319X, raise the RK29 power domain, and hold a
 * wake lock until the matching power-off. */
410 static void vpu_service_power_on(void)
413 ktime_t now = ktime_get();
/* Rate-limit the cancel/re-queue of power_off_work to once a second. */
414 if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
415 cancel_delayed_work_sync(&service.power_off_work);
416 vpu_queue_power_off_work();
422 service.enabled = true;
423 printk("vpu: power on\n");
425 clk_prepare_enable(aclk_vepu);
426 clk_prepare_enable(hclk_vepu);
427 //clk_prepare_enable(hclk_cpu_vcodec);
428 #if defined(CONFIG_ARCH_RK319X)
429 /// select aclk_vepu as vcodec clock source.
430 #define BIT_VCODEC_SEL (1<<7)
/* GRF write-enable mask convention: high half-word enables the bit. */
431 writel_relaxed(readl_relaxed(RK319X_GRF_BASE + GRF_SOC_CON1) | (BIT_VCODEC_SEL) | (BIT_VCODEC_SEL << 16), RK319X_GRF_BASE + GRF_SOC_CON1);
434 #ifdef CONFIG_ARCH_RK29
435 pmu_set_power_domain(PD_VCODEC, true);
437 //clk_enable(pd_video);
440 //clk_enable(aclk_ddr_vepu);
441 wake_lock(&service.wake_lock);
/* True when decode-format field (reg[3] bits 31:28) is 8 or 4 — used by
 * reg_init() to pick a lower clock for these formats. */
444 static inline bool reg_check_rmvb_wmv(vpu_reg *reg)
446 unsigned long type = (reg->reg[3] & 0xF0000000) >> 28;
447 return ((type == 8) || (type == 4));
/* True when the interlace bit (reg[3] bit 23) is set. */
450 static inline bool reg_check_interlace(vpu_reg *reg)
452 unsigned long type = (reg->reg[3] & (1 << 23));
/* Allocate a vpu_reg plus its register payload in one kmalloc, copy the
 * userspace register set into it (clamped to hw reg_size), queue it on
 * both the service and session waiting lists, and pre-select a clock
 * frequency when auto_freq is enabled. Returns NULL on failure.
 * NOTE(review): the `®` sequences below look like mojibake for `&reg`
 * (e.g. `&reg->reg[0]`, `INIT_LIST_HEAD(&reg->session_link)`) — confirm
 * against the pristine file before building. */
456 static vpu_reg *reg_init(vpu_session *session, void __user *src, unsigned long size)
458 vpu_reg *reg = kmalloc(sizeof(vpu_reg)+service.reg_size, GFP_KERNEL);
460 pr_err("error: kmalloc fail in reg_init\n");
464 if (size > service.reg_size) {
465 printk("warning: vpu reg size %lu is larger than hw reg size %lu\n", size, service.reg_size);
466 size = service.reg_size;
468 reg->session = session;
469 reg->type = session->type;
471 reg->freq = VPU_FREQ_DEFAULT;
/* Payload lives directly after the struct header. */
472 reg->reg = (unsigned long *)&reg[1];
473 INIT_LIST_HEAD(&reg->session_link);
474 INIT_LIST_HEAD(&reg->status_link);
476 if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
477 pr_err("error: copy_from_user failed in reg_init\n");
482 mutex_lock(&service.lock);
483 list_add_tail(&reg->status_link, &service.waiting);
484 list_add_tail(&reg->session_link, &session->waiting);
485 mutex_unlock(&service.lock);
/* Frequency heuristic: rmvb/wmv decode runs at 200M, interlaced content
 * and pure post-processing at 400M; rk2928g is excluded. */
487 if (service.auto_freq) {
488 if (!soc_is_rk2928g()) {
489 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
490 if (reg_check_rmvb_wmv(reg)) {
491 reg->freq = VPU_FREQ_200M;
493 if (reg_check_interlace(reg)) {
494 reg->freq = VPU_FREQ_400M;
498 if (reg->type == VPU_PP) {
499 reg->freq = VPU_FREQ_400M;
/* Unlink a register set from both lists, clear any dangling pointers the
 * service keeps to it, and (in the elided tail) free it.
 * NOTE(review): `®` below is apparent mojibake for `&reg` — verify. */
507 static void reg_deinit(vpu_reg *reg)
509 list_del_init(&reg->session_link);
510 list_del_init(&reg->status_link);
511 if (reg == service.reg_codec) service.reg_codec = NULL;
512 if (reg == service.reg_pproc) service.reg_pproc = NULL;
/* Move a register set from the waiting lists onto the running lists
 * (both the service-wide list and its session's list).
 * NOTE(review): `®` below is apparent mojibake for `&reg` — verify. */
516 static void reg_from_wait_to_run(vpu_reg *reg)
518 list_del_init(&reg->status_link);
519 list_add_tail(&reg->status_link, &service.running);
521 list_del_init(&reg->session_link);
522 list_add_tail(&reg->session_link, &reg->session->running);
/* Read back `count` 32-bit registers from the memory-mapped bank into
 * the job's register payload. */
525 static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
528 u32 *dst = (u32 *)&reg->reg[0];
529 for (i = 0; i < count; i++)
/* Retire a finished job: move it to the done lists, copy results back
 * from the appropriate hardware bank by job type, clear the service's
 * current-job pointer(s), drop the running counters and wake the
 * session's waiter. NOTE(review): `®` is apparent mojibake for `&reg`. */
533 static void reg_from_run_to_done(vpu_reg *reg)
535 list_del_init(&reg->status_link);
536 list_add_tail(&reg->status_link, &service.done);
538 list_del_init(&reg->session_link);
539 list_add_tail(&reg->session_link, &reg->session->done);
/* Encoder job: read back the full encoder bank. */
543 service.reg_codec = NULL;
544 reg_copy_from_hw(reg, enc_dev.hwregs, service.hw_info->enc_reg_num);
/* Decoder job: read back the decoder registers only. */
548 service.reg_codec = NULL;
549 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_9190_DEC);
/* Post-processor job: pp registers start at PP_INTERRUPT_REGISTER;
 * clear its interrupt register afterwards. */
553 service.reg_pproc = NULL;
554 reg_copy_from_hw(reg, dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_9190_PP);
555 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
/* Combined dec+pp job occupies both slots. */
559 service.reg_codec = NULL;
560 service.reg_pproc = NULL;
561 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_9190_DEC_PP);
562 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
566 pr_err("error: copy reg from hw with unknown type %d\n", reg->type);
570 atomic_sub(1, &reg->session->task_running);
571 atomic_sub(1, &service.total_running);
572 wake_up(&reg->session->wait);
/* Apply the job's requested clock rate to aclk_vepu, skipping the
 * clk_set_rate when the cached freq_status already matches. The default
 * case picks 400M on rk2928g and 300M elsewhere. */
575 static void vpu_service_set_freq(vpu_reg *reg)
577 VPU_FREQ curr = atomic_read(&service.freq_status);
578 if (curr == reg->freq) {
581 atomic_set(&service.freq_status, reg->freq);
583 case VPU_FREQ_200M : {
584 clk_set_rate(aclk_vepu, 200*MHZ);
585 //printk("default: 200M\n");
587 case VPU_FREQ_266M : {
588 clk_set_rate(aclk_vepu, 266*MHZ);
589 //printk("default: 266M\n");
591 case VPU_FREQ_300M : {
592 clk_set_rate(aclk_vepu, 300*MHZ);
593 //printk("default: 300M\n");
595 case VPU_FREQ_400M : {
596 clk_set_rate(aclk_vepu, 400*MHZ);
597 //printk("default: 400M\n");
600 if (soc_is_rk2928g()) {
601 clk_set_rate(aclk_vepu, 400*MHZ);
603 clk_set_rate(aclk_vepu, 300*MHZ);
605 //printk("default: 300M\n");
/* Program a job's register payload into the hardware bank for its type
 * and kick the enable register last. Bumps the running counters first
 * and applies the per-job clock rate when auto_freq is on.
 * NOTE(review): `®` below is apparent mojibake for `&reg` — verify. */
610 static void reg_copy_to_hw(vpu_reg *reg)
613 u32 *src = (u32 *)&reg->reg[0];
614 atomic_add(1, &service.total_running);
615 atomic_add(1, &reg->session->task_running);
616 if (service.auto_freq) {
617 vpu_service_set_freq(reg);
/* --- encoder path --- */
621 int enc_count = service.hw_info->enc_reg_num;
622 u32 *dst = (u32 *)enc_dev.hwregs;
/* Workaround (bug_dec_addr): soft-reset the codec before the encode. */
624 if (service.bug_dec_addr) {
625 #if !defined(CONFIG_ARCH_RK319X)
626 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
628 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
629 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
630 #if !defined(CONFIG_ARCH_RK319X)
631 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
635 service.reg_codec = reg;
/* Write the enable register masked first, fill every other register,
 * then gate + full enable value to start the encoder. */
637 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
639 for (i = 0; i < VPU_REG_EN_ENC; i++)
642 for (i = VPU_REG_EN_ENC + 1; i < enc_count; i++)
647 dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
648 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC];
650 #if VPU_SERVICE_SHOW_TIME
651 do_gettimeofday(&enc_start);
/* --- decoder path: fill registers top-down, enable last --- */
656 u32 *dst = (u32 *)dec_dev.hwregs;
657 service.reg_codec = reg;
659 for (i = REG_NUM_9190_DEC - 1; i > VPU_REG_DEC_GATE; i--)
664 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
665 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
667 #if VPU_SERVICE_SHOW_TIME
668 do_gettimeofday(&dec_start);
/* --- post-processor path: pp bank starts at PP_INTERRUPT_REGISTER --- */
673 u32 *dst = (u32 *)dec_dev.hwregs + PP_INTERRUPT_REGISTER;
674 service.reg_pproc = reg;
676 dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
678 for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_9190_PP; i++)
683 dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
685 #if VPU_SERVICE_SHOW_TIME
686 do_gettimeofday(&pp_start);
/* --- combined dec+pp path: claims both codec and pproc slots --- */
691 u32 *dst = (u32 *)dec_dev.hwregs;
692 service.reg_codec = reg;
693 service.reg_pproc = reg;
695 for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_9190_DEC_PP; i++)
698 dst[VPU_REG_EN_DEC_PP] = src[VPU_REG_EN_DEC_PP] | 0x2;
701 dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
702 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
703 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
705 #if VPU_SERVICE_SHOW_TIME
706 do_gettimeofday(&dec_start);
/* Unknown type: undo the counters bumped at the top. */
711 pr_err("error: unsupport session type %d", reg->type);
712 atomic_sub(1, &service.total_running);
713 atomic_sub(1, &reg->session->task_running);
/* Scheduler: if the waiting list is non-empty, power on and decide from
 * the head job's type and the current reg_codec/reg_pproc occupancy
 * whether the hardware is free to take it; if so move it to running
 * (reg_from_wait_to_run) and program it. Interleaved branches are only
 * partially visible in this chunk. */
719 static void try_set_reg(void)
721 // first get reg from reg list
722 if (!list_empty(&service.waiting)) {
724 vpu_reg *reg = list_entry(service.waiting.next, vpu_reg, status_link);
726 vpu_service_power_on();
/* Both engine slots free → any job may start. */
730 if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
734 if (NULL == service.reg_codec)
/* With auto_freq, don't start alongside a running pp job (can't
 * change frequency while hardware is busy — see comment below). */
736 if (service.auto_freq && (NULL != service.reg_pproc)) {
741 if (NULL == service.reg_codec) {
742 if (NULL == service.reg_pproc)
745 if ((VPU_DEC == service.reg_codec->type) && (NULL == service.reg_pproc))
747 // can not charge frequency when vpu is working
748 if (service.auto_freq) {
754 if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
758 printk("undefined reg type %d\n", reg->type);
762 reg_from_wait_to_run(reg);
/* Copy a finished job's registers back to userspace; the byte count
 * depends on the job type (encoder IO size, or the dec/pp/dec+pp
 * register counts). NOTE(review): `®` is apparent mojibake for `&reg`. */
768 static int return_reg(vpu_reg *reg, u32 __user *dst)
773 if (copy_to_user(dst, &reg->reg[0], service.hw_info->enc_io_size))
778 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_9190_DEC)))
783 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_9190_PP)))
788 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_9190_DEC_PP)))
794 pr_err("error: copy reg to user with unknown type %d\n", reg->type);
/* ioctl entry point. Commands visible here:
 *  VPU_IOC_SET_CLIENT_TYPE  — record the session's client type from arg.
 *  VPU_IOC_GET_HW_FUSE_STATUS — copy dec_config or enc_config out.
 *  VPU_IOC_SET_REG          — queue a register set (reg_init) and kick
 *                             the scheduler under service.lock.
 *  VPU_IOC_GET_REG          — wait (VPU_TIMEOUT_DELAY) for a done job;
 *                             on timeout reset the hardware and clear the
 *                             session; otherwise return the head done job. */
802 static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
804 vpu_session *session = (vpu_session *)filp->private_data;
805 if (NULL == session) {
810 case VPU_IOC_SET_CLIENT_TYPE : {
811 session->type = (VPU_CLIENT_TYPE)arg;
814 case VPU_IOC_GET_HW_FUSE_STATUS : {
816 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
817 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
/* Non-encoder sessions get the decoder config, encoders the encoder's. */
820 if (VPU_ENC != session->type) {
821 if (copy_to_user((void __user *)req.req, &service.dec_config, sizeof(VPUHwDecConfig_t))) {
822 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
826 if (copy_to_user((void __user *)req.req, &service.enc_config, sizeof(VPUHwEncConfig_t))) {
827 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
835 case VPU_IOC_SET_REG : {
838 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
839 pr_err("error: VPU_IOC_SET_REG copy_from_user failed\n");
842 reg = reg_init(session, (void __user *)req.req, req.size);
846 mutex_lock(&service.lock);
848 mutex_unlock(&service.lock);
853 case VPU_IOC_GET_REG : {
856 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
857 pr_err("error: VPU_IOC_GET_REG copy_from_user failed\n");
860 int ret = wait_event_timeout(session->wait, !list_empty(&session->done), VPU_TIMEOUT_DELAY);
/* Job finished even if wait_event_timeout reported oddly — proceed. */
861 if (!list_empty(&session->done)) {
863 pr_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session->pid, ret);
867 if (unlikely(ret < 0)) {
868 pr_err("error: pid %d wait task ret %d\n", session->pid, ret);
869 } else if (0 == ret) {
870 pr_err("error: pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
/* Timeout recovery: zero the counters, reset hardware, flush session. */
875 int task_running = atomic_read(&session->task_running);
876 mutex_lock(&service.lock);
879 atomic_set(&session->task_running, 0);
880 atomic_sub(task_running, &service.total_running);
881 printk("%d task is running but not return, reset hardware...", task_running);
885 vpu_service_session_clear(session);
886 mutex_unlock(&service.lock);
890 mutex_lock(&service.lock);
891 reg = list_entry(session->done.next, vpu_reg, session_link);
892 return_reg(reg, (u32 __user *)req.req);
893 mutex_unlock(&service.lock);
897 pr_err("error: unknow vpu service ioctl cmd %x\n", cmd);
/* Temporarily map the first hardware word, read the chip ID from its
 * upper 16 bits, and match it against vpu_hw_set[]; on a match point
 * p->hw_info at the entry. Returns -EINVAL if no table entry matches. */
905 static int vpu_service_check_hw(vpu_service_info *p, unsigned long hw_addr)
907 int ret = -EINVAL, i = 0;
908 volatile u32 *tmp = (volatile u32 *)ioremap_nocache(hw_addr, 0x4);
910 enc_id = (enc_id >> 16) & 0xFFFF;
911 pr_info("checking hw id %x\n", enc_id);
913 for (i = 0; i < ARRAY_SIZE(vpu_hw_set); i++) {
914 if (enc_id == vpu_hw_set[i].hw_id) {
915 p->hw_info = &vpu_hw_set[i];
920 iounmap((void *)tmp);
/* Undo vpu_service_reserve_io(): unmap and release both banks, clearing
 * each field so a partial-failure caller can invoke this safely. */
924 static void vpu_service_release_io(void)
926 if (dec_dev.hwregs) {
927 iounmap((void *)dec_dev.hwregs);
928 dec_dev.hwregs = NULL;
930 if (dec_dev.iobaseaddr) {
931 release_mem_region(dec_dev.iobaseaddr, dec_dev.iosize);
932 dec_dev.iobaseaddr = 0;
936 if (enc_dev.hwregs) {
937 iounmap((void *)enc_dev.hwregs);
938 enc_dev.hwregs = NULL;
940 if (enc_dev.iobaseaddr) {
941 release_mem_region(enc_dev.iobaseaddr, enc_dev.iosize);
942 enc_dev.iobaseaddr = 0;
/* Claim and map the decoder ("vdpu_io") and encoder ("vepu_io") register
 * regions; error paths (elided here) are expected to unwind via
 * vpu_service_release_io(). */
947 static int vpu_service_reserve_io(void)
949 unsigned long iobaseaddr;
950 unsigned long iosize;
952 iobaseaddr = dec_dev.iobaseaddr;
953 iosize = dec_dev.iosize;
955 if (!request_mem_region(iobaseaddr, iosize, "vdpu_io")) {
956 pr_info("failed to reserve dec HW regs\n");
960 dec_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
962 if (dec_dev.hwregs == NULL) {
963 pr_info("failed to ioremap dec HW regs\n");
967 iobaseaddr = enc_dev.iobaseaddr;
968 iosize = enc_dev.iosize;
970 if (!request_mem_region(iobaseaddr, iosize, "vepu_io")) {
971 pr_info("failed to reserve enc HW regs\n");
975 enc_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
977 if (enc_dev.hwregs == NULL) {
978 pr_info("failed to ioremap enc HW regs\n");
/* open(): allocate and initialize a session, register it on the global
 * session list under service.lock, and stash it in filp->private_data. */
988 static int vpu_service_open(struct inode *inode, struct file *filp)
990 vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
991 if (NULL == session) {
992 pr_err("error: unable to allocate memory for vpu_session.");
/* Type is unset until VPU_IOC_SET_CLIENT_TYPE. */
996 session->type = VPU_TYPE_BUTT;
997 session->pid = current->pid;
998 INIT_LIST_HEAD(&session->waiting);
999 INIT_LIST_HEAD(&session->running);
1000 INIT_LIST_HEAD(&session->done);
1001 INIT_LIST_HEAD(&session->list_session);
1002 init_waitqueue_head(&session->wait);
1003 atomic_set(&session->task_running, 0);
1004 mutex_lock(&service.lock);
1005 list_add_tail(&session->list_session, &service.session);
1006 filp->private_data = (void *)session;
1007 mutex_unlock(&service.lock);
1009 pr_debug("dev opened\n");
1010 return nonseekable_open(inode, filp);
/* release(): warn if the session still has tasks on the hardware, wake
 * any waiter, then unlink the session and free its queued register sets
 * under service.lock. */
1013 static int vpu_service_release(struct inode *inode, struct file *filp)
1016 vpu_session *session = (vpu_session *)filp->private_data;
1017 if (NULL == session)
1020 task_running = atomic_read(&session->task_running);
1022 pr_err("error: vpu_service session %d still has %d task running when closing\n", session->pid, task_running);
1025 wake_up(&session->wait);
1027 mutex_lock(&service.lock);
1028 /* remove this filp from the asynchronusly notified filp's */
1029 list_del_init(&session->list_session);
1030 vpu_service_session_clear(session);
1032 filp->private_data = NULL;
1033 mutex_unlock(&service.lock);
1035 pr_debug("dev closed\n");
/* Character-device plumbing: fops, the /dev/vpu_service misc device, and
 * the platform device/driver pair used for IRQ lookup in vpu_probe(). */
1039 static const struct file_operations vpu_service_fops = {
1040 .unlocked_ioctl = vpu_service_ioctl,
1041 .open = vpu_service_open,
1042 .release = vpu_service_release,
1043 //.fasync = vpu_service_fasync,
1046 static struct miscdevice vpu_service_misc_device = {
1047 .minor = MISC_DYNAMIC_MINOR,
1048 .name = "vpu_service",
1049 .fops = &vpu_service_fops,
1052 static struct platform_device vpu_service_device = {
1053 .name = "vpu_service",
/* probe: fetch the named decoder and encoder IRQs from resources. */
1057 static int vpu_probe(struct platform_device *pdev)
1061 irq = platform_get_irq_byname(pdev, "irq_vdpu");
1064 irq = platform_get_irq_byname(pdev, "irq_vepu");
1071 static struct platform_driver vpu_driver = {
1075 .owner = THIS_MODULE,
1079 static struct platform_driver vpu_service_driver = {
1081 .name = "vpu_service",
1082 .owner = THIS_MODULE,
/* Probe decoder/encoder capability registers and populate
 * service.dec_config / service.enc_config. Reads the synthesis-config
 * words, then (except on RK319X) masks capabilities down by the fuse
 * registers. The version checks against 0x8190/0x9170/0x6731 gate the
 * JPEG-extension, RV and refBuf-offset features. */
1086 static void get_hw_info(void)
1088 VPUHwDecConfig_t *dec = &service.dec_config;
1089 VPUHwEncConfig_t *enc = &service.enc_config;
1090 u32 configReg = dec_dev.hwregs[VPU_DEC_HWCFG0];
1091 u32 asicID = dec_dev.hwregs[0];
/* --- 1st synthesis config word: codec support bits --- */
1093 dec->h264Support = (configReg >> DWL_H264_E) & 0x3U;
1094 dec->jpegSupport = (configReg >> DWL_JPEG_E) & 0x01U;
1095 if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
1096 dec->jpegSupport = JPEG_PROGRESSIVE;
1097 dec->mpeg4Support = (configReg >> DWL_MPEG4_E) & 0x3U;
1098 dec->vc1Support = (configReg >> DWL_VC1_E) & 0x3U;
1099 dec->mpeg2Support = (configReg >> DWL_MPEG2_E) & 0x01U;
1100 dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
1101 dec->refBufSupport = (configReg >> DWL_REF_BUFF_E) & 0x01U;
1102 dec->vp6Support = (configReg >> DWL_VP6_E) & 0x01U;
1103 #if !defined(CONFIG_ARCH_RK319X)
1104 /// invalidate max decode picture width value in rk319x vpu
1105 dec->maxDecPicWidth = configReg & 0x07FFU;
1107 dec->maxDecPicWidth = 3840;
1110 /* 2nd Config register */
1111 configReg = dec_dev.hwregs[VPU_DEC_HWCFG1];
1112 if (dec->refBufSupport) {
1113 if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
1114 dec->refBufSupport |= 2;
1115 if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
1116 dec->refBufSupport |= 4;
1118 dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
1119 dec->vp7Support = (configReg >> DWL_VP7_E) & 0x01U;
1120 dec->vp8Support = (configReg >> DWL_VP8_E) & 0x01U;
1121 dec->avsSupport = (configReg >> DWL_AVS_E) & 0x01U;
1123 /* JPEG xtensions */
1124 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
1125 dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
1127 dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
1130 if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
1131 dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
1133 dec->rvSupport = RV_NOT_SUPPORTED;
1136 dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;
1138 if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
1139 dec->refBufSupport |= 8; /* enable HW support for offset */
/* --- fuse registers (skipped on RK319X per the comment below) --- */
1142 #if !defined(CONFIG_ARCH_RK319X)
1143 /// invalidate fuse register value in rk319x vpu
1145 VPUHwFuseStatus_t hwFuseSts;
1146 /* Decoder fuse configuration */
1147 u32 fuseReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
1149 hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
1150 hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
1151 hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
1152 hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
1153 hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
1154 hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
1155 hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
1156 hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
1157 hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
1158 hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
1159 hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
1160 hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
1161 hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
1162 hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;
1164 /* check max. decoder output width */
1166 if (fuseReg & 0x8000U)
1167 hwFuseSts.maxDecPicWidthFuse = 1920;
1168 else if (fuseReg & 0x4000U)
1169 hwFuseSts.maxDecPicWidthFuse = 1280;
1170 else if (fuseReg & 0x2000U)
1171 hwFuseSts.maxDecPicWidthFuse = 720;
1172 else if (fuseReg & 0x1000U)
1173 hwFuseSts.maxDecPicWidthFuse = 352;
1174 else /* remove warning */
1175 hwFuseSts.maxDecPicWidthFuse = 352;
1177 hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;
1179 /* Pp configuration */
1180 configReg = dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];
1182 if ((configReg >> DWL_PP_E) & 0x01U) {
1184 dec->maxPpOutPicWidth = configReg & 0x07FFU;
1185 /*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
1186 dec->ppConfig = configReg;
1189 dec->maxPpOutPicWidth = 0;
1193 /* check the HW versio */
1194 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
1195 /* Pp configuration */
1196 configReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
1198 if ((configReg >> DWL_PP_E) & 0x01U) {
1199 /* Pp fuse configuration */
1200 u32 fuseRegPp = dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];
1202 if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
1203 hwFuseSts.ppSupportFuse = 1;
1204 /* check max. pp output width */
1205 if (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920;
1206 else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280;
1207 else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720;
1208 else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352;
1209 else hwFuseSts.maxPpOutPicWidthFuse = 352;
1210 hwFuseSts.ppConfigFuse = fuseRegPp;
1212 hwFuseSts.ppSupportFuse = 0;
1213 hwFuseSts.maxPpOutPicWidthFuse = 0;
1214 hwFuseSts.ppConfigFuse = 0;
1217 hwFuseSts.ppSupportFuse = 0;
1218 hwFuseSts.maxPpOutPicWidthFuse = 0;
1219 hwFuseSts.ppConfigFuse = 0;
/* --- clamp synthesized capabilities down by the fuse values --- */
1222 if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse)
1223 dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse;
1224 if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse)
1225 dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse;
1226 if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED;
1227 if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED;
1228 if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED;
1229 if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED;
1230 if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse)
1231 dec->jpegSupport = JPEG_BASELINE;
1232 if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED;
1233 if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED;
1234 if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED;
1235 if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED;
1236 if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED;
1237 if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED;
1239 /* check the pp config vs fuse status */
1240 if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) {
1241 u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25);
1242 u32 alphaBlend = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24);
1243 u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25);
1244 u32 alphaBlendFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24);
1246 if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000;
1247 if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000;
1249 if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED;
1250 if (!hwFuseSts.refBufSupportFuse) dec->refBufSupport = REF_BUF_NOT_SUPPORTED;
1251 if (!hwFuseSts.rvSupportFuse) dec->rvSupport = RV_NOT_SUPPORTED;
1252 if (!hwFuseSts.avsSupportFuse) dec->avsSupport = AVS_NOT_SUPPORTED;
1253 if (!hwFuseSts.mvcSupportFuse) dec->mvcSupport = MVC_NOT_SUPPORTED;
/* --- encoder capabilities from encoder bank register 63 --- */
1257 configReg = enc_dev.hwregs[63];
1258 enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
1259 enc->h264Enabled = (configReg >> 27) & 1;
1260 enc->mpeg4Enabled = (configReg >> 26) & 1;
1261 enc->jpegEnabled = (configReg >> 25) & 1;
1262 enc->vsEnabled = (configReg >> 24) & 1;
1263 enc->rgbEnabled = (configReg >> 28) & 1;
1264 //enc->busType = (configReg >> 20) & 15;
1265 //enc->synthesisLanguage = (configReg >> 16) & 15;
1266 //enc->busWidth = (configReg >> 12) & 15;
1267 enc->reg_size = service.reg_size;
1268 enc->reserv[0] = enc->reserv[1] = 0;
/* Auto-frequency and the 30xx decode-address workaround are SoC gated. */
1270 service.auto_freq = soc_is_rk2928g() || soc_is_rk2928l() || soc_is_rk2926();
1271 if (service.auto_freq) {
1272 printk("vpu_service set to auto frequency mode\n");
1273 atomic_set(&service.freq_status, VPU_FREQ_BUT);
1275 service.bug_dec_addr = cpu_is_rk30xx();
1276 //printk("cpu 3066b bug %d\n", service.bug_dec_addr);
1279 static irqreturn_t vdpu_irq(int irq, void *dev_id)
1281 vpu_device *dev = (vpu_device *) dev_id;
1282 u32 irq_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1284 pr_debug("vdpu_irq\n");
1286 if (irq_status & DEC_INTERRUPT_BIT) {
1287 pr_debug("vdpu_isr dec %x\n", irq_status);
1288 if ((irq_status & 0x40001) == 0x40001)
1291 irq_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1292 } while ((irq_status & 0x40001) == 0x40001);
1295 writel(irq_status & (~DEC_INTERRUPT_BIT|DEC_BUFFER_EMPTY_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER);
1296 atomic_add(1, &dev->irq_count_codec);
1299 irq_status = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
1300 if (irq_status & PP_INTERRUPT_BIT) {
1301 pr_debug("vdpu_isr pp %x\n", irq_status);
1303 writel(irq_status & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
1304 atomic_add(1, &dev->irq_count_pp);
1307 return IRQ_WAKE_THREAD;
1310 static irqreturn_t vdpu_isr(int irq, void *dev_id)
1312 vpu_device *dev = (vpu_device *) dev_id;
1314 mutex_lock(&service.lock);
1315 if (atomic_read(&dev->irq_count_codec)) {
1316 #if VPU_SERVICE_SHOW_TIME
1317 do_gettimeofday(&dec_end);
1318 printk("dec task: %ld ms\n",
1319 (dec_end.tv_sec - dec_start.tv_sec) * 1000 +
1320 (dec_end.tv_usec - dec_start.tv_usec) / 1000);
1322 atomic_sub(1, &dev->irq_count_codec);
1323 if (NULL == service.reg_codec) {
1324 pr_err("error: dec isr with no task waiting\n");
1326 reg_from_run_to_done(service.reg_codec);
1330 if (atomic_read(&dev->irq_count_pp)) {
1332 #if VPU_SERVICE_SHOW_TIME
1333 do_gettimeofday(&pp_end);
1334 printk("pp task: %ld ms\n",
1335 (pp_end.tv_sec - pp_start.tv_sec) * 1000 +
1336 (pp_end.tv_usec - pp_start.tv_usec) / 1000);
1339 atomic_sub(1, &dev->irq_count_pp);
1340 if (NULL == service.reg_pproc) {
1341 pr_err("error: pp isr with no task waiting\n");
1343 reg_from_run_to_done(service.reg_pproc);
1347 mutex_unlock(&service.lock);
1351 static irqreturn_t vepu_irq(int irq, void *dev_id)
1353 struct vpu_device *dev = (struct vpu_device *) dev_id;
1354 u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);
1356 pr_debug("vepu_irq irq status %x\n", irq_status);
1358 #if VPU_SERVICE_SHOW_TIME
1359 do_gettimeofday(&enc_end);
1360 printk("enc task: %ld ms\n",
1361 (enc_end.tv_sec - enc_start.tv_sec) * 1000 +
1362 (enc_end.tv_usec - enc_start.tv_usec) / 1000);
1365 if (likely(irq_status & ENC_INTERRUPT_BIT)) {
1367 writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
1368 atomic_add(1, &dev->irq_count_codec);
1371 return IRQ_WAKE_THREAD;
1374 static irqreturn_t vepu_isr(int irq, void *dev_id)
1376 struct vpu_device *dev = (struct vpu_device *) dev_id;
1378 mutex_lock(&service.lock);
1379 if (atomic_read(&dev->irq_count_codec)) {
1380 atomic_sub(1, &dev->irq_count_codec);
1381 if (NULL == service.reg_codec) {
1382 pr_err("error: enc isr with no task waiting\n");
1384 reg_from_run_to_done(service.reg_codec);
1388 mutex_unlock(&service.lock);
#ifdef CONFIG_PROC_FS
/* Real implementation lives at the bottom of this file. */
static int __init vpu_service_proc_init(void);
#else
/* No procfs: keep the call site in vpu_service_init() compiling. */
static inline int vpu_service_proc_init(void) { return 0; }
#endif
1398 static int __init vpu_service_init(void)
1402 platform_driver_register(&vpu_driver);
1403 pr_debug("baseaddr = 0x%08x vdpu irq = %d vepu irq = %d\n", VCODEC_PHYS, irq_vdpu, irq_vepu);
1405 wake_lock_init(&service.wake_lock, WAKE_LOCK_SUSPEND, "vpu");
1406 INIT_LIST_HEAD(&service.waiting);
1407 INIT_LIST_HEAD(&service.running);
1408 INIT_LIST_HEAD(&service.done);
1409 INIT_LIST_HEAD(&service.session);
1410 mutex_init(&service.lock);
1411 service.reg_codec = NULL;
1412 service.reg_pproc = NULL;
1413 atomic_set(&service.total_running, 0);
1414 service.enabled = false;
1418 INIT_DELAYED_WORK(&service.power_off_work, vpu_power_off_work);
1420 vpu_service_power_on();
1422 ret = vpu_service_check_hw(&service, VCODEC_PHYS);
1424 pr_err("error: hw info check faild\n");
1425 goto err_hw_id_check;
1428 atomic_set(&dec_dev.irq_count_codec, 0);
1429 atomic_set(&dec_dev.irq_count_pp, 0);
1430 dec_dev.iobaseaddr = service.hw_info->hw_addr + service.hw_info->dec_offset;
1431 dec_dev.iosize = service.hw_info->dec_io_size;
1432 atomic_set(&enc_dev.irq_count_codec, 0);
1433 atomic_set(&enc_dev.irq_count_pp, 0);
1434 enc_dev.iobaseaddr = service.hw_info->hw_addr + service.hw_info->enc_offset;
1435 enc_dev.iosize = service.hw_info->enc_io_size;;
1436 service.reg_size = max(dec_dev.iosize, enc_dev.iosize);
1438 ret = vpu_service_reserve_io();
1440 pr_err("error: reserve io failed\n");
1441 goto err_reserve_io;
1444 /* get the IRQ line */
1445 ret = request_threaded_irq(irq_vdpu, vdpu_irq, vdpu_isr, 0, "vdpu", (void *)&dec_dev);
1447 pr_err("error: can't request vdpu irq %d\n", irq_vdpu);
1448 goto err_req_vdpu_irq;
1451 ret = request_threaded_irq(irq_vepu, vepu_irq, vepu_isr, 0, "vepu", (void *)&enc_dev);
1453 pr_err("error: can't request vepu irq %d\n", irq_vepu);
1454 goto err_req_vepu_irq;
1457 ret = misc_register(&vpu_service_misc_device);
1459 pr_err("error: misc_register failed\n");
1463 platform_device_register(&vpu_service_device);
1464 platform_driver_probe(&vpu_service_driver, NULL);
1466 vpu_service_power_off();
1467 pr_info("init success\n");
1469 vpu_service_proc_init();
1473 free_irq(irq_vepu, (void *)&enc_dev);
1475 free_irq(irq_vdpu, (void *)&dec_dev);
1477 pr_info("init failed\n");
1479 vpu_service_release_io();
1481 vpu_service_power_off();
1483 wake_lock_destroy(&service.wake_lock);
1484 pr_info("init failed\n");
#ifdef CONFIG_PROC_FS
/* Real implementation lives at the bottom of this file. */
static void __exit vpu_service_proc_release(void);
#else
/* No procfs: keep the call site in vpu_service_exit() compiling. */
#define vpu_service_proc_release() do {} while (0)
#endif
1494 static void __exit vpu_service_exit(void)
1496 vpu_service_proc_release();
1497 vpu_service_power_off();
1498 platform_device_unregister(&vpu_service_device);
1499 platform_driver_unregister(&vpu_service_driver);
1500 misc_deregister(&vpu_service_misc_device);
1501 free_irq(irq_vepu, (void *)&enc_dev);
1502 free_irq(irq_vdpu, (void *)&dec_dev);
1503 vpu_service_release_io();
1505 wake_lock_destroy(&service.wake_lock);
1508 module_init(vpu_service_init);
1509 module_exit(vpu_service_exit);
1511 #ifdef CONFIG_PROC_FS
1512 #include <linux/proc_fs.h>
1513 #include <linux/seq_file.h>
1515 static int proc_vpu_service_show(struct seq_file *s, void *v)
1518 vpu_reg *reg, *reg_tmp;
1519 vpu_session *session, *session_tmp;
1521 mutex_lock(&service.lock);
1522 vpu_service_power_on();
1523 seq_printf(s, "\nENC Registers:\n");
1524 n = enc_dev.iosize >> 2;
1525 for (i = 0; i < n; i++) {
1526 seq_printf(s, "\tswreg%d = %08X\n", i, readl(enc_dev.hwregs + i));
1528 seq_printf(s, "\nDEC Registers:\n");
1529 n = dec_dev.iosize >> 2;
1530 for (i = 0; i < n; i++) {
1531 seq_printf(s, "\tswreg%d = %08X\n", i, readl(dec_dev.hwregs + i));
1534 seq_printf(s, "\nvpu service status:\n");
1535 list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
1536 seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
1537 //seq_printf(s, "waiting reg set %d\n");
1538 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
1539 seq_printf(s, "waiting register set\n");
1541 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
1542 seq_printf(s, "running register set\n");
1544 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
1545 seq_printf(s, "done register set\n");
1548 mutex_unlock(&service.lock);
1553 static int proc_vpu_service_open(struct inode *inode, struct file *file)
1555 return single_open(file, proc_vpu_service_show, NULL);
1558 static const struct file_operations proc_vpu_service_fops = {
1559 .open = proc_vpu_service_open,
1561 .llseek = seq_lseek,
1562 .release = single_release,
1565 static int __init vpu_service_proc_init(void)
1567 proc_create("vpu_service", 0, NULL, &proc_vpu_service_fops);
1571 static void __exit vpu_service_proc_release(void)
1573 remove_proc_entry("vpu_service", NULL);
1575 #endif /* CONFIG_PROC_FS */