1 /* arch/arm/mach-rk29/vpu.c
3 * Copyright (C) 2010 ROCKCHIP, Inc.
4 * author: chenhengming chm@rock-chips.com
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #ifdef CONFIG_RK29_VPU_DEBUG
19 #define pr_fmt(fmt) "VPU_SERVICE: %s: " fmt, __func__
21 #define pr_fmt(fmt) "VPU_SERVICE: " fmt
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
33 #include <linux/ioport.h>
34 #include <linux/miscdevice.h>
36 #include <linux/poll.h>
37 #include <linux/platform_device.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/wakelock.h>
42 #include <asm/uaccess.h>
44 #include <mach/irqs.h>
48 #include <plat/vpu_service.h>
52 VPU_DEC_ID_9190 = 0x6731,
58 VPU_DEC_TYPE_9190 = 0,
59 VPU_ENC_TYPE_8270 = 0x100,
63 typedef enum VPU_FREQ {
73 unsigned long hw_addr;
74 unsigned long enc_offset;
75 unsigned long enc_reg_num;
76 unsigned long enc_io_size;
77 unsigned long dec_offset;
78 unsigned long dec_reg_num;
79 unsigned long dec_io_size;
82 #define MHZ (1000*1000)
84 #define VCODEC_PHYS (0x10104000)
86 #define REG_NUM_9190_DEC (60)
87 #define REG_NUM_9190_PP (41)
88 #define REG_NUM_9190_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP)
90 #define REG_NUM_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP)
92 #define REG_NUM_ENC_8270 (96)
93 #define REG_SIZE_ENC_8270 (0x200)
94 #define REG_NUM_ENC_4831 (164)
95 #define REG_SIZE_ENC_4831 (0x400)
97 #define SIZE_REG(reg) ((reg)*4)
/*
 * Table of known VPU hardware revisions and their register-window layout.
 * vpu_service_check_hw() matches the probed hardware id against .hw_id and
 * points service.hw_info at the winning entry.
 * NOTE(review): the per-entry initializer braces are not visible in this
 * extract; entry boundaries are inferred from the .hw_id fields — confirm
 * against the full source.
 */
99 VPU_HW_INFO_E vpu_hw_set[] = {
/* 8270-encoder variant: decoder registers follow the encoder window */
101 .hw_id = VPU_ID_8270,
102 .hw_addr = VCODEC_PHYS,
104 .enc_reg_num = REG_NUM_ENC_8270,
105 .enc_io_size = REG_NUM_ENC_8270 * 4,
106 .dec_offset = REG_SIZE_ENC_8270,
107 .dec_reg_num = REG_NUM_9190_DEC_PP,
108 .dec_io_size = REG_NUM_9190_DEC_PP * 4,
/* 4831-encoder variant: larger encoder window, same decoder block */
111 .hw_id = VPU_ID_4831,
112 .hw_addr = VCODEC_PHYS,
114 .enc_reg_num = REG_NUM_ENC_4831,
115 .enc_io_size = REG_NUM_ENC_4831 * 4,
116 .dec_offset = REG_SIZE_ENC_4831,
117 .dec_reg_num = REG_NUM_9190_DEC_PP,
118 .dec_io_size = REG_NUM_9190_DEC_PP * 4,
123 #define DEC_INTERRUPT_REGISTER 1
124 #define PP_INTERRUPT_REGISTER 60
125 #define ENC_INTERRUPT_REGISTER 1
127 #define DEC_INTERRUPT_BIT 0x100
128 #define PP_INTERRUPT_BIT 0x100
129 #define ENC_INTERRUPT_BIT 0x1
131 #define VPU_REG_EN_ENC 14
132 #define VPU_REG_ENC_GATE 2
133 #define VPU_REG_ENC_GATE_BIT (1<<4)
135 #define VPU_REG_EN_DEC 1
136 #define VPU_REG_DEC_GATE 2
137 #define VPU_REG_DEC_GATE_BIT (1<<10)
138 #define VPU_REG_EN_PP 0
139 #define VPU_REG_PP_GATE 1
140 #define VPU_REG_PP_GATE_BIT (1<<8)
141 #define VPU_REG_EN_DEC_PP 1
142 #define VPU_REG_DEC_PP_GATE 61
143 #define VPU_REG_DEC_PP_GATE_BIT (1<<8)
146 * struct for process session which connect to vpu
148 * @author ChenHengming (2011-5-3)
150 typedef struct vpu_session {
/* client kind (encoder/decoder/pp) set via VPU_IOC_SET_CLIENT_TYPE */
151 VPU_CLIENT_TYPE type;
152 /* a linked list of data so we can access them for debugging */
153 struct list_head list_session;
154 /* a linked list of register data waiting for process */
155 struct list_head waiting;
156 /* a linked list of register data in processing */
157 struct list_head running;
158 /* a linked list of register data processed */
159 struct list_head done;
/* woken by reg_from_run_to_done() when a register set completes */
160 wait_queue_head_t wait;
/* number of this session's register sets currently queued or on hardware */
/* NOTE(review): a `pid` field is referenced elsewhere in this file but is
 * not visible in this extract — confirm it exists in the full struct. */
162 atomic_t task_running;
166 * struct for process register set
168 * @author ChenHengming (2011-5-4)
170 typedef struct vpu_reg {
/* which hardware block this register set targets (enc/dec/pp/dec+pp) */
171 VPU_CLIENT_TYPE type;
/* owning session; used to route completion back to the right waiter */
173 vpu_session *session;
174 struct list_head session_link; /* link to vpu service session */
175 struct list_head status_link; /* link to register set list */
/* NOTE(review): `freq` and the flexible `reg` payload pointer are used by
 * reg_init()/reg_copy_to_hw() but are not visible in this extract. */
/* One memory-mapped hardware block (decoder or encoder half of the VPU). */
180 typedef struct vpu_device {
/* IRQ counters: incremented in hard-irq context, drained in threaded isr */
181 atomic_t irq_count_codec;
182 atomic_t irq_count_pp;
/* physical base of this block's register window */
183 unsigned long iobaseaddr;
/* ioremapped register window; NOTE(review): an `iosize` field is used by
 * vpu_service_reserve_io()/release_io() but is not visible here. */
185 volatile u32 *hwregs;
/*
 * Global driver state (single instance: `service` below). The lists and
 * reg_* ownership pointers are protected by a mutex `lock` that is used
 * throughout this file but not visible in this extract.
 */
188 typedef struct vpu_service_info {
/* held while the hardware is powered so suspend does not cut power */
189 struct wake_lock wake_lock;
/* delayed power-down, re-armed on every use (see vpu_queue_power_off_work) */
190 struct delayed_work power_off_work;
192 struct list_head waiting; /* link to link_reg in struct vpu_reg */
193 struct list_head running; /* link to link_reg in struct vpu_reg */
194 struct list_head done; /* link to link_reg in struct vpu_reg */
195 struct list_head session; /* link to list_session in struct vpu_session */
/* register sets currently on hardware, across all sessions */
196 atomic_t total_running;
/* hardware capability snapshots filled in by get_hw_info() */
201 VPUHwDecConfig_t dec_config;
202 VPUHwEncConfig_t enc_config;
/* matched entry of vpu_hw_set[] for the probed silicon */
203 VPU_HW_INFO_E *hw_info;
/* max(enc,dec) io size; payload size allocated behind each vpu_reg */
204 unsigned long reg_size;
/* userspace request descriptor passed through the ioctls; body (req/size
 * fields) is not visible in this extract */
208 typedef struct vpu_request
/* Clocks looked up in vpu_get_clk(); enabled/disabled around hardware use. */
214 static struct clk *pd_video;
215 static struct clk *aclk_vepu;
216 static struct clk *hclk_vepu;
217 static struct clk *aclk_ddr_vepu;
218 static struct clk *hclk_cpu_vcodec;
/* the single driver instance plus its two memory-mapped halves */
219 static vpu_service_info service;
220 static vpu_device dec_dev;
221 static vpu_device enc_dev;
/* idle time before powering the VPU down / max wait for one task */
223 #define VPU_POWER_OFF_DELAY 4*HZ /* 4s */
224 #define VPU_TIMEOUT_DELAY 2*HZ /* 2s */
226 static void vpu_get_clk(void)
228 pd_video = clk_get(NULL, "pd_video");
229 aclk_vepu = clk_get(NULL, "aclk_vepu");
230 hclk_vepu = clk_get(NULL, "hclk_vepu");
231 aclk_ddr_vepu = clk_get(NULL, "aclk_ddr_vepu");
232 hclk_cpu_vcodec = clk_get(NULL, "hclk_cpu_vcodec");
235 static void vpu_put_clk(void)
240 clk_put(aclk_ddr_vepu);
241 clk_put(hclk_cpu_vcodec);
244 static void vpu_reset(void)
246 #if defined(CONFIG_ARCH_RK29)
247 clk_disable(aclk_ddr_vepu);
248 cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
249 cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
250 cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
251 cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
253 cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
254 cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
255 cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
256 cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
257 clk_enable(aclk_ddr_vepu);
258 #elif defined(CONFIG_ARCH_RK30)
259 pmu_set_idle_request(IDLE_REQ_VIDEO, true);
260 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
261 cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
262 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
263 cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
265 cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
266 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
267 cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
268 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
269 pmu_set_idle_request(IDLE_REQ_VIDEO, false);
271 service.reg_codec = NULL;
272 service.reg_pproc = NULL;
273 service.reg_resev = NULL;
276 static void reg_deinit(vpu_reg *reg);
277 static void vpu_service_session_clear(vpu_session *session)
280 list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
283 list_for_each_entry_safe(reg, n, &session->running, session_link) {
286 list_for_each_entry_safe(reg, n, &session->done, session_link) {
/*
 * Debug dump of the whole service state: global counters, hardware
 * ownership pointers, then per-session task counts and queued register
 * sets. Printed when a task fails to return (see VPU_IOC_GET_REG path).
 * NOTE(review): expected to run under service.lock — confirm at callers.
 */
291 static void vpu_service_dump(void)
294 vpu_reg *reg, *reg_tmp;
295 vpu_session *session, *session_tmp;
297 running = atomic_read(&service.total_running);
298 printk("total_running %d\n", running);
/* raw pointers are printed only as identity tokens for debugging */
300 printk("reg_codec 0x%.8x\n", (unsigned int)service.reg_codec);
301 printk("reg_pproc 0x%.8x\n", (unsigned int)service.reg_pproc);
302 printk("reg_resev 0x%.8x\n", (unsigned int)service.reg_resev);
304 list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
305 printk("session pid %d type %d:\n", session->pid, session->type);
306 running = atomic_read(&session->task_running);
307 printk("task_running %d\n", running);
308 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
309 printk("waiting register set 0x%.8x\n", (unsigned int)reg);
311 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
312 printk("running register set 0x%.8x\n", (unsigned int)reg);
314 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
315 printk("done register set 0x%.8x\n", (unsigned int)reg);
/*
 * Gate all VPU clocks and power, and release the wakelock. No-op when
 * already disabled. If tasks are still marked running it warns and
 * (per the alert text) delays briefly before cutting power.
 * Caller must hold service.lock.
 */
320 static void vpu_service_power_off(void)
323 if (!service.enabled) {
327 service.enabled = false;
328 total_running = atomic_read(&service.total_running);
330 pr_alert("alert: power off when %d task running!!\n", total_running);
332 pr_alert("alert: delay 50 ms for running task\n");
336 printk("vpu: power off...");
337 #ifdef CONFIG_ARCH_RK29
/* RK29 gates the whole VCODEC power domain; other SoCs gate pd_video clk */
338 pmu_set_power_domain(PD_VCODEC, false);
340 clk_disable(pd_video);
/* disable in reverse order of vpu_service_power_on() */
343 clk_disable(hclk_cpu_vcodec);
344 clk_disable(aclk_ddr_vepu);
345 clk_disable(hclk_vepu);
346 clk_disable(aclk_vepu);
347 wake_unlock(&service.wake_lock);
351 static inline void vpu_queue_power_off_work(void)
353 queue_delayed_work(system_nrt_wq, &service.power_off_work, VPU_POWER_OFF_DELAY);
356 static void vpu_power_off_work(struct work_struct *work)
358 if (mutex_trylock(&service.lock)) {
359 vpu_service_power_off();
360 mutex_unlock(&service.lock);
362 /* Come back later if the device is busy... */
363 vpu_queue_power_off_work();
/*
 * Ungate all VPU clocks/power and take the wakelock, then push the
 * delayed power-off further into the future. The ktime check rate-limits
 * how often the power-off work is cancelled/re-queued (at most once per
 * second). Returns early (lines not visible here) when already enabled.
 * NOTE(review): `last` is a timestamp not visible in this extract —
 * presumably static, updated alongside this check; confirm in full source.
 */
367 static void vpu_service_power_on(void)
370 ktime_t now = ktime_get();
371 if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
372 cancel_delayed_work_sync(&service.power_off_work);
373 vpu_queue_power_off_work();
379 service.enabled = true;
380 printk("vpu: power on\n");
/* enable order is the reverse of vpu_service_power_off() */
382 clk_enable(aclk_vepu);
383 clk_enable(hclk_vepu);
384 clk_enable(hclk_cpu_vcodec);
386 #ifdef CONFIG_ARCH_RK29
387 pmu_set_power_domain(PD_VCODEC, true);
389 clk_enable(pd_video);
392 clk_enable(aclk_ddr_vepu);
393 wake_lock(&service.wake_lock);
396 static inline bool reg_check_rmvb_wmv(vpu_reg *reg)
398 unsigned long type = (reg->reg[3] & 0xF0000000) >> 28;
399 return ((type == 8) || (type == 4));
402 static inline bool reg_check_interlace(vpu_reg *reg)
404 unsigned long type = (reg->reg[3] & (1 << 23));
/*
 * Allocate a vpu_reg with service.reg_size bytes of register payload
 * behind it, copy the userspace register values in, queue it on both the
 * service and session waiting lists, and (in auto-frequency mode) pick a
 * clock class from the stream type bits in swreg3.
 * Returns the new vpu_reg, or NULL on allocation/copy failure (the error
 * paths are not visible in this extract).
 * NOTE(review): the `®` sequences below are mojibake for `&reg` — this
 * copy of the file is corrupted; restore from pristine source.
 */
408 static vpu_reg *reg_init(vpu_session *session, void __user *src, unsigned long size)
410 vpu_reg *reg = kmalloc(sizeof(vpu_reg)+service.reg_size, GFP_KERNEL);
412 pr_err("error: kmalloc fail in reg_init\n");
416 reg->session = session;
417 reg->type = session->type;
419 reg->freq = VPU_FREQ_DEFAULT;
/* payload lives immediately after the struct in the same allocation */
420 reg->reg = (unsigned long *)®[1];
421 INIT_LIST_HEAD(®->session_link);
422 INIT_LIST_HEAD(®->status_link);
424 if (copy_from_user(®->reg[0], (void __user *)src, size)) {
425 pr_err("error: copy_from_user failed in reg_init\n");
430 mutex_lock(&service.lock);
431 list_add_tail(®->status_link, &service.waiting);
432 list_add_tail(®->session_link, &session->waiting);
433 mutex_unlock(&service.lock);
/* choose a clock class from the stream: RV/WMV -> 266M, interlace -> 400M */
435 if (service.auto_freq) {
436 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
437 if (reg_check_rmvb_wmv(reg)) {
438 reg->freq = VPU_FREQ_266M;
440 if (reg_check_interlace(reg)) {
441 reg->freq = VPU_FREQ_400M;
445 if (reg->type == VPU_PP) {
446 reg->freq = VPU_FREQ_400M;
453 static void reg_deinit(vpu_reg *reg)
455 list_del_init(®->session_link);
456 list_del_init(®->status_link);
457 if (reg == service.reg_codec) service.reg_codec = NULL;
458 if (reg == service.reg_pproc) service.reg_pproc = NULL;
462 static void reg_from_wait_to_run(vpu_reg *reg)
464 list_del_init(®->status_link);
465 list_add_tail(®->status_link, &service.running);
467 list_del_init(®->session_link);
468 list_add_tail(®->session_link, ®->session->running);
471 static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
474 u32 *dst = (u32 *)®->reg[0];
475 for (i = 0; i < count; i++)
479 static void reg_from_run_to_done(vpu_reg *reg)
481 list_del_init(®->status_link);
482 list_add_tail(®->status_link, &service.done);
484 list_del_init(®->session_link);
485 list_add_tail(®->session_link, ®->session->done);
489 service.reg_codec = NULL;
490 reg_copy_from_hw(reg, enc_dev.hwregs, service.hw_info->enc_reg_num);
494 service.reg_codec = NULL;
495 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_9190_DEC);
499 service.reg_pproc = NULL;
500 reg_copy_from_hw(reg, dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_9190_PP);
501 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
505 service.reg_codec = NULL;
506 service.reg_pproc = NULL;
507 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_9190_DEC_PP);
508 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
512 pr_err("error: copy reg from hw with unknown type %d\n", reg->type);
516 atomic_sub(1, ®->session->task_running);
517 atomic_sub(1, &service.total_running);
518 wake_up_interruptible_sync(®->session->wait);
521 static void vpu_service_set_freq(vpu_reg *reg)
524 case VPU_FREQ_200M : {
525 clk_set_rate(aclk_vepu, 200*MHZ);
527 case VPU_FREQ_266M : {
528 clk_set_rate(aclk_vepu, 266*MHZ);
530 case VPU_FREQ_300M : {
531 clk_set_rate(aclk_vepu, 300*MHZ);
533 case VPU_FREQ_400M : {
534 clk_set_rate(aclk_vepu, 400*MHZ);
537 clk_set_rate(aclk_vepu, 300*MHZ);
/*
 * Push @reg's register payload into the hardware and kick the run.
 * For each type the enable register is written LAST so the block only
 * starts once every other register holds its final value; clock-gate
 * bits are forced on so the hardware clocks are running for the job.
 * Caller must hold service.lock; counters are undone on unknown type.
 * NOTE(review): the `®` sequences below are mojibake for `&reg` — this
 * copy of the file is corrupted; restore from pristine source.
 */
542 static void reg_copy_to_hw(vpu_reg *reg)
545 u32 *src = (u32 *)®->reg[0];
546 atomic_add(1, &service.total_running);
547 atomic_add(1, ®->session->task_running);
548 if (service.auto_freq) {
549 vpu_service_set_freq(reg);
/* --- VPU_ENC: encoder block --- */
553 int enc_count = service.hw_info->enc_reg_num;
554 u32 *dst = (u32 *)enc_dev.hwregs;
555 #if defined(CONFIG_ARCH_RK30)
/* RK30: pulse codec soft-resets before every encode run */
556 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
557 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
558 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
559 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
561 service.reg_codec = reg;
/* write the enable register with run bits masked off first */
563 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
565 for (i = 0; i < VPU_REG_EN_ENC; i++)
568 for (i = VPU_REG_EN_ENC + 1; i < enc_count; i++)
573 dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
574 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC];
/* --- VPU_DEC: decoder block, registers written high-to-low --- */
577 u32 *dst = (u32 *)dec_dev.hwregs;
578 service.reg_codec = reg;
580 for (i = REG_NUM_9190_DEC - 1; i > VPU_REG_DEC_GATE; i--)
585 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
586 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
/* --- VPU_PP: post-processor window starts at PP_INTERRUPT_REGISTER --- */
590 u32 *dst = (u32 *)dec_dev.hwregs + PP_INTERRUPT_REGISTER;
591 service.reg_pproc = reg;
593 dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
595 for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_9190_PP; i++)
600 dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
/* --- VPU_DEC_PP: combined run owns both halves --- */
604 u32 *dst = (u32 *)dec_dev.hwregs;
605 service.reg_codec = reg;
606 service.reg_pproc = reg;
608 for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_9190_DEC_PP; i++)
/* 0x2: pipeline mode bit forced on for the combined dec+pp run */
611 dst[VPU_REG_EN_DEC_PP] = src[VPU_REG_EN_DEC_PP] | 0x2;
614 dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
615 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
616 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
/* unknown type: undo the counters taken above */
620 pr_err("error: unsupport session type %d", reg->type);
621 atomic_sub(1, &service.total_running);
622 atomic_sub(1, ®->session->task_running);
/*
 * Scheduler: if a register set is waiting and the hardware resource(s)
 * it needs (codec and/or pp) are free, power the VPU on and dispatch it
 * via reg_from_wait_to_run() + reg_copy_to_hw(). In auto-frequency mode
 * the clock is not changed while the other half is still busy.
 * Caller must hold service.lock.
 */
628 static void try_set_reg(void)
630 // first get reg from reg list
631 if (!list_empty(&service.waiting)) {
633 vpu_reg *reg = list_entry(service.waiting.next, vpu_reg, status_link);
635 vpu_service_power_on();
/* (per-type availability checks: which hw halves must be idle) */
639 if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
643 if (NULL == service.reg_codec)
645 if (service.auto_freq && (NULL != service.reg_pproc)) {
650 if (NULL == service.reg_codec) {
651 if (NULL == service.reg_pproc)
/* a plain decode may overlap with pp only when pp is idle */
654 if ((VPU_DEC == service.reg_codec->type) && (NULL == service.reg_pproc))
656 // can not charge frequency when vpu is working
657 if (service.auto_freq) {
663 if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
667 printk("undefined reg type %d\n", reg->type);
671 reg_from_wait_to_run(reg);
/*
 * Copy the result-register snapshot of a completed @reg back to the
 * userspace buffer @dst; the copy size depends on the hardware type.
 * Returns 0 on success (error paths not visible in this extract).
 * NOTE(review): `®` below is mojibake for `&reg` — corrupted copy.
 */
677 static int return_reg(vpu_reg *reg, u32 __user *dst)
682 if (copy_to_user(dst, ®->reg[0], service.hw_info->enc_io_size))
687 if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_9190_DEC)))
692 if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_9190_PP)))
697 if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_9190_DEC_PP)))
703 pr_err("error: copy reg to user with unknown type %d\n", reg->type);
/*
 * Userspace entry points:
 *   VPU_IOC_SET_CLIENT_TYPE   - declare this session enc/dec/pp
 *   VPU_IOC_GET_HW_FUSE_STATUS- copy dec_config or enc_config out
 *   VPU_IOC_SET_REG           - queue a register set and try to dispatch
 *   VPU_IOC_GET_REG           - wait for a completed set and copy it back;
 *                               on timeout, resets the hardware and clears
 *                               the session's queues.
 */
711 static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
713 vpu_session *session = (vpu_session *)filp->private_data;
714 if (NULL == session) {
719 case VPU_IOC_SET_CLIENT_TYPE : {
720 session->type = (VPU_CLIENT_TYPE)arg;
723 case VPU_IOC_GET_HW_FUSE_STATUS : {
725 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
726 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
/* non-encoder sessions get the decoder capabilities, encoders theirs */
729 if (VPU_ENC != session->type) {
730 if (copy_to_user((void __user *)req.req, &service.dec_config, sizeof(VPUHwDecConfig_t))) {
731 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
735 if (copy_to_user((void __user *)req.req, &service.enc_config, sizeof(VPUHwEncConfig_t))) {
736 pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
744 case VPU_IOC_SET_REG : {
747 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
748 pr_err("error: VPU_IOC_SET_REG copy_from_user failed\n");
752 reg = reg_init(session, (void __user *)req.req, req.size);
/* presumably try_set_reg() is called inside this locked region */
756 mutex_lock(&service.lock);
758 mutex_unlock(&service.lock);
763 case VPU_IOC_GET_REG : {
766 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
767 pr_err("error: VPU_IOC_GET_REG copy_from_user failed\n");
770 int ret = wait_event_interruptible_timeout(session->wait, !list_empty(&session->done), VPU_TIMEOUT_DELAY);
/* a task completed even if the wait returned oddly: treat as success */
771 if (!list_empty(&session->done)) {
773 pr_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session->pid, ret);
777 if (unlikely(ret < 0)) {
778 pr_err("error: pid %d wait task ret %d\n", session->pid, ret);
779 } else if (0 == ret) {
780 pr_err("error: pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
/* timeout recovery: forget the stuck tasks and reset the hardware */
785 int task_running = atomic_read(&session->task_running);
786 mutex_lock(&service.lock);
789 atomic_set(&session->task_running, 0);
790 atomic_sub(task_running, &service.total_running);
791 printk("%d task is running but not return, reset hardware...", task_running);
795 vpu_service_session_clear(session);
796 mutex_unlock(&service.lock);
800 mutex_lock(&service.lock);
801 reg = list_entry(session->done.next, vpu_reg, session_link);
802 return_reg(reg, (u32 __user *)req.req);
803 mutex_unlock(&service.lock);
807 pr_err("error: unknow vpu service ioctl cmd %x\n", cmd);
/*
 * Probe the silicon: temporarily map the first register at @hw_addr,
 * read the 16-bit hardware id from its top half, and match it against
 * vpu_hw_set[]; on a hit, point p->hw_info at the entry. Returns 0 on a
 * match, -EINVAL otherwise (return assignment not visible here).
 */
815 static int vpu_service_check_hw(vpu_service_info *p, unsigned long hw_addr)
817 int ret = -EINVAL, i = 0;
818 volatile u32 *tmp = (volatile u32 *)ioremap_nocache(hw_addr, 0x4);
/* hardware id lives in the upper 16 bits of register 0 */
820 enc_id = (enc_id >> 16) & 0xFFFF;
821 pr_info("checking hw id %x\n", enc_id);
823 for (i = 0; i < ARRAY_SIZE(vpu_hw_set); i++) {
824 if (enc_id == vpu_hw_set[i].hw_id) {
825 p->hw_info = &vpu_hw_set[i];
830 iounmap((void *)tmp);
/*
 * Undo vpu_service_reserve_io(): unmap and release both register
 * windows. Each step is guarded so this is safe to call from partial-
 * initialization error paths.
 */
834 static void vpu_service_release_io(void)
836 if (dec_dev.hwregs) {
837 iounmap((void *)dec_dev.hwregs);
838 dec_dev.hwregs = NULL;
840 if (dec_dev.iobaseaddr) {
841 release_mem_region(dec_dev.iobaseaddr, dec_dev.iosize);
842 dec_dev.iobaseaddr = 0;
846 if (enc_dev.hwregs) {
847 iounmap((void *)enc_dev.hwregs);
848 enc_dev.hwregs = NULL;
850 if (enc_dev.iobaseaddr) {
851 release_mem_region(enc_dev.iobaseaddr, enc_dev.iosize);
852 enc_dev.iobaseaddr = 0;
/*
 * Claim and map the decoder then the encoder register windows.
 * Returns 0 on success; error paths (not visible here) presumably unwind
 * via vpu_service_release_io() — confirm in the full source.
 */
857 static int vpu_service_reserve_io(void)
859 unsigned long iobaseaddr;
860 unsigned long iosize;
862 iobaseaddr = dec_dev.iobaseaddr;
863 iosize = dec_dev.iosize;
865 if (!request_mem_region(iobaseaddr, iosize, "vdpu_io")) {
866 pr_info("failed to reserve dec HW regs\n");
870 dec_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
872 if (dec_dev.hwregs == NULL) {
873 pr_info("failed to ioremap dec HW regs\n");
877 iobaseaddr = enc_dev.iobaseaddr;
878 iosize = enc_dev.iosize;
880 if (!request_mem_region(iobaseaddr, iosize, "vepu_io")) {
881 pr_info("failed to reserve enc HW regs\n");
885 enc_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
887 if (enc_dev.hwregs == NULL) {
888 pr_info("failed to ioremap enc HW regs\n");
898 static int vpu_service_open(struct inode *inode, struct file *filp)
900 vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
901 if (NULL == session) {
902 pr_err("error: unable to allocate memory for vpu_session.");
906 session->type = VPU_TYPE_BUTT;
907 session->pid = current->pid;
908 INIT_LIST_HEAD(&session->waiting);
909 INIT_LIST_HEAD(&session->running);
910 INIT_LIST_HEAD(&session->done);
911 INIT_LIST_HEAD(&session->list_session);
912 init_waitqueue_head(&session->wait);
913 atomic_set(&session->task_running, 0);
914 mutex_lock(&service.lock);
915 list_add_tail(&session->list_session, &service.session);
916 filp->private_data = (void *)session;
917 mutex_unlock(&service.lock);
919 pr_debug("dev opened\n");
920 return nonseekable_open(inode, filp);
923 static int vpu_service_release(struct inode *inode, struct file *filp)
926 vpu_session *session = (vpu_session *)filp->private_data;
930 task_running = atomic_read(&session->task_running);
932 pr_err("error: vpu_service session %d still has %d task running when closing\n", session->pid, task_running);
935 wake_up_interruptible_sync(&session->wait);
937 mutex_lock(&service.lock);
938 /* remove this filp from the asynchronusly notified filp's */
939 list_del_init(&session->list_session);
940 vpu_service_session_clear(session);
942 filp->private_data = NULL;
943 mutex_unlock(&service.lock);
945 pr_debug("dev closed\n");
/* Character-device entry points exposed through the misc device below. */
949 static const struct file_operations vpu_service_fops = {
950 .unlocked_ioctl = vpu_service_ioctl,
951 .open = vpu_service_open,
952 .release = vpu_service_release,
953 //.fasync = vpu_service_fasync,
/* /dev/vpu_service with a dynamically assigned misc minor */
956 static struct miscdevice vpu_service_misc_device = {
957 .minor = MISC_DYNAMIC_MINOR,
958 .name = "vpu_service",
959 .fops = &vpu_service_fops,
/* matching platform device/driver pair registered from vpu_service_init() */
962 static struct platform_device vpu_service_device = {
963 .name = "vpu_service",
967 static struct platform_driver vpu_service_driver = {
969 .name = "vpu_service",
970 .owner = THIS_MODULE,
/*
 * Read the decoder/pp synthesis-config and fuse registers plus the
 * encoder config register, and fill service.dec_config/enc_config with
 * the capabilities userspace later fetches via VPU_IOC_GET_HW_FUSE_STATUS.
 * Fused-off features downgrade the corresponding synthesis capability.
 * Must run with the hardware powered (reads dec_dev/enc_dev.hwregs).
 */
974 static void get_hw_info(void)
976 VPUHwDecConfig_t *dec = &service.dec_config;
977 VPUHwEncConfig_t *enc = &service.enc_config;
978 u32 configReg = dec_dev.hwregs[VPU_DEC_HWCFG0];
/* register 0 carries the ASIC id; its top 16 bits gate version checks */
979 u32 asicID = dec_dev.hwregs[0];
981 dec->h264Support = (configReg >> DWL_H264_E) & 0x3U;
982 dec->jpegSupport = (configReg >> DWL_JPEG_E) & 0x01U;
983 if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
984 dec->jpegSupport = JPEG_PROGRESSIVE;
985 dec->mpeg4Support = (configReg >> DWL_MPEG4_E) & 0x3U;
986 dec->vc1Support = (configReg >> DWL_VC1_E) & 0x3U;
987 dec->mpeg2Support = (configReg >> DWL_MPEG2_E) & 0x01U;
988 dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
989 dec->refBufSupport = (configReg >> DWL_REF_BUFF_E) & 0x01U;
990 dec->vp6Support = (configReg >> DWL_VP6_E) & 0x01U;
991 dec->maxDecPicWidth = configReg & 0x07FFU;
993 /* 2nd Config register */
994 configReg = dec_dev.hwregs[VPU_DEC_HWCFG1];
995 if (dec->refBufSupport) {
996 if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
997 dec->refBufSupport |= 2;
998 if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
999 dec->refBufSupport |= 4;
1001 dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
1002 dec->vp7Support = (configReg >> DWL_VP7_E) & 0x01U;
1003 dec->vp8Support = (configReg >> DWL_VP8_E) & 0x01U;
1004 dec->avsSupport = (configReg >> DWL_AVS_E) & 0x01U;
1006 /* JPEG xtensions */
1007 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
1008 dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
1010 dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
/* RV decode only on 9170+ cores or the 0x6731 (9190) variant */
1013 if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
1014 dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
1016 dec->rvSupport = RV_NOT_SUPPORTED;
1019 dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;
1021 if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
1022 dec->refBufSupport |= 8; /* enable HW support for offset */
1026 VPUHwFuseStatus_t hwFuseSts;
1027 /* Decoder fuse configuration */
1028 u32 fuseReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
1030 hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
1031 hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
1032 hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
1033 hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
1034 hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
1035 hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
1036 hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
1037 hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
1038 hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
1039 hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
1040 hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
1041 hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
1042 hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
1043 hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;
1045 /* check max. decoder output width */
1047 if (fuseReg & 0x8000U)
1048 hwFuseSts.maxDecPicWidthFuse = 1920;
1049 else if (fuseReg & 0x4000U)
1050 hwFuseSts.maxDecPicWidthFuse = 1280;
1051 else if (fuseReg & 0x2000U)
1052 hwFuseSts.maxDecPicWidthFuse = 720;
1053 else if (fuseReg & 0x1000U)
1054 hwFuseSts.maxDecPicWidthFuse = 352;
1055 else /* remove warning */
1056 hwFuseSts.maxDecPicWidthFuse = 352;
1058 hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;
1060 /* Pp configuration */
1061 configReg = dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];
1063 if ((configReg >> DWL_PP_E) & 0x01U) {
1065 dec->maxPpOutPicWidth = configReg & 0x07FFU;
1066 /*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
1067 dec->ppConfig = configReg;
1070 dec->maxPpOutPicWidth = 0;
1074 /* check the HW versio */
1075 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
1076 /* Pp configuration */
1077 configReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
1079 if ((configReg >> DWL_PP_E) & 0x01U) {
1080 /* Pp fuse configuration */
1081 u32 fuseRegPp = dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];
1083 if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
1084 hwFuseSts.ppSupportFuse = 1;
1085 /* check max. pp output width */
1086 if (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920;
1087 else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280;
1088 else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720;
1089 else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352;
1090 else hwFuseSts.maxPpOutPicWidthFuse = 352;
1091 hwFuseSts.ppConfigFuse = fuseRegPp;
1093 hwFuseSts.ppSupportFuse = 0;
1094 hwFuseSts.maxPpOutPicWidthFuse = 0;
1095 hwFuseSts.ppConfigFuse = 0;
1098 hwFuseSts.ppSupportFuse = 0;
1099 hwFuseSts.maxPpOutPicWidthFuse = 0;
1100 hwFuseSts.ppConfigFuse = 0;
/* clamp synthesis capabilities to what the fuses actually allow */
1103 if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse)
1104 dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse;
1105 if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse)
1106 dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse;
1107 if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED;
1108 if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED;
1109 if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED;
1110 if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED;
1111 if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse)
1112 dec->jpegSupport = JPEG_BASELINE;
1113 if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED;
1114 if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED;
1115 if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED;
1116 if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED;
1117 if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED;
1118 if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED;
1120 /* check the pp config vs fuse status */
1121 if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) {
1122 u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25);
1123 u32 alphaBlend = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24);
1124 u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25);
1125 u32 alphaBlendFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24);
1127 if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000;
1128 if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000;
1130 if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED;
1131 if (!hwFuseSts.refBufSupportFuse) dec->refBufSupport = REF_BUF_NOT_SUPPORTED;
1132 if (!hwFuseSts.rvSupportFuse) dec->rvSupport = RV_NOT_SUPPORTED;
1133 if (!hwFuseSts.avsSupportFuse) dec->avsSupport = AVS_NOT_SUPPORTED;
1134 if (!hwFuseSts.mvcSupportFuse) dec->mvcSupport = MVC_NOT_SUPPORTED;
/* encoder capabilities: packed into encoder config register 63 */
1137 configReg = enc_dev.hwregs[63];
1138 enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
1139 enc->h264Enabled = (configReg >> 27) & 1;
1140 enc->mpeg4Enabled = (configReg >> 26) & 1;
1141 enc->jpegEnabled = (configReg >> 25) & 1;
1142 enc->vsEnabled = (configReg >> 24) & 1;
1143 enc->rgbEnabled = (configReg >> 28) & 1;
1144 //enc->busType = (configReg >> 20) & 15;
1145 //enc->synthesisLanguage = (configReg >> 16) & 15;
1146 //enc->busWidth = (configReg >> 12) & 15;
1147 enc->reg_size = service.reg_size;
1148 enc->reserv[0] = enc->reserv[1] = 0;
/* auto frequency scaling only on the RK2926/RK2928 SoC family */
1150 service.auto_freq = soc_is_rk2928g() || soc_is_rk2928l() || soc_is_rk2926();
1151 if (service.auto_freq) {
1152 printk("vpu_service set to auto frequency mode\n");
1156 static irqreturn_t vdpu_irq(int irq, void *dev_id)
1158 vpu_device *dev = (vpu_device *) dev_id;
1159 u32 irq_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1161 pr_debug("vdpu_irq\n");
1163 if (irq_status & DEC_INTERRUPT_BIT) {
1164 pr_debug("vdpu_isr dec %x\n", irq_status);
1165 if ((irq_status & 0x40001) == 0x40001)
1168 irq_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1169 } while ((irq_status & 0x40001) == 0x40001);
1172 writel(irq_status & (~DEC_INTERRUPT_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER);
1173 atomic_add(1, &dev->irq_count_codec);
1176 irq_status = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
1177 if (irq_status & PP_INTERRUPT_BIT) {
1178 pr_debug("vdpu_isr pp %x\n", irq_status);
1180 writel(irq_status & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
1181 atomic_add(1, &dev->irq_count_pp);
1184 return IRQ_WAKE_THREAD;
1187 static irqreturn_t vdpu_isr(int irq, void *dev_id)
1189 vpu_device *dev = (vpu_device *) dev_id;
1191 mutex_lock(&service.lock);
1192 if (atomic_read(&dev->irq_count_codec)) {
1193 atomic_sub(1, &dev->irq_count_codec);
1194 if (NULL == service.reg_codec) {
1195 pr_err("error: dec isr with no task waiting\n");
1197 reg_from_run_to_done(service.reg_codec);
1201 if (atomic_read(&dev->irq_count_pp)) {
1202 atomic_sub(1, &dev->irq_count_pp);
1203 if (NULL == service.reg_pproc) {
1204 pr_err("error: pp isr with no task waiting\n");
1206 reg_from_run_to_done(service.reg_pproc);
1210 mutex_unlock(&service.lock);
1214 static irqreturn_t vepu_irq(int irq, void *dev_id)
1216 struct vpu_device *dev = (struct vpu_device *) dev_id;
1217 u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);
1219 pr_debug("vepu_irq irq status %x\n", irq_status);
1221 if (likely(irq_status & ENC_INTERRUPT_BIT)) {
1223 writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
1224 atomic_add(1, &dev->irq_count_codec);
1227 return IRQ_WAKE_THREAD;
1230 static irqreturn_t vepu_isr(int irq, void *dev_id)
1232 struct vpu_device *dev = (struct vpu_device *) dev_id;
1234 mutex_lock(&service.lock);
1235 if (atomic_read(&dev->irq_count_codec)) {
1236 atomic_sub(1, &dev->irq_count_codec);
1237 if (NULL == service.reg_codec) {
1238 pr_err("error: enc isr with no task waiting\n");
1240 reg_from_run_to_done(service.reg_codec);
1244 mutex_unlock(&service.lock);
1248 static int __init vpu_service_proc_init(void);
1249 static int __init vpu_service_init(void)
1253 pr_debug("baseaddr = 0x%08x vdpu irq = %d vepu irq = %d\n", VCODEC_PHYS, IRQ_VDPU, IRQ_VEPU);
1255 wake_lock_init(&service.wake_lock, WAKE_LOCK_SUSPEND, "vpu");
1256 INIT_LIST_HEAD(&service.waiting);
1257 INIT_LIST_HEAD(&service.running);
1258 INIT_LIST_HEAD(&service.done);
1259 INIT_LIST_HEAD(&service.session);
1260 mutex_init(&service.lock);
1261 service.reg_codec = NULL;
1262 service.reg_pproc = NULL;
1263 atomic_set(&service.total_running, 0);
1264 service.enabled = false;
1268 INIT_DELAYED_WORK(&service.power_off_work, vpu_power_off_work);
1270 vpu_service_power_on();
1271 ret = vpu_service_check_hw(&service, VCODEC_PHYS);
1273 pr_err("error: hw info check faild\n");
1274 goto err_hw_id_check;
1277 atomic_set(&dec_dev.irq_count_codec, 0);
1278 atomic_set(&dec_dev.irq_count_pp, 0);
1279 dec_dev.iobaseaddr = service.hw_info->hw_addr + service.hw_info->dec_offset;
1280 dec_dev.iosize = service.hw_info->dec_io_size;
1281 atomic_set(&enc_dev.irq_count_codec, 0);
1282 atomic_set(&enc_dev.irq_count_pp, 0);
1283 enc_dev.iobaseaddr = service.hw_info->hw_addr + service.hw_info->enc_offset;
1284 enc_dev.iosize = service.hw_info->enc_io_size;;
1285 service.reg_size = max(dec_dev.iosize, enc_dev.iosize);
1287 ret = vpu_service_reserve_io();
1289 pr_err("error: reserve io failed\n");
1290 goto err_reserve_io;
1293 /* get the IRQ line */
1294 ret = request_threaded_irq(IRQ_VDPU, vdpu_irq, vdpu_isr, IRQF_SHARED, "vdpu", (void *)&dec_dev);
1296 pr_err("error: can't request vdpu irq %d\n", IRQ_VDPU);
1297 goto err_req_vdpu_irq;
1300 ret = request_threaded_irq(IRQ_VEPU, vepu_irq, vepu_isr, IRQF_SHARED, "vepu", (void *)&enc_dev);
1302 pr_err("error: can't request vepu irq %d\n", IRQ_VEPU);
1303 goto err_req_vepu_irq;
1306 ret = misc_register(&vpu_service_misc_device);
1308 pr_err("error: misc_register failed\n");
1312 platform_device_register(&vpu_service_device);
1313 platform_driver_probe(&vpu_service_driver, NULL);
1315 vpu_service_power_off();
1316 pr_info("init success\n");
1318 vpu_service_proc_init();
1322 free_irq(IRQ_VEPU, (void *)&enc_dev);
1324 free_irq(IRQ_VDPU, (void *)&dec_dev);
1326 pr_info("init failed\n");
1328 vpu_service_release_io();
1330 vpu_service_power_off();
1332 wake_lock_destroy(&service.wake_lock);
1333 pr_info("init failed\n");
1337 static void __exit vpu_service_proc_release(void);
1338 static void __exit vpu_service_exit(void)
1340 vpu_service_proc_release();
1341 vpu_service_power_off();
1342 platform_device_unregister(&vpu_service_device);
1343 platform_driver_unregister(&vpu_service_driver);
1344 misc_deregister(&vpu_service_misc_device);
1345 free_irq(IRQ_VEPU, (void *)&enc_dev);
1346 free_irq(IRQ_VDPU, (void *)&dec_dev);
1347 vpu_service_release_io();
1349 wake_lock_destroy(&service.wake_lock);
1352 module_init(vpu_service_init);
1353 module_exit(vpu_service_exit);
1355 #ifdef CONFIG_PROC_FS
1356 #include <linux/proc_fs.h>
1357 #include <linux/seq_file.h>
1359 static int proc_vpu_service_show(struct seq_file *s, void *v)
1362 vpu_reg *reg, *reg_tmp;
1363 vpu_session *session, *session_tmp;
1365 mutex_lock(&service.lock);
1366 vpu_service_power_on();
1367 seq_printf(s, "\nENC Registers:\n");
1368 n = enc_dev.iosize >> 2;
1369 for (i = 0; i < n; i++) {
1370 seq_printf(s, "\tswreg%d = %08X\n", i, readl(enc_dev.hwregs + i));
1372 seq_printf(s, "\nDEC Registers:\n");
1373 n = dec_dev.iosize >> 2;
1374 for (i = 0; i < n; i++) {
1375 seq_printf(s, "\tswreg%d = %08X\n", i, readl(dec_dev.hwregs + i));
1378 seq_printf(s, "\nvpu service status:\n");
1379 list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
1380 seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
1381 //seq_printf(s, "waiting reg set %d\n");
1382 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
1383 seq_printf(s, "waiting register set\n");
1385 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
1386 seq_printf(s, "running register set\n");
1388 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
1389 seq_printf(s, "done register set\n");
1392 mutex_unlock(&service.lock);
1397 static int proc_vpu_service_open(struct inode *inode, struct file *file)
1399 return single_open(file, proc_vpu_service_show, NULL);
1402 static const struct file_operations proc_vpu_service_fops = {
1403 .open = proc_vpu_service_open,
1405 .llseek = seq_lseek,
1406 .release = single_release,
1409 static int __init vpu_service_proc_init(void)
1411 proc_create("vpu_service", 0, NULL, &proc_vpu_service_fops);
1415 static void __exit vpu_service_proc_release(void)
1417 remove_proc_entry("vpu_service", NULL);
1419 #endif /* CONFIG_PROC_FS */