rk2928: fix RK29_VCODEC_PHYS definition compile error
[firefly-linux-kernel-4.4.55.git] arch/arm/plat-rk/vpu_service.c
1 /* arch/arm/mach-rk29/vpu.c
2  *
3  * Copyright (C) 2010 ROCKCHIP, Inc.
4  * author: chenhengming chm@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #ifdef CONFIG_RK29_VPU_DEBUG
18 #define DEBUG
19 #define pr_fmt(fmt) "VPU_SERVICE: %s: " fmt, __func__
20 #else
21 #define pr_fmt(fmt) "VPU_SERVICE: " fmt
22 #endif
23
24
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/fs.h>
33 #include <linux/ioport.h>
34 #include <linux/miscdevice.h>
35 #include <linux/mm.h>
36 #include <linux/poll.h>
37 #include <linux/platform_device.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/timer.h>
41
42 #include <asm/uaccess.h>
43
44 #include <mach/irqs.h>
45 #include <plat/vpu_service.h>
46 #include <mach/pmu.h>
47 #include <mach/cru.h>
48
49
50 #define DEC_INTERRUPT_REGISTER                  1
51 #define PP_INTERRUPT_REGISTER                   60
52 #define ENC_INTERRUPT_REGISTER                  1
53
54 #define DEC_INTERRUPT_BIT                        0x100
55 #define PP_INTERRUPT_BIT                         0x100
56 #define ENC_INTERRUPT_BIT                        0x1
57
58 #define REG_NUM_DEC                             (60)
59 #define REG_NUM_PP                              (41)
60 #if defined(CONFIG_ARCH_RK29) || defined(CONFIG_ARCH_RK2928)
61 #define REG_NUM_ENC                             (96)
62 #elif defined(CONFIG_ARCH_RK30)
63 #define REG_NUM_ENC                             (164)
64 #endif
65 #define REG_NUM_DEC_PP                          (REG_NUM_DEC+REG_NUM_PP)
66 #define SIZE_REG(reg)                           ((reg)*4)
67
68 #define DEC_IO_SIZE                             ((100 + 1) * 4) /* bytes */
69 #if defined(CONFIG_ARCH_RK29) || defined(CONFIG_ARCH_RK2928)
70 #define ENC_IO_SIZE                             (96 * 4)        /* bytes */
71 #elif defined(CONFIG_ARCH_RK30)
72 #define ENC_IO_SIZE                             (164 * 4)       /* bytes */
73 #endif
75 static const u16 dec_hw_ids[] = { 0x8190, 0x8170, 0x9170, 0x9190, 0x6731 };
76 #if defined(CONFIG_ARCH_RK29) || defined(CONFIG_ARCH_RK2928)
77 static const u16 enc_hw_ids[] = { 0x6280, 0x7280, 0x8270 };
78 #define DEC_PHY_OFFSET                          0x200
79 #if defined(CONFIG_ARCH_RK2928)
80 #define RK29_VCODEC_PHYS                        RK2928_VCODEC_PHYS
81 #endif
82 #elif defined(CONFIG_ARCH_RK30)
83 static const u16 enc_hw_ids[] = { 0x6280, 0x7280, 0x8270, 0x8290, 0x4831 };
84 #define DEC_PHY_OFFSET                          0x400
85 #define RK29_VCODEC_PHYS                        RK30_VCODEC_PHYS
86 #endif
87
88 #define VPU_REG_EN_ENC                          14
89 #define VPU_REG_ENC_GATE                        2
90 #define VPU_REG_ENC_GATE_BIT                    (1<<4)
91
92 #define VPU_REG_EN_DEC                          1
93 #define VPU_REG_DEC_GATE                        2
94 #define VPU_REG_DEC_GATE_BIT                    (1<<10)
95 #define VPU_REG_EN_PP                           0
96 #define VPU_REG_PP_GATE                         1
97 #define VPU_REG_PP_GATE_BIT                     (1<<8)
98 #define VPU_REG_EN_DEC_PP                       1
99 #define VPU_REG_DEC_PP_GATE                     61
100 #define VPU_REG_DEC_PP_GATE_BIT                 (1<<8)
101
102
103 /**
104  * struct for a process session connected to the vpu
105  *
106  * @author ChenHengming (2011-5-3)
107  */
108 typedef struct vpu_session {
109         VPU_CLIENT_TYPE         type;
110         /* a linked list of data so we can access them for debugging */
111         struct list_head        list_session;
112         /* a linked list of register data waiting for process */
113         struct list_head        waiting;
114         /* a linked list of register data in processing */
115         struct list_head        running;
116         /* a linked list of register data processed */
117         struct list_head        done;
118         wait_queue_head_t       wait;
119         pid_t                   pid;
120         atomic_t                task_running;
121 } vpu_session;
122
123 /**
124  * struct for a register set submitted by a process session
125  *
126  * @author ChenHengming (2011-5-4)
127  */
128 #define VPU_REG_NUM_MAX                     (((VPU_REG_NUM_ENC)>(VPU_REG_NUM_DEC_PP))?(VPU_REG_NUM_ENC):(VPU_REG_NUM_DEC_PP))
129 typedef struct vpu_reg {
130         VPU_CLIENT_TYPE         type;
131         vpu_session             *session;
132         struct list_head        session_link;           /* link to vpu service session */
133         struct list_head        status_link;            /* link to register set list */
134         unsigned long           size;
135         unsigned long           reg[VPU_REG_NUM_MAX];
136 } vpu_reg;
137
138 typedef struct vpu_device {
139         unsigned long           iobaseaddr;
140         unsigned int            iosize;
141         volatile u32            *hwregs;
142 } vpu_device;
143
144 typedef struct vpu_service_info {
145         spinlock_t              lock;
146         spinlock_t              lock_power;
147         struct timer_list       timer;                  /* timer for power off */
148         struct list_head        waiting;                /* link to status_link in struct vpu_reg */
149         struct list_head        running;                /* link to status_link in struct vpu_reg */
150         struct list_head        done;                   /* link to status_link in struct vpu_reg */
151         struct list_head        session;                /* link to list_session in struct vpu_session */
152         atomic_t                total_running;
153         bool                    enabled;
154         vpu_reg                 *reg_codec;
155         vpu_reg                 *reg_pproc;
156         vpu_reg                 *reg_resev;
157         VPUHwDecConfig_t        dec_config;
158         VPUHwEncConfig_t        enc_config;
159 } vpu_service_info;
160
161 typedef struct vpu_request
162 {
163         unsigned long   *req;
164         unsigned long   size;
165 } vpu_request;
166
167 static struct clk *pd_video;
168 static struct clk *clk_vpu; /* for power on notify */
169 static struct clk *aclk_vepu;
170 static struct clk *hclk_vepu;
171 static struct clk *aclk_ddr_vepu;
172 static struct clk *hclk_cpu_vcodec;
173 static vpu_service_info service;
174 static vpu_device       dec_dev;
175 static vpu_device       enc_dev;
176
177 #define POWER_OFF_DELAY (4*HZ) /* 4s */
178 #define TIMEOUT_DELAY   (2*HZ) /* 2s */
179
180 static void vpu_get_clk(void)
181 {
182         pd_video        = clk_get(NULL, "pd_video");
183         clk_vpu         = clk_get(NULL, "vpu");
184         aclk_vepu       = clk_get(NULL, "aclk_vepu");
185         hclk_vepu       = clk_get(NULL, "hclk_vepu");
186         aclk_ddr_vepu   = clk_get(NULL, "aclk_ddr_vepu");
187         hclk_cpu_vcodec = clk_get(NULL, "hclk_cpu_vcodec");
188 }
189
190 static void vpu_put_clk(void)
191 {
192         clk_put(pd_video);
193         clk_put(clk_vpu);
194         clk_put(aclk_vepu);
195         clk_put(hclk_vepu);
196         clk_put(aclk_ddr_vepu);
197         clk_put(hclk_cpu_vcodec);
198 }
199
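/*
 * Soft-reset the video codec block (used when a task fails to return in
 * time) and drop any register set that still claims the hardware.
 */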
200 static void vpu_reset(void)
201 {
202 #if defined(CONFIG_ARCH_RK29)
203         clk_disable(aclk_ddr_vepu);
204         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
205         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
206         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
207         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
208         mdelay(10);
209         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
210         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
211         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
212         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
213         clk_enable(aclk_ddr_vepu);
214 #elif defined(CONFIG_ARCH_RK30)
215         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
216         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
217         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
218         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
219         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
220         mdelay(1);
221         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
222         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
223         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
224         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
225         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
226 #endif
227         service.reg_codec = NULL;
228         service.reg_pproc = NULL;
229         service.reg_resev = NULL;
230 }
231
232 static void reg_deinit(vpu_reg *reg);
233 static void vpu_service_session_clear(vpu_session *session)
234 {
235         vpu_reg *reg, *n;
236         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
237                 reg_deinit(reg);
238         }
239         list_for_each_entry_safe(reg, n, &session->running, session_link) {
240                 reg_deinit(reg);
241         }
242         list_for_each_entry_safe(reg, n, &session->done, session_link) {
243                 reg_deinit(reg);
244         }
245 }
246
247 static void vpu_service_dump(void)
248 {
249         int running;
250         vpu_reg *reg, *reg_tmp;
251         vpu_session *session, *session_tmp;
252
253         running = atomic_read(&service.total_running);
254         printk("total_running %d\n", running);
255
256         printk("reg_codec 0x%.8x\n", (unsigned int)service.reg_codec);
257         printk("reg_pproc 0x%.8x\n", (unsigned int)service.reg_pproc);
258         printk("reg_resev 0x%.8x\n", (unsigned int)service.reg_resev);
259
260         list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
261                 printk("session pid %d type %d:\n", session->pid, session->type);
262                 running = atomic_read(&session->task_running);
263                 printk("task_running %d\n", running);
264                 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
265                         printk("waiting register set 0x%.8x\n", (unsigned int)reg);
266                 }
267                 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
268                         printk("running register set 0x%.8x\n", (unsigned int)reg);
269                 }
270                 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
271                         printk("done    register set 0x%.8x\n", (unsigned int)reg);
272                 }
273         }
274 }
275
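/*
 * Power handling: vpu_service_power_on() enables the codec clocks and power
 * domain and arms a timer that calls vpu_service_power_off() POWER_OFF_DELAY
 * after the last activity; vpu_service_power_maintain() re-arms the timer
 * while work is still being submitted.
 */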
276 static void vpu_service_power_off(void)
277 {
278         int total_running;
279
280         spin_lock_bh(&service.lock_power);
281         if (!service.enabled) {
282                 spin_unlock_bh(&service.lock_power);
283                 return;
284         }
285
286         service.enabled = false;
287         total_running = atomic_read(&service.total_running);
288         if (total_running) {
289                 pr_alert("alert: power off when %d task running!!\n", total_running);
290                 mdelay(50);
291                 pr_alert("alert: delay 50 ms for running task\n");
292                 vpu_service_dump();
293         }
294
295         printk("vpu: power off...");
296 #if defined(CONFIG_ARCH_RK29)
297         pmu_set_power_domain(PD_VCODEC, false);
298 #else
299         clk_disable(pd_video);
300 #endif
301         udelay(10);
302         clk_disable(hclk_cpu_vcodec);
303         clk_disable(aclk_ddr_vepu);
304         clk_disable(hclk_vepu);
305         clk_disable(aclk_vepu);
306         clk_disable(clk_vpu);
307         printk("done\n");
308         spin_unlock_bh(&service.lock_power);
309 }
310
311 static void vpu_service_power_off_work_func(unsigned long data)
312 {
313         printk("delayed ");
314         vpu_service_power_off();
315 }
316
317 static void vpu_service_power_maintain(void)
318 {
319         if (service.enabled) {
320                 mod_timer(&service.timer, jiffies + POWER_OFF_DELAY);
321         } else {
322                 pr_err("error: maintain power when power is off!\n");
323         }
324 }
325
326 static void vpu_service_power_on(void)
327 {
328         clk_enable(clk_vpu); /* notify vpu on without lock. */
329
330         spin_lock_bh(&service.lock_power);
331         if (!service.enabled) {
332                 service.enabled = true;
333                 printk("vpu: power on\n");
334
335                 clk_enable(clk_vpu);
336                 clk_enable(aclk_vepu);
337                 clk_enable(hclk_vepu);
338                 clk_enable(hclk_cpu_vcodec);
339                 udelay(10);
340 #if defined(CONFIG_ARCH_RK29)
341                 pmu_set_power_domain(PD_VCODEC, true);
342 #else
343                 clk_enable(pd_video);
344 #endif
345                 udelay(10);
346                 clk_enable(aclk_ddr_vepu);
347                 mod_timer(&service.timer, jiffies + POWER_OFF_DELAY);
348                 spin_unlock_bh(&service.lock_power);
349         } else {
350                 spin_unlock_bh(&service.lock_power);
351                 vpu_service_power_maintain();
352         }
353
354         clk_disable(clk_vpu);
355 }
356
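/*
 * Allocate a vpu_reg, copy the register values from user space and queue it
 * on both the global waiting list and the session's waiting list.
 */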
357 static vpu_reg *reg_init(vpu_session *session, void __user *src, unsigned long size)
358 {
359         unsigned long flag;
360         vpu_reg *reg = kmalloc(sizeof(vpu_reg), GFP_KERNEL);
361         if (NULL == reg) {
362                 pr_err("error: kmalloc fail in reg_init\n");
363                 return NULL;
364         }
365
366         reg->session = session;
367         reg->type = session->type;
368         reg->size = size;
369         INIT_LIST_HEAD(&reg->session_link);
370         INIT_LIST_HEAD(&reg->status_link);
371
372         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
373                 pr_err("error: copy_from_user failed in reg_init\n");
374                 kfree(reg);
375                 return NULL;
376         }
377
378         spin_lock_irqsave(&service.lock, flag);
379         list_add_tail(&reg->status_link, &service.waiting);
380         list_add_tail(&reg->session_link, &session->waiting);
381         spin_unlock_irqrestore(&service.lock, flag);
382
383         return reg;
384 }
385
386 static void reg_deinit(vpu_reg *reg)
387 {
388         list_del_init(&reg->session_link);
389         list_del_init(&reg->status_link);
390         if (reg == service.reg_codec) service.reg_codec = NULL;
391         if (reg == service.reg_pproc) service.reg_pproc = NULL;
392         kfree(reg);
393 }
394
395 static void reg_from_wait_to_run(vpu_reg *reg)
396 {
397         list_del_init(&reg->status_link);
398         list_add_tail(&reg->status_link, &service.running);
399
400         list_del_init(&reg->session_link);
401         list_add_tail(&reg->session_link, &reg->session->running);
402 }
403
404 static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
405 {
406         int i;
407         u32 *dst = (u32 *)&reg->reg[0];
408         for (i = 0; i < count; i++)
409                 *dst++ = *src++;
410 }
411
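/*
 * Move a finished register set to the done lists, copy the result registers
 * back from the hardware, release the busy codec/pproc slot and wake up the
 * owning session.
 */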
412 static void reg_from_run_to_done(vpu_reg *reg)
413 {
414         list_del_init(&reg->status_link);
415         list_add_tail(&reg->status_link, &service.done);
416
417         list_del_init(&reg->session_link);
418         list_add_tail(&reg->session_link, &reg->session->done);
419
420         switch (reg->type) {
421         case VPU_ENC : {
422                 service.reg_codec = NULL;
423                 reg_copy_from_hw(reg, enc_dev.hwregs, REG_NUM_ENC);
424                 break;
425         }
426         case VPU_DEC : {
427                 service.reg_codec = NULL;
428                 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC);
429                 break;
430         }
431         case VPU_PP : {
432                 service.reg_pproc = NULL;
433                 reg_copy_from_hw(reg, dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_PP);
434                 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
435                 break;
436         }
437         case VPU_DEC_PP : {
438                 service.reg_codec = NULL;
439                 service.reg_pproc = NULL;
440                 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC_PP);
441                 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
442                 break;
443         }
444         default : {
445                 pr_err("error: copy reg from hw with unknown type %d\n", reg->type);
446                 break;
447         }
448         }
449         atomic_sub(1, &reg->session->task_running);
450         atomic_sub(1, &service.total_running);
451         wake_up_interruptible_sync(&reg->session->wait);
452 }
453
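/*
 * Program a register set into the encoder/decoder hardware. The enable
 * register is written last, after a dsb(), so the block only starts once
 * all other registers are in place.
 */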
454 void reg_copy_to_hw(vpu_reg *reg)
455 {
456         int i;
457         u32 *src = (u32 *)&reg->reg[0];
458         atomic_add(1, &service.total_running);
459         atomic_add(1, &reg->session->task_running);
460         switch (reg->type) {
461         case VPU_ENC : {
462                 u32 *dst = (u32 *)enc_dev.hwregs;
463 #if defined(CONFIG_ARCH_RK30)
464                 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
465                 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
466                 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
467                 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
468 #endif
469                 service.reg_codec = reg;
470
471                 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
472
473                 for (i = 0; i < VPU_REG_EN_ENC; i++)
474                         dst[i] = src[i];
475
476                 for (i = VPU_REG_EN_ENC + 1; i < REG_NUM_ENC; i++)
477                         dst[i] = src[i];
478
479                 dsb();
480
481                 dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
482                 dst[VPU_REG_EN_ENC]   = src[VPU_REG_EN_ENC];
483         } break;
484         case VPU_DEC : {
485                 u32 *dst = (u32 *)dec_dev.hwregs;
486                 service.reg_codec = reg;
487
488                 for (i = REG_NUM_DEC - 1; i > VPU_REG_DEC_GATE; i--)
489                         dst[i] = src[i];
490
491                 dsb();
492
493                 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
494                 dst[VPU_REG_EN_DEC]   = src[VPU_REG_EN_DEC];
495         } break;
496         case VPU_PP : {
497                 u32 *dst = (u32 *)dec_dev.hwregs + PP_INTERRUPT_REGISTER;
498                 service.reg_pproc = reg;
499
500                 dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
501
502                 for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_PP; i++)
503                         dst[i] = src[i];
504
505                 dsb();
506
507                 dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
508         } break;
509         case VPU_DEC_PP : {
510                 u32 *dst = (u32 *)dec_dev.hwregs;
511                 service.reg_codec = reg;
512                 service.reg_pproc = reg;
513
514                 for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_DEC_PP; i++)
515                         dst[i] = src[i];
516
517                 dst[VPU_REG_EN_DEC_PP]   = src[VPU_REG_EN_DEC_PP] | 0x2;
518                 dsb();
519
520                 dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
521                 dst[VPU_REG_DEC_GATE]    = src[VPU_REG_DEC_GATE]    | VPU_REG_DEC_GATE_BIT;
522                 dst[VPU_REG_EN_DEC]      = src[VPU_REG_EN_DEC];
523         } break;
524         default : {
525                 pr_err("error: unsupported session type %d\n", reg->type);
526                 atomic_sub(1, &service.total_running);
527                 atomic_sub(1, &reg->session->task_running);
528                 break;
529         }
530         }
531 }
532
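/*
 * Take the first register set on the global waiting list and, if the
 * hardware it needs (codec and/or post-processor) is idle, move it to the
 * running lists and write it to the hardware. Called from the ioctl path,
 * the interrupt handlers and resume.
 */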
533 static void try_set_reg(void)
534 {
535         unsigned long flag;
536         // first get reg from reg list
537         spin_lock_irqsave(&service.lock, flag);
538         if (!list_empty(&service.waiting)) {
539                 int can_set = 0;
540                 vpu_reg *reg = list_entry(service.waiting.next, vpu_reg, status_link);
541
542                 vpu_service_power_maintain();
543                 switch (reg->type) {
544                 case VPU_ENC : {
545                         if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
546                                 can_set = 1;
547                 } break;
548                 case VPU_DEC : {
549                         if (NULL == service.reg_codec)
550                                 can_set = 1;
551                 } break;
552                 case VPU_PP : {
553                         if (NULL == service.reg_codec) {
554                                 if (NULL == service.reg_pproc)
555                                         can_set = 1;
556                         } else {
557                                 if ((VPU_DEC == service.reg_codec->type) && (NULL == service.reg_pproc))
558                                         can_set = 1;
559                         }
560                 } break;
561                 case VPU_DEC_PP : {
562                         if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
563                                 can_set = 1;
564                 } break;
565                 default : {
566                         printk("undefined reg type %d\n", reg->type);
567                 } break;
568                 }
569                 if (can_set) {
570                         reg_from_wait_to_run(reg);
571                         reg_copy_to_hw(reg);
572                 }
573         }
574         spin_unlock_irqrestore(&service.lock, flag);
575 }
576
577 static int return_reg(vpu_reg *reg, u32 __user *dst)
578 {
579         int ret = 0;
580         switch (reg->type) {
581         case VPU_ENC : {
582                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_ENC)))
583                         ret = -EFAULT;
584                 break;
585         }
586         case VPU_DEC : {
587                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_DEC)))
588                         ret = -EFAULT;
589                 break;
590         }
591         case VPU_PP : {
592                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_PP)))
593                         ret = -EFAULT;
594                 break;
595         }
596         case VPU_DEC_PP : {
597                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_DEC_PP)))
598                         ret = -EFAULT;
599                 break;
600         }
601         default : {
602                 ret = -EFAULT;
603                 pr_err("error: copy reg to user with unknown type %d\n", reg->type);
604                 break;
605         }
606         }
607         reg_deinit(reg);
608         return ret;
609 }
610
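/*
 * ioctl interface: SET_CLIENT_TYPE selects the session type,
 * GET_HW_FUSE_STATUS returns the decoder or encoder capability struct,
 * SET_REG queues a register set for the hardware, and GET_REG waits for one
 * of this session's register sets to complete and copies it back.
 */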
611 static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
612 {
613         vpu_session *session = (vpu_session *)filp->private_data;
614         if (NULL == session) {
615                 return -EINVAL;
616         }
617
618         switch (cmd) {
619         case VPU_IOC_SET_CLIENT_TYPE : {
620                 session->type = (VPU_CLIENT_TYPE)arg;
621                 break;
622         }
623         case VPU_IOC_GET_HW_FUSE_STATUS : {
624                 vpu_request req;
625                 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
626                         pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
627                         return -EFAULT;
628                 } else {
629                         if (VPU_ENC != session->type) {
630                                 if (copy_to_user((void __user *)req.req, &service.dec_config, sizeof(VPUHwDecConfig_t))) {
631                                         pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
632                                         return -EFAULT;
633                                 }
634                         } else {
635                                 if (copy_to_user((void __user *)req.req, &service.enc_config, sizeof(VPUHwEncConfig_t))) {
636                                         pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
637                                         return -EFAULT;
638                                 }
639                         }
640                 }
641
642                 break;
643         }
644         case VPU_IOC_SET_REG : {
645                 vpu_request req;
646                 vpu_reg *reg;
647                 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
648                         pr_err("error: VPU_IOC_SET_REG copy_from_user failed\n");
649                         return -EFAULT;
650                 }
651
652                 reg = reg_init(session, (void __user *)req.req, req.size);
653                 if (NULL == reg) {
654                         return -EFAULT;
655                 } else {
656                         vpu_service_power_on();
657                         try_set_reg();
658                 }
659
660                 break;
661         }
662         case VPU_IOC_GET_REG : {
663                 vpu_request req;
664                 vpu_reg *reg;
665                 unsigned long flag;
666                 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
667                         pr_err("error: VPU_IOC_GET_REG copy_from_user failed\n");
668                         return -EFAULT;
669                 } else {
670                         int ret = wait_event_interruptible_timeout(session->wait, !list_empty(&session->done), TIMEOUT_DELAY);
671                         if (!list_empty(&session->done)) {
672                                 if (ret < 0) {
673                                         pr_err("warning: pid %d wait task success but wait_event returned %d\n", session->pid, ret);
674                                 }
675                                 ret = 0;
676                         } else {
677                                 if (unlikely(ret < 0)) {
678                                         pr_err("error: pid %d wait task ret %d\n", session->pid, ret);
679                                 } else if (0 == ret) {
680                                         pr_err("error: pid %d timed out waiting for %d running task(s)\n", session->pid, atomic_read(&session->task_running));
681                                         ret = -ETIMEDOUT;
682                                 }
683                         }
684                         spin_lock_irqsave(&service.lock, flag);
685                         if (ret < 0) {
686                                 int task_running = atomic_read(&session->task_running);
687                                 vpu_service_dump();
688                                 if (task_running) {
689                                         atomic_set(&session->task_running, 0);
690                                         atomic_sub(task_running, &service.total_running);
691                                         printk("%d task is running but not return, reset hardware...", task_running);
692                                         vpu_reset();
693                                         printk("done\n");
694                                 }
695                                 vpu_service_session_clear(session);
696                                 spin_unlock_irqrestore(&service.lock, flag);
697                                 return ret;
698                         }
699                         spin_unlock_irqrestore(&service.lock, flag);
700                 }
701                 spin_lock_irqsave(&service.lock, flag);
702                 reg = list_entry(session->done.next, vpu_reg, session_link);
703                 return_reg(reg, (u32 __user *)req.req);
704                 spin_unlock_irqrestore(&service.lock, flag);
705                 break;
706         }
707         default : {
708                 pr_err("error: unknown vpu service ioctl cmd %x\n", cmd);
709                 break;
710         }
711         }
712
713         return 0;
714 }
715
716 static int vpu_service_check_hw_id(struct vpu_device * dev, const u16 *hwids, size_t num)
717 {
718         u32 hwid = readl(dev->hwregs);
719         pr_info("HW ID = 0x%08x\n", hwid);
720
721         hwid = (hwid >> 16) & 0xFFFF;   /* product version only */
722
723         while (num--) {
724                 if (hwid == hwids[num]) {
725                         pr_info("Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
726                         return 1;
727                 }
728         }
729
730         pr_info("No Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
731         return 0;
732 }
733
734 static void vpu_service_release_io(void)
735 {
736         if (dec_dev.hwregs)
737                 iounmap((void *)dec_dev.hwregs);
738         release_mem_region(dec_dev.iobaseaddr, dec_dev.iosize);
739
740         if (enc_dev.hwregs)
741                 iounmap((void *)enc_dev.hwregs);
742         release_mem_region(enc_dev.iobaseaddr, enc_dev.iosize);
743 }
744
745 static int vpu_service_reserve_io(void)
746 {
747         unsigned long iobaseaddr;
748         unsigned long iosize;
749
750         iobaseaddr      = dec_dev.iobaseaddr;
751         iosize          = dec_dev.iosize;
752
753         if (!request_mem_region(iobaseaddr, iosize, "vdpu_io")) {
754                 pr_info("failed to reserve dec HW regs\n");
755                 return -EBUSY;
756         }
757
758         dec_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
759
760         if (dec_dev.hwregs == NULL) {
761                 pr_info("failed to ioremap dec HW regs\n");
762                 goto err;
763         }
764
765         /* check for correct HW */
766         if (!vpu_service_check_hw_id(&dec_dev, dec_hw_ids, ARRAY_SIZE(dec_hw_ids))) {
767                 goto err;
768         }
769
770         iobaseaddr      = enc_dev.iobaseaddr;
771         iosize          = enc_dev.iosize;
772
773         if (!request_mem_region(iobaseaddr, iosize, "hx280enc")) {
774                 pr_info("failed to reserve enc HW regs\n");
775                 goto err;
776         }
777
778         enc_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
779
780         if (enc_dev.hwregs == NULL) {
781                 pr_info("failed to ioremap enc HW regs\n");
782                 goto err;
783         }
784
785         /* check for correct HW */
786         if (!vpu_service_check_hw_id(&enc_dev, enc_hw_ids, ARRAY_SIZE(enc_hw_ids))) {
787                 goto err;
788         }
789         return 0;
790
791 err:
792         vpu_service_release_io();
793         return -EBUSY;
794 }
795
796 static int vpu_service_open(struct inode *inode, struct file *filp)
797 {
798         unsigned long flag;
799         vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
800         if (NULL == session) {
801                 pr_err("error: unable to allocate memory for vpu_session\n");
802                 return -ENOMEM;
803         }
804
805         session->type   = VPU_TYPE_BUTT;
806         session->pid    = current->pid;
807         INIT_LIST_HEAD(&session->waiting);
808         INIT_LIST_HEAD(&session->running);
809         INIT_LIST_HEAD(&session->done);
810         INIT_LIST_HEAD(&session->list_session);
811         init_waitqueue_head(&session->wait);
812         atomic_set(&session->task_running, 0);
813         spin_lock_irqsave(&service.lock, flag);
814         list_add_tail(&session->list_session, &service.session);
815         filp->private_data = (void *)session;
816         spin_unlock_irqrestore(&service.lock, flag);
817
818         pr_debug("dev opened\n");
819         return nonseekable_open(inode, filp);
820 }
821
822 static int vpu_service_release(struct inode *inode, struct file *filp)
823 {
824         int task_running;
825         unsigned long flag;
826         vpu_session *session = (vpu_session *)filp->private_data;
827         if (NULL == session)
828                 return -EINVAL;
829
830         task_running = atomic_read(&session->task_running);
831         if (task_running) {
832                 pr_err("error: vpu_service session %d still has %d task running when closing\n", session->pid, task_running);
833                 msleep(50);
834         }
835         wake_up_interruptible_sync(&session->wait);
836
837         spin_lock_irqsave(&service.lock, flag);
838         /* remove this filp from the asynchronously notified filps */
839         //vpu_service_fasync(-1, filp, 0);
840         list_del_init(&session->list_session);
841         vpu_service_session_clear(session);
842         kfree(session);
843         filp->private_data = NULL;
844         spin_unlock_irqrestore(&service.lock, flag);
845
846         pr_debug("dev closed\n");
847         return 0;
848 }
849
850 static const struct file_operations vpu_service_fops = {
851         .unlocked_ioctl = vpu_service_ioctl,
852         .open           = vpu_service_open,
853         .release        = vpu_service_release,
854         //.fasync       = vpu_service_fasync,
855 };
856
857 static struct miscdevice vpu_service_misc_device = {
858         .minor          = MISC_DYNAMIC_MINOR,
859         .name           = "vpu_service",
860         .fops           = &vpu_service_fops,
861 };
862
863 static void vpu_service_shutdown(struct platform_device *pdev)
864 {
865         pr_cont("shutdown...");
866         del_timer(&service.timer);
867         vpu_service_power_off();
868         pr_cont("done\n");
869 }
870
871 static int vpu_service_suspend(struct platform_device *pdev, pm_message_t state)
872 {
873         bool enabled;
874         pr_info("suspend...");
875         del_timer(&service.timer);
876         enabled = service.enabled;
877         vpu_service_power_off();
878         service.enabled = enabled;
879         return 0;
880 }
881
882 static int vpu_service_resume(struct platform_device *pdev)
883 {
884         pr_info("resume...");
885         if (service.enabled) {
886                 service.enabled = false;
887                 vpu_service_power_on();
888                 try_set_reg();
889         }
890         return 0;
891 }
892
893 static struct platform_device vpu_service_device = {
894         .name              = "vpu_service",
895         .id                = -1,
896 };
897
898 static struct platform_driver vpu_service_driver = {
899         .driver    = {
900                 .name  = "vpu_service",
901                 .owner = THIS_MODULE,
902         },
903         .shutdown  = vpu_service_shutdown,
904         .suspend   = vpu_service_suspend,
905         .resume    = vpu_service_resume,
906 };
907
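/*
 * Read the decoder/encoder synthesis configuration and fuse registers and
 * fill the capability structures reported by VPU_IOC_GET_HW_FUSE_STATUS.
 */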
908 static void get_hw_info(void)
909 {
910         VPUHwDecConfig_t *dec = &service.dec_config;
911         VPUHwEncConfig_t *enc = &service.enc_config;
912         u32 configReg   = dec_dev.hwregs[VPU_DEC_HWCFG0];
913         u32 asicID      = dec_dev.hwregs[0];
914
915         dec->h264Support    = (configReg >> DWL_H264_E) & 0x3U;
916         dec->jpegSupport    = (configReg >> DWL_JPEG_E) & 0x01U;
917         if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
918                 dec->jpegSupport = JPEG_PROGRESSIVE;
919         dec->mpeg4Support   = (configReg >> DWL_MPEG4_E) & 0x3U;
920         dec->vc1Support     = (configReg >> DWL_VC1_E) & 0x3U;
921         dec->mpeg2Support   = (configReg >> DWL_MPEG2_E) & 0x01U;
922         dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
923         dec->refBufSupport  = (configReg >> DWL_REF_BUFF_E) & 0x01U;
924         dec->vp6Support     = (configReg >> DWL_VP6_E) & 0x01U;
925         dec->maxDecPicWidth = configReg & 0x07FFU;
926
927         /* 2nd Config register */
928         configReg   = dec_dev.hwregs[VPU_DEC_HWCFG1];
929         if (dec->refBufSupport) {
930                 if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
931                         dec->refBufSupport |= 2;
932                 if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
933                         dec->refBufSupport |= 4;
934         }
935         dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
936         dec->vp7Support     = (configReg >> DWL_VP7_E) & 0x01U;
937         dec->vp8Support     = (configReg >> DWL_VP8_E) & 0x01U;
938         dec->avsSupport     = (configReg >> DWL_AVS_E) & 0x01U;
939
940         /* JPEG extensions */
941         if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
942                 dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
943         } else {
944                 dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
945         }
946
947         if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
948                 dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
949         } else {
950                 dec->rvSupport = RV_NOT_SUPPORTED;
951         }
952
953         dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;
954
955         if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
956                 dec->refBufSupport |= 8; /* enable HW support for offset */
957         }
958
959         {
960         VPUHwFuseStatus_t hwFuseSts;
961         /* Decoder fuse configuration */
962         u32 fuseReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
963
964         hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
965         hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
966         hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
967         hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
968         hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
969         hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
970         hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
971         hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
972         hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
973         hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
974         hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
975         hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
976         hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
977         hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;
978
979         /* check max. decoder output width */
980
981         if (fuseReg & 0x8000U)
982                 hwFuseSts.maxDecPicWidthFuse = 1920;
983         else if (fuseReg & 0x4000U)
984                 hwFuseSts.maxDecPicWidthFuse = 1280;
985         else if (fuseReg & 0x2000U)
986                 hwFuseSts.maxDecPicWidthFuse = 720;
987         else if (fuseReg & 0x1000U)
988                 hwFuseSts.maxDecPicWidthFuse = 352;
989         else    /* remove warning */
990                 hwFuseSts.maxDecPicWidthFuse = 352;
991
992         hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;
993
994         /* Pp configuration */
995         configReg = dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];
996
997         if ((configReg >> DWL_PP_E) & 0x01U) {
998                 dec->ppSupport = 1;
999                 dec->maxPpOutPicWidth = configReg & 0x07FFU;
1000                 /*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
1001                 dec->ppConfig = configReg;
1002         } else {
1003                 dec->ppSupport = 0;
1004                 dec->maxPpOutPicWidth = 0;
1005                 dec->ppConfig = 0;
1006         }
1007
1008         /* check the HW version */
1009         if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
1010                 /* Pp configuration */
1011                 configReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
1012
1013                 if ((configReg >> DWL_PP_E) & 0x01U) {
1014                         /* Pp fuse configuration */
1015                         u32 fuseRegPp = dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];
1016
1017                         if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
1018                                 hwFuseSts.ppSupportFuse = 1;
1019                                 /* check max. pp output width */
1020                                 if      (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920;
1021                                 else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280;
1022                                 else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720;
1023                                 else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352;
1024                                 else                          hwFuseSts.maxPpOutPicWidthFuse = 352;
1025                                 hwFuseSts.ppConfigFuse = fuseRegPp;
1026                         } else {
1027                                 hwFuseSts.ppSupportFuse = 0;
1028                                 hwFuseSts.maxPpOutPicWidthFuse = 0;
1029                                 hwFuseSts.ppConfigFuse = 0;
1030                         }
1031                 } else {
1032                         hwFuseSts.ppSupportFuse = 0;
1033                         hwFuseSts.maxPpOutPicWidthFuse = 0;
1034                         hwFuseSts.ppConfigFuse = 0;
1035                 }
1036
1037                 if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse)
1038                         dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse;
1039                 if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse)
1040                         dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse;
1041                 if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED;
1042                 if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED;
1043                 if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED;
1044                 if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED;
1045                 if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse)
1046                         dec->jpegSupport = JPEG_BASELINE;
1047                 if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED;
1048                 if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED;
1049                 if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED;
1050                 if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED;
1051                 if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED;
1052                 if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED;
1053
1054                 /* check the pp config vs fuse status */
1055                 if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) {
1056                         u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25);
1057                         u32 alphaBlend  = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24);
1058                         u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25);
1059                         u32 alphaBlendFuse  = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24);
1060
1061                         if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000;
1062                         if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000;
1063                 }
1064                 if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED;
1065                 if (!hwFuseSts.refBufSupportFuse)   dec->refBufSupport = REF_BUF_NOT_SUPPORTED;
1066                 if (!hwFuseSts.rvSupportFuse)       dec->rvSupport = RV_NOT_SUPPORTED;
1067                 if (!hwFuseSts.avsSupportFuse)      dec->avsSupport = AVS_NOT_SUPPORTED;
1068                 if (!hwFuseSts.mvcSupportFuse)      dec->mvcSupport = MVC_NOT_SUPPORTED;
1069         }
1070         }
1071         configReg = enc_dev.hwregs[63];
1072         enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
1073         enc->h264Enabled = (configReg >> 27) & 1;
1074         enc->mpeg4Enabled = (configReg >> 26) & 1;
1075         enc->jpegEnabled = (configReg >> 25) & 1;
1076         enc->vsEnabled = (configReg >> 24) & 1;
1077         enc->rgbEnabled = (configReg >> 28) & 1;
1078         enc->busType = (configReg >> 20) & 15;
1079         enc->synthesisLanguage = (configReg >> 16) & 15;
1080         enc->busWidth = (configReg >> 12) & 15;
1081 }
1082
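/*
 * Decoder/post-processor interrupt: acknowledge the IRQ, complete the active
 * register set and try to start the next waiting one.
 */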
1083 static irqreturn_t vdpu_isr(int irq, void *dev_id)
1084 {
1085         vpu_device *dev = (vpu_device *) dev_id;
1086         u32 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1087         u32 irq_status_pp  = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
1088
1089         pr_debug("vdpu_isr dec %x pp %x\n", irq_status_dec, irq_status_pp);
1090
1091         if (irq_status_dec & DEC_INTERRUPT_BIT) {
1092                 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1093                 if ((irq_status_dec & 0x40001) == 0x40001)
1094                 {
1095                         do {
1096                                 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1097                         } while ((irq_status_dec & 0x40001) == 0x40001);
1098                 }
1099                 /* clear dec IRQ */
1100                 writel(irq_status_dec & (~DEC_INTERRUPT_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER);
1101                 pr_debug("DEC IRQ received!\n");
1102                 spin_lock(&service.lock);
1103                 if (NULL == service.reg_codec) {
1104                         pr_err("error: dec isr with no task waiting\n");
1105                 } else {
1106                         reg_from_run_to_done(service.reg_codec);
1107                 }
1108                 spin_unlock(&service.lock);
1109         }
1110
1111         if (irq_status_pp & PP_INTERRUPT_BIT) {
1112                 /* clear pp IRQ */
1113                 writel(irq_status_pp & (~PP_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
1114                 pr_debug("PP IRQ received!\n");
1115                 spin_lock(&service.lock);
1116                 if (NULL == service.reg_pproc) {
1117                         pr_err("error: pp isr with no task waiting\n");
1118                 } else {
1119                         reg_from_run_to_done(service.reg_pproc);
1120                 }
1121                 spin_unlock(&service.lock);
1122         }
1123         try_set_reg();
1124         return IRQ_HANDLED;
1125 }
1126
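/* Encoder interrupt: same flow as vdpu_isr, for the encoder register set. */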
1127 static irqreturn_t vepu_isr(int irq, void *dev_id)
1128 {
1129         struct vpu_device *dev = (struct vpu_device *) dev_id;
1130         u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);
1131
1132         pr_debug("enc_isr\n");
1133
1134         if (likely(irq_status & ENC_INTERRUPT_BIT)) {
1135                 /* clear enc IRQ */
1136                 writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
1137                 pr_debug("ENC IRQ received!\n");
1138                 spin_lock(&service.lock);
1139                 if (NULL == service.reg_codec) {
1140                         pr_err("error: enc isr with no task waiting\n");
1141                 } else {
1142                         reg_from_run_to_done(service.reg_codec);
1143                 }
1144                 spin_unlock(&service.lock);
1145         }
1146         try_set_reg();
1147         return IRQ_HANDLED;
1148 }
1149
1150 static int __init vpu_service_proc_init(void);
1151 static int __init vpu_service_init(void)
1152 {
1153         int ret;
1154
1155         pr_debug("baseaddr = 0x%08x vdpu irq = %d vepu irq = %d\n", RK29_VCODEC_PHYS, IRQ_VDPU, IRQ_VEPU);
1156
1157         dec_dev.iobaseaddr      = RK29_VCODEC_PHYS + DEC_PHY_OFFSET;
1158         dec_dev.iosize          = DEC_IO_SIZE;
1159         enc_dev.iobaseaddr      = RK29_VCODEC_PHYS;
1160         enc_dev.iosize          = ENC_IO_SIZE;
1161
1162         INIT_LIST_HEAD(&service.waiting);
1163         INIT_LIST_HEAD(&service.running);
1164         INIT_LIST_HEAD(&service.done);
1165         INIT_LIST_HEAD(&service.session);
1166         spin_lock_init(&service.lock);
1167         spin_lock_init(&service.lock_power);
1168         service.reg_codec       = NULL;
1169         service.reg_pproc       = NULL;
1170         atomic_set(&service.total_running, 0);
1171         service.enabled         = false;
1172
1173         vpu_get_clk();
1174         init_timer(&service.timer);
1175         service.timer.expires = jiffies + POWER_OFF_DELAY;
1176         service.timer.function = vpu_service_power_off_work_func;
1177         vpu_service_power_on();
1178
1179         ret = vpu_service_reserve_io();
1180         if (ret < 0) {
1181                 pr_err("error: reserve io failed\n");
1182                 goto err_reserve_io;
1183         }
1184
1185         /* get the IRQ line */
1186         ret = request_irq(IRQ_VDPU, vdpu_isr, IRQF_SHARED, "vdpu", (void *)&dec_dev);
1187         if (ret) {
1188                 pr_err("error: can't request vdpu irq %d\n", IRQ_VDPU);
1189                 goto err_req_vdpu_irq;
1190         }
1191
1192         ret = request_irq(IRQ_VEPU, vepu_isr, IRQF_SHARED, "vepu", (void *)&enc_dev);
1193         if (ret) {
1194                 pr_err("error: can't request vepu irq %d\n", IRQ_VEPU);
1195                 goto err_req_vepu_irq;
1196         }
1197
1198         ret = misc_register(&vpu_service_misc_device);
1199         if (ret) {
1200                 pr_err("error: misc_register failed\n");
1201                 goto err_register;
1202         }
1203
1204         platform_device_register(&vpu_service_device);
1205         platform_driver_probe(&vpu_service_driver, NULL);
1206         get_hw_info();
1207         vpu_service_power_off();
1208         pr_info("init success\n");
1209
1210         vpu_service_proc_init();
1211         return 0;
1212
1213 err_register:
1214         free_irq(IRQ_VEPU, (void *)&enc_dev);
1215 err_req_vepu_irq:
1216         free_irq(IRQ_VDPU, (void *)&dec_dev);
1217 err_req_vdpu_irq:
1219 err_reserve_io:
1220         vpu_service_power_off();
1221         vpu_service_release_io();
1222         vpu_put_clk();
1223         pr_info("init failed\n");
1224         return ret;
1225 }
1226
1227 static void __exit vpu_service_exit(void)
1228 {
1229         del_timer(&service.timer);
1230         vpu_service_power_off();
1231         platform_device_unregister(&vpu_service_device);
1232         platform_driver_unregister(&vpu_service_driver);
1233         misc_deregister(&vpu_service_misc_device);
1234         free_irq(IRQ_VEPU, (void *)&enc_dev);
1235         free_irq(IRQ_VDPU, (void *)&dec_dev);
1236         vpu_put_clk();
1237 }
1238
1239 module_init(vpu_service_init);
1240 module_exit(vpu_service_exit);
1241
1242 #ifdef CONFIG_PROC_FS
1243 #include <linux/proc_fs.h>
1244 #include <linux/seq_file.h>
1245
1246 static int proc_vpu_service_show(struct seq_file *s, void *v)
1247 {
1248         unsigned int i, n;
1249         unsigned long flag;
1250         vpu_reg *reg, *reg_tmp;
1251         vpu_session *session, *session_tmp;
1252
1253         vpu_service_power_on();
1254         seq_printf(s, "\nENC Registers:\n");
1255         n = enc_dev.iosize >> 2;
1256         for (i = 0; i < n; i++) {
1257                 seq_printf(s, "\tswreg%d = %08X\n", i, readl(enc_dev.hwregs + i));
1258         }
1259         seq_printf(s, "\nDEC Registers:\n");
1260         n = dec_dev.iosize >> 2;
1261         for (i = 0; i < n; i++) {
1262                 seq_printf(s, "\tswreg%d = %08X\n", i, readl(dec_dev.hwregs + i));
1263         }
1264
1265         seq_printf(s, "\nvpu service status:\n");
1266         spin_lock_irqsave(&service.lock, flag);
1267         list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
1268                 seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
1269                 //seq_printf(s, "waiting reg set %d\n");
1270                 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
1271                         seq_printf(s, "waiting register set\n");
1272                 }
1273                 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
1274                         seq_printf(s, "running register set\n");
1275                 }
1276                 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
1277                         seq_printf(s, "done    register set\n");
1278                 }
1279         }
1280         spin_unlock_irqrestore(&service.lock, flag);
1281
1282         return 0;
1283 }
1284
1285 static int proc_vpu_service_open(struct inode *inode, struct file *file)
1286 {
1287         return single_open(file, proc_vpu_service_show, NULL);
1288 }
1289
1290 static const struct file_operations proc_vpu_service_fops = {
1291         .open           = proc_vpu_service_open,
1292         .read           = seq_read,
1293         .llseek         = seq_lseek,
1294         .release        = single_release,
1295 };
1296
1297 static int __init vpu_service_proc_init(void)
1298 {
1299         proc_create("vpu_service", 0, NULL, &proc_vpu_service_fops);
1300         return 0;
1301
1302 }
1303 #endif /* CONFIG_PROC_FS */
1304