vpu_service: replace spin_lock by mutex and use irq_thread to handle irq
[firefly-linux-kernel-4.4.55.git] / arch/arm/plat-rk/vpu_service.c
1 /* arch/arm/plat-rk/vpu_service.c
2  *
3  * Copyright (C) 2010 ROCKCHIP, Inc.
4  * author: chenhengming chm@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #ifdef CONFIG_RK29_VPU_DEBUG
18 #define DEBUG
19 #define pr_fmt(fmt) "VPU_SERVICE: %s: " fmt, __func__
20 #else
21 #define pr_fmt(fmt) "VPU_SERVICE: " fmt
22 #endif
23
24
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/fs.h>
33 #include <linux/ioport.h>
34 #include <linux/miscdevice.h>
35 #include <linux/mm.h>
36 #include <linux/poll.h>
37 #include <linux/platform_device.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/timer.h>
41
42 #include <asm/uaccess.h>
43
44 #include <mach/irqs.h>
45 #include <plat/vpu_service.h>
46 #include <mach/pmu.h>
47 #include <mach/cru.h>
48
49
50 #define DEC_INTERRUPT_REGISTER                  1
51 #define PP_INTERRUPT_REGISTER                   60
52 #define ENC_INTERRUPT_REGISTER                  1
53
54 #define DEC_INTERRUPT_BIT                        0x100
55 #define PP_INTERRUPT_BIT                         0x100
56 #define ENC_INTERRUPT_BIT                        0x1
57
58 #define REG_NUM_DEC                             (60)
59 #define REG_NUM_PP                              (41)
60 #if defined(CONFIG_ARCH_RK29)
61 #define REG_NUM_ENC                             (96)
62 #elif defined(CONFIG_ARCH_RK30)
63 #define REG_NUM_ENC                             (164)
64 #endif
65 #define REG_NUM_DEC_PP                          (REG_NUM_DEC+REG_NUM_PP)
66 #define SIZE_REG(reg)                           ((reg)*4)
67
68 #define DEC_IO_SIZE                             ((100 + 1) * 4) /* bytes */
69 #if defined(CONFIG_ARCH_RK29)
70 #define ENC_IO_SIZE                             (96 * 4)        /* bytes */
71 #elif defined(CONFIG_ARCH_RK30)
72 #define ENC_IO_SIZE                             (164 * 4)       /* bytes */
73 #endif
74
75 static const u16 dec_hw_ids[] = { 0x8190, 0x8170, 0x9170, 0x9190, 0x6731 };
76 #if defined(CONFIG_ARCH_RK29)
77 static const u16 enc_hw_ids[] = { 0x6280, 0x7280, 0x8270 };
78 #define DEC_PHY_OFFSET                          0x200
79 #elif defined(CONFIG_ARCH_RK30)
80 static const u16 enc_hw_ids[] = { 0x6280, 0x7280, 0x8270, 0x8290, 0x4831 };
81 #define DEC_PHY_OFFSET                          0x400
82 #define RK29_VCODEC_PHYS                        RK30_VCODEC_PHYS
83 #endif
84
85 #define VPU_REG_EN_ENC                          14
86 #define VPU_REG_ENC_GATE                        2
87 #define VPU_REG_ENC_GATE_BIT                    (1<<4)
88
89 #define VPU_REG_EN_DEC                          1
90 #define VPU_REG_DEC_GATE                        2
91 #define VPU_REG_DEC_GATE_BIT                    (1<<10)
92 #define VPU_REG_EN_PP                           0
93 #define VPU_REG_PP_GATE                         1
94 #define VPU_REG_PP_GATE_BIT                     (1<<8)
95 #define VPU_REG_EN_DEC_PP                       1
96 #define VPU_REG_DEC_PP_GATE                     61
97 #define VPU_REG_DEC_PP_GATE_BIT                 (1<<8)
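/*
 * The VPU_REG_EN_* and VPU_REG_*_GATE values above are word indices into the
 * memory-mapped register files (dec_dev.hwregs / enc_dev.hwregs); the
 * *_GATE_BIT masks are or-ed into the corresponding clock gating registers
 * by reg_copy_to_hw() before a block is enabled.
 */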
98
99
100 /**
101  * struct for a process session which connects to the vpu
102  *
103  * @author ChenHengming (2011-5-3)
104  */
105 typedef struct vpu_session {
106         VPU_CLIENT_TYPE         type;
107         /* a linked list of data so we can access them for debugging */
108         struct list_head        list_session;
109         /* a linked list of register sets waiting to be processed */
110         struct list_head        waiting;
111         /* a linked list of register sets currently being processed */
112         struct list_head        running;
113         /* a linked list of register sets already processed */
114         struct list_head        done;
115         wait_queue_head_t       wait;
116         pid_t                   pid;
117         atomic_t                task_running;
118 } vpu_session;
119
120 /**
121  * struct for a register set submitted by a session
122  *
123  * @author ChenHengming (2011-5-4)
124  */
125 #define VPU_REG_NUM_MAX                     (((REG_NUM_ENC)>(REG_NUM_DEC_PP))?(REG_NUM_ENC):(REG_NUM_DEC_PP))
126 typedef struct vpu_reg {
127         VPU_CLIENT_TYPE         type;
128         vpu_session             *session;
129         struct list_head        session_link;           /* link to vpu service session */
130         struct list_head        status_link;            /* link to register set list */
131         unsigned long           size;
132         unsigned long           reg[VPU_REG_NUM_MAX];
133 } vpu_reg;
134
135 typedef struct vpu_device {
136         unsigned long           iobaseaddr;
137         unsigned int            iosize;
138         volatile u32            *hwregs;
139         atomic_t                isr_codec;
140         atomic_t                isr_pp;
141 } vpu_device;
142
143 typedef struct vpu_service_info {
144         struct mutex            lock;
145         struct timer_list       timer;                  /* timer for power off */
146         struct list_head        waiting;                /* list of status_link in struct vpu_reg */
147         struct list_head        running;                /* list of status_link in struct vpu_reg */
148         struct list_head        done;                   /* list of status_link in struct vpu_reg */
149         struct list_head        session;                /* list of list_session in struct vpu_session */
150         atomic_t                total_running;
151         bool                    enabled;
152         vpu_reg                 *reg_codec;
153         vpu_reg                 *reg_pproc;
154         vpu_reg                 *reg_resev;
155         VPUHwDecConfig_t        dec_config;
156         VPUHwEncConfig_t        enc_config;
157 } vpu_service_info;
158
159 typedef struct vpu_request
160 {
161         unsigned long   *req;
162         unsigned long   size;
163 } vpu_request;
164
165 static struct clk *pd_video;
166 static struct clk *clk_vpu; /* for power on notify */
167 static struct clk *aclk_vepu;
168 static struct clk *hclk_vepu;
169 static struct clk *aclk_ddr_vepu;
170 static struct clk *hclk_cpu_vcodec;
171 static vpu_service_info service;
172 static vpu_device       dec_dev;
173 static vpu_device       enc_dev;
174
175 #define POWER_OFF_DELAY (4*HZ) /* 4s */
176 #define TIMEOUT_DELAY   (2*HZ) /* 2s */
177
178 static void vpu_get_clk(void)
179 {
180         pd_video        = clk_get(NULL, "pd_video");
181         clk_vpu         = clk_get(NULL, "vpu");
182         aclk_vepu       = clk_get(NULL, "aclk_vepu");
183         hclk_vepu       = clk_get(NULL, "hclk_vepu");
184         aclk_ddr_vepu   = clk_get(NULL, "aclk_ddr_vepu");
185         hclk_cpu_vcodec = clk_get(NULL, "hclk_cpu_vcodec");
186 }
187
188 static void vpu_put_clk(void)
189 {
190         clk_put(pd_video);
191         clk_put(clk_vpu);
192         clk_put(aclk_vepu);
193         clk_put(hclk_vepu);
194         clk_put(aclk_ddr_vepu);
195         clk_put(hclk_cpu_vcodec);
196 }
197
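/*
 * vpu_reset() pulses the CRU soft resets for the video codec (idling the DDR
 * port / power domain around them) and forgets any register set that was on
 * the hardware; it is only called from the error path of VPU_IOC_GET_REG.
 */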
198 static void vpu_reset(void)
199 {
200 #if defined(CONFIG_ARCH_RK29)
201         clk_disable(aclk_ddr_vepu);
202         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
203         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
204         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
205         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
206         mdelay(10);
207         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
208         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
209         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
210         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
211         clk_enable(aclk_ddr_vepu);
212 #elif defined(CONFIG_ARCH_RK30)
213         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
214         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
215         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
216         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
217         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
218         mdelay(1);
219         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
220         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
221         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
222         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
223         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
224 #endif
225         service.reg_codec = NULL;
226         service.reg_pproc = NULL;
227         service.reg_resev = NULL;
228 }
229
230 static void reg_deinit(vpu_reg *reg);
231 static void vpu_service_session_clear(vpu_session *session)
232 {
233         vpu_reg *reg, *n;
234         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
235                 reg_deinit(reg);
236         }
237         list_for_each_entry_safe(reg, n, &session->running, session_link) {
238                 reg_deinit(reg);
239         }
240         list_for_each_entry_safe(reg, n, &session->done, session_link) {
241                 reg_deinit(reg);
242         }
243 }
244
245 static void vpu_service_dump(void)
246 {
247         int running;
248         vpu_reg *reg, *reg_tmp;
249         vpu_session *session, *session_tmp;
250
251         running = atomic_read(&service.total_running);
252         printk("total_running %d\n", running);
253
254         printk("reg_codec 0x%.8x\n", (unsigned int)service.reg_codec);
255         printk("reg_pproc 0x%.8x\n", (unsigned int)service.reg_pproc);
256         printk("reg_resev 0x%.8x\n", (unsigned int)service.reg_resev);
257
258         list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
259                 printk("session pid %d type %d:\n", session->pid, session->type);
260                 running = atomic_read(&session->task_running);
261                 printk("task_running %d\n", running);
262                 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
263                         printk("waiting register set 0x%.8x\n", (unsigned int)reg);
264                 }
265                 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
266                         printk("running register set 0x%.8x\n", (unsigned int)reg);
267                 }
268                 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
269                         printk("done    register set 0x%.8x\n", (unsigned int)reg);
270                 }
271         }
272 }
273
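/*
 * Power management: vpu_service_power_on() enables the clocks/power domain
 * and (re)arms service.timer, so vpu_service_power_off_work_func() cuts the
 * power POWER_OFF_DELAY after the last activity.  Both paths serialize on
 * the service.lock mutex.
 */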
274 static void vpu_service_power_off(void)
275 {
276         int total_running;
277
278         mutex_lock(&service.lock);
279         if (!service.enabled) {
280                 mutex_unlock(&service.lock);
281                 return;
282         }
283
284         service.enabled = false;
285         total_running = atomic_read(&service.total_running);
286         if (total_running) {
287                 pr_alert("alert: power off while %d tasks are still running!!\n", total_running);
288                 mdelay(50);
289                 pr_alert("alert: delayed 50 ms for the running tasks\n");
290                 vpu_service_dump();
291         }
292
293         printk("vpu: power off...");
294 #ifdef CONFIG_ARCH_RK29
295         pmu_set_power_domain(PD_VCODEC, false);
296 #else
297         clk_disable(pd_video);
298 #endif
299         udelay(10);
300         clk_disable(hclk_cpu_vcodec);
301         clk_disable(aclk_ddr_vepu);
302         clk_disable(hclk_vepu);
303         clk_disable(aclk_vepu);
304         clk_disable(clk_vpu);
305         printk("done\n");
306         mutex_unlock(&service.lock);
307 }
308
309 static void vpu_service_power_off_work_func(unsigned long data)
310 {
311         printk("delayed ");
312         vpu_service_power_off();
313 }
314
315 static void vpu_service_power_on(void)
316 {
317         clk_enable(clk_vpu); /* notify vpu on without lock. */
318
319         mutex_lock(&service.lock);
320         if (!service.enabled) {
321                 service.enabled = true;
322                 printk("vpu: power on\n");
323
324                 clk_enable(clk_vpu);
325                 clk_enable(aclk_vepu);
326                 clk_enable(hclk_vepu);
327                 clk_enable(hclk_cpu_vcodec);
328                 udelay(10);
329 #ifdef CONFIG_ARCH_RK29
330                 pmu_set_power_domain(PD_VCODEC, true);
331 #else
332                 clk_enable(pd_video);
333 #endif
334                 udelay(10);
335                 clk_enable(aclk_ddr_vepu);
336         }
337         mod_timer(&service.timer, jiffies + POWER_OFF_DELAY);
338         mutex_unlock(&service.lock);
339
340         clk_disable(clk_vpu);
341 }
342
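/*
 * reg_init() copies a register set from user space into a freshly allocated
 * vpu_reg and queues it on both the global and the per-session waiting lists
 * under service.lock; reg_deinit() is its counterpart and also drops any
 * reference held in service.reg_codec / service.reg_pproc.
 */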
343 static vpu_reg *reg_init(vpu_session *session, void __user *src, unsigned long size)
344 {
345         vpu_reg *reg = kmalloc(sizeof(vpu_reg), GFP_KERNEL);
346         if (NULL == reg) {
347                 pr_err("error: kmalloc fail in reg_init\n");
348                 return NULL;
349         }
350
351         reg->session = session;
352         reg->type = session->type;
353         reg->size = size;
354         INIT_LIST_HEAD(&reg->session_link);
355         INIT_LIST_HEAD(&reg->status_link);
356
357         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
358                 pr_err("error: copy_from_user failed in reg_init\n");
359                 kfree(reg);
360                 return NULL;
361         }
362
363         mutex_lock(&service.lock);
364         list_add_tail(&reg->status_link, &service.waiting);
365         list_add_tail(&reg->session_link, &session->waiting);
366         mutex_unlock(&service.lock);
367
368         return reg;
369 }
370
371 static void reg_deinit(vpu_reg *reg)
372 {
373         list_del_init(&reg->session_link);
374         list_del_init(&reg->status_link);
375         if (reg == service.reg_codec) service.reg_codec = NULL;
376         if (reg == service.reg_pproc) service.reg_pproc = NULL;
377         kfree(reg);
378 }
379
380 static void reg_from_wait_to_run(vpu_reg *reg)
381 {
382         list_del_init(&reg->status_link);
383         list_add_tail(&reg->status_link, &service.running);
384
385         list_del_init(&reg->session_link);
386         list_add_tail(&reg->session_link, &reg->session->running);
387 }
388
389 static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
390 {
391         int i;
392         u32 *dst = (u32 *)&reg->reg[0];
393         for (i = 0; i < count; i++)
394                 *dst++ = *src++;
395 }
396
397 static void reg_from_run_to_done(vpu_reg *reg)
398 {
399         list_del_init(&reg->status_link);
400         list_add_tail(&reg->status_link, &service.done);
401
402         list_del_init(&reg->session_link);
403         list_add_tail(&reg->session_link, &reg->session->done);
404
405         switch (reg->type) {
406         case VPU_ENC : {
407                 service.reg_codec = NULL;
408                 reg_copy_from_hw(reg, enc_dev.hwregs, REG_NUM_ENC);
409                 break;
410         }
411         case VPU_DEC : {
412                 service.reg_codec = NULL;
413                 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC);
414                 break;
415         }
416         case VPU_PP : {
417                 service.reg_pproc = NULL;
418                 reg_copy_from_hw(reg, dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_PP);
419                 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
420                 break;
421         }
422         case VPU_DEC_PP : {
423                 service.reg_codec = NULL;
424                 service.reg_pproc = NULL;
425                 reg_copy_from_hw(reg, dec_dev.hwregs, REG_NUM_DEC_PP);
426                 dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
427                 break;
428         }
429         default : {
430                 pr_err("error: copy reg from hw with unknown type %d\n", reg->type);
431                 break;
432         }
433         }
434         atomic_sub(1, &reg->session->task_running);
435         atomic_sub(1, &service.total_running);
436         wake_up_interruptible_sync(&reg->session->wait);
437 }
438
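/*
 * reg_copy_to_hw() programs the hardware for one register set.  The payload
 * registers are written first, then dsb() drains the write buffer, and only
 * afterwards are the clock gate and enable registers written, so the block
 * cannot start before its configuration is complete.
 */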
439 void reg_copy_to_hw(vpu_reg *reg)
440 {
441         int i;
442         u32 *src = (u32 *)&reg->reg[0];
443         atomic_add(1, &service.total_running);
444         atomic_add(1, &reg->session->task_running);
445         switch (reg->type) {
446         case VPU_ENC : {
447                 u32 *dst = (u32 *)enc_dev.hwregs;
448 #if defined(CONFIG_ARCH_RK30)
449                 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
450                 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
451                 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
452                 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
453 #endif
454                 service.reg_codec = reg;
455
456                 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
457
458                 for (i = 0; i < VPU_REG_EN_ENC; i++)
459                         dst[i] = src[i];
460
461                 for (i = VPU_REG_EN_ENC + 1; i < REG_NUM_ENC; i++)
462                         dst[i] = src[i];
463
464                 dsb();
465
466                 dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
467                 dst[VPU_REG_EN_ENC]   = src[VPU_REG_EN_ENC];
468         } break;
469         case VPU_DEC : {
470                 u32 *dst = (u32 *)dec_dev.hwregs;
471                 service.reg_codec = reg;
472
473                 for (i = REG_NUM_DEC - 1; i > VPU_REG_DEC_GATE; i--)
474                         dst[i] = src[i];
475
476                 dsb();
477
478                 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
479                 dst[VPU_REG_EN_DEC]   = src[VPU_REG_EN_DEC];
480         } break;
481         case VPU_PP : {
482                 u32 *dst = (u32 *)dec_dev.hwregs + PP_INTERRUPT_REGISTER;
483                 service.reg_pproc = reg;
484
485                 dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
486
487                 for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_PP; i++)
488                         dst[i] = src[i];
489
490                 dsb();
491
492                 dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
493         } break;
494         case VPU_DEC_PP : {
495                 u32 *dst = (u32 *)dec_dev.hwregs;
496                 service.reg_codec = reg;
497                 service.reg_pproc = reg;
498
499                 for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_DEC_PP; i++)
500                         dst[i] = src[i];
501
502                 dst[VPU_REG_EN_DEC_PP]   = src[VPU_REG_EN_DEC_PP] | 0x2;
503                 dsb();
504
505                 dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
506                 dst[VPU_REG_DEC_GATE]    = src[VPU_REG_DEC_GATE]    | VPU_REG_DEC_GATE_BIT;
507                 dst[VPU_REG_EN_DEC]      = src[VPU_REG_EN_DEC];
508         } break;
509         default : {
510                 pr_err("error: unsupport session type %d", reg->type);
511                 atomic_sub(1, &service.total_running);
512                 atomic_sub(1, &reg->session->task_running);
513                 break;
514         }
515         }
516 }
517
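/*
 * try_set_reg() dispatches the oldest waiting register set if the required
 * hardware is idle: the codec slot (reg_codec) for VPU_ENC/VPU_DEC, the
 * post-processor slot (reg_pproc) for VPU_PP, and both slots for VPU_DEC_PP.
 * A lone PP job may also run in parallel with a pure decoder job.
 */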
518 static void try_set_reg(void)
519 {
520         /* take the first register set from the waiting list */
521         mutex_lock(&service.lock);
522         if (!list_empty(&service.waiting)) {
523                 int can_set = 0;
524                 vpu_reg *reg = list_entry(service.waiting.next, vpu_reg, status_link);
525
526                 mod_timer(&service.timer, jiffies + POWER_OFF_DELAY);
527                 switch (reg->type) {
528                 case VPU_ENC : {
529                         if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
530                                 can_set = 1;
531                 } break;
532                 case VPU_DEC : {
533                         if (NULL == service.reg_codec)
534                                 can_set = 1;
535                 } break;
536                 case VPU_PP : {
537                         if (NULL == service.reg_codec) {
538                                 if (NULL == service.reg_pproc)
539                                         can_set = 1;
540                         } else {
541                                 if ((VPU_DEC == service.reg_codec->type) && (NULL == service.reg_pproc))
542                                         can_set = 1;
543                         }
544                 } break;
545                 case VPU_DEC_PP : {
546                         if ((NULL == service.reg_codec) && (NULL == service.reg_pproc))
547                                 can_set = 1;
548                 } break;
549                 default : {
550                         printk("undefined reg type %d\n", reg->type);
551                 } break;
552                 }
553                 if (can_set) {
554                         reg_from_wait_to_run(reg);
555                         reg_copy_to_hw(reg);
556                 }
557         }
558         mutex_unlock(&service.lock);
559 }
560
561 static int return_reg(vpu_reg *reg, u32 __user *dst)
562 {
563         int ret = 0;
564         switch (reg->type) {
565         case VPU_ENC : {
566                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_ENC)))
567                         ret = -EFAULT;
568                 break;
569         }
570         case VPU_DEC : {
571                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_DEC)))
572                         ret = -EFAULT;
573                 break;
574         }
575         case VPU_PP : {
576                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_PP)))
577                         ret = -EFAULT;
578                 break;
579         }
580         case VPU_DEC_PP : {
581                 if (copy_to_user(dst, &reg->reg[0], SIZE_REG(REG_NUM_DEC_PP)))
582                         ret = -EFAULT;
583                 break;
584         }
585         default : {
586                 ret = -EFAULT;
587                 pr_err("error: copy reg to user with unknown type %d\n", reg->type);
588                 break;
589         }
590         }
591         reg_deinit(reg);
592         return ret;
593 }
594
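/*
 * ioctl interface:
 *   VPU_IOC_SET_CLIENT_TYPE     selects encoder/decoder/pp for this session
 *   VPU_IOC_GET_HW_FUSE_STATUS  copies the cached dec/enc hardware config
 *   VPU_IOC_SET_REG             queues a register set and kicks try_set_reg()
 *   VPU_IOC_GET_REG             waits (up to TIMEOUT_DELAY) for a completed
 *                               set and copies it back; on error the session
 *                               is cleared and the hardware is reset
 */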
595 static long vpu_service_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
596 {
597         vpu_session *session = (vpu_session *)filp->private_data;
598         if (NULL == session) {
599                 return -EINVAL;
600         }
601
602         switch (cmd) {
603         case VPU_IOC_SET_CLIENT_TYPE : {
604                 session->type = (VPU_CLIENT_TYPE)arg;
605                 break;
606         }
607         case VPU_IOC_GET_HW_FUSE_STATUS : {
608                 vpu_request req;
609                 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
610                         pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
611                         return -EFAULT;
612                 } else {
613                         if (VPU_ENC != session->type) {
614                                 if (copy_to_user((void __user *)req.req, &service.dec_config, sizeof(VPUHwDecConfig_t))) {
615                                         pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
616                                         return -EFAULT;
617                                 }
618                         } else {
619                                 if (copy_to_user((void __user *)req.req, &service.enc_config, sizeof(VPUHwEncConfig_t))) {
620                                         pr_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n", session->type);
621                                         return -EFAULT;
622                                 }
623                         }
624                 }
625
626                 break;
627         }
628         case VPU_IOC_SET_REG : {
629                 vpu_request req;
630                 vpu_reg *reg;
631                 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
632                         pr_err("error: VPU_IOC_SET_REG copy_from_user failed\n");
633                         return -EFAULT;
634                 }
635
636                 reg = reg_init(session, (void __user *)req.req, req.size);
637                 if (NULL == reg) {
638                         return -EFAULT;
639                 } else {
640                         vpu_service_power_on();
641                         try_set_reg();
642                 }
643
644                 break;
645         }
646         case VPU_IOC_GET_REG : {
647                 vpu_request req;
648                 vpu_reg *reg;
649                 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
650                         pr_err("error: VPU_IOC_GET_REG copy_from_user failed\n");
651                         return -EFAULT;
652                 } else {
653                         int ret = wait_event_interruptible_timeout(session->wait, !list_empty(&session->done), TIMEOUT_DELAY);
654                         if (!list_empty(&session->done)) {
655                                 if (ret < 0) {
656                                         pr_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session->pid, ret);
657                                 }
658                                 ret = 0;
659                         } else {
660                                 if (unlikely(ret < 0)) {
661                                         pr_err("error: pid %d wait task ret %d\n", session->pid, ret);
662                                 } else if (0 == ret) {
663                                         pr_err("error: pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
664                                         ret = -ETIMEDOUT;
665                                 }
666                         }
667                         mutex_lock(&service.lock);
668                         if (ret < 0) {
669                                 int task_running = atomic_read(&session->task_running);
670                                 vpu_service_dump();
671                                 if (task_running) {
672                                         atomic_set(&session->task_running, 0);
673                                         atomic_sub(task_running, &service.total_running);
674                                         printk("%d task is running but not return, reset hardware...", task_running);
675                                         vpu_reset();
676                                         printk("done\n");
677                                 }
678                                 vpu_service_session_clear(session);
679                                 mutex_unlock(&service.lock);
680                                 return ret;
681                         }
682                         mutex_unlock(&service.lock);
683                 }
684                 mutex_lock(&service.lock);
685                 reg = list_entry(session->done.next, vpu_reg, session_link);
686                 return_reg(reg, (u32 __user *)req.req);
687                 mutex_unlock(&service.lock);
688                 break;
689         }
690         default : {
691                 pr_err("error: unknow vpu service ioctl cmd %x\n", cmd);
692                 break;
693         }
694         }
695
696         return 0;
697 }
698
699 static int vpu_service_check_hw_id(struct vpu_device * dev, const u16 *hwids, size_t num)
700 {
701         u32 hwid = readl(dev->hwregs);
702         pr_info("HW ID = 0x%08x\n", hwid);
703
704         hwid = (hwid >> 16) & 0xFFFF;   /* product version only */
705
706         while (num--) {
707                 if (hwid == hwids[num]) {
708                         pr_info("Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
709                         return 1;
710                 }
711         }
712
713         pr_info("No Compatible HW found at 0x%08lx\n", dev->iobaseaddr);
714         return 0;
715 }
716
717 static void vpu_service_release_io(void)
718 {
719         if (dec_dev.hwregs)
720                 iounmap((void *)dec_dev.hwregs);
721         release_mem_region(dec_dev.iobaseaddr, dec_dev.iosize);
722
723         if (enc_dev.hwregs)
724                 iounmap((void *)enc_dev.hwregs);
725         release_mem_region(enc_dev.iobaseaddr, enc_dev.iosize);
726 }
727
728 static int vpu_service_reserve_io(void)
729 {
730         unsigned long iobaseaddr;
731         unsigned long iosize;
732
733         iobaseaddr      = dec_dev.iobaseaddr;
734         iosize          = dec_dev.iosize;
735
736         if (!request_mem_region(iobaseaddr, iosize, "vdpu_io")) {
737                 pr_info("failed to reserve dec HW regs\n");
738                 return -EBUSY;
739         }
740
741         dec_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
742
743         if (dec_dev.hwregs == NULL) {
744                 pr_info("failed to ioremap dec HW regs\n");
745                 goto err;
746         }
747
748         /* check for correct HW */
749         if (!vpu_service_check_hw_id(&dec_dev, dec_hw_ids, ARRAY_SIZE(dec_hw_ids))) {
750                 goto err;
751         }
752
753         iobaseaddr      = enc_dev.iobaseaddr;
754         iosize          = enc_dev.iosize;
755
756         if (!request_mem_region(iobaseaddr, iosize, "hx280enc")) {
757                 pr_info("failed to reserve enc HW regs\n");
758                 goto err;
759         }
760
761         enc_dev.hwregs = (volatile u32 *)ioremap_nocache(iobaseaddr, iosize);
762
763         if (enc_dev.hwregs == NULL) {
764                 pr_info("failed to ioremap enc HW regs\n");
765                 goto err;
766         }
767
768         /* check for correct HW */
769         if (!vpu_service_check_hw_id(&enc_dev, enc_hw_ids, ARRAY_SIZE(enc_hw_ids))) {
770                 goto err;
771         }
772         return 0;
773
774 err:
775         vpu_service_release_io();
776         return -EBUSY;
777 }
778
779 static int vpu_service_open(struct inode *inode, struct file *filp)
780 {
781         vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
782         if (NULL == session) {
783                 pr_err("error: unable to allocate memory for vpu_session.");
784                 return -ENOMEM;
785         }
786
787         session->type   = VPU_TYPE_BUTT;
788         session->pid    = current->pid;
789         INIT_LIST_HEAD(&session->waiting);
790         INIT_LIST_HEAD(&session->running);
791         INIT_LIST_HEAD(&session->done);
792         INIT_LIST_HEAD(&session->list_session);
793         init_waitqueue_head(&session->wait);
794         atomic_set(&session->task_running, 0);
795         mutex_lock(&service.lock);
796         list_add_tail(&session->list_session, &service.session);
797         filp->private_data = (void *)session;
798         mutex_unlock(&service.lock);
799
800         pr_debug("dev opened\n");
801         return nonseekable_open(inode, filp);
802 }
803
804 static int vpu_service_release(struct inode *inode, struct file *filp)
805 {
806         int task_running;
807         vpu_session *session = (vpu_session *)filp->private_data;
808         if (NULL == session)
809                 return -EINVAL;
810
811         task_running = atomic_read(&session->task_running);
812         if (task_running) {
813                 pr_err("error: vpu_service session %d still has %d task running when closing\n", session->pid, task_running);
814                 msleep(50);
815         }
816         wake_up_interruptible_sync(&session->wait);
817
818         mutex_lock(&service.lock);
819         /* remove this filp from the asynchronously notified filps */
820         //vpu_service_fasync(-1, filp, 0);
821         list_del_init(&session->list_session);
822         vpu_service_session_clear(session);
823         kfree(session);
824         filp->private_data = NULL;
825         mutex_unlock(&service.lock);
826
827         pr_debug("dev closed\n");
828         return 0;
829 }
830
831 static const struct file_operations vpu_service_fops = {
832         .unlocked_ioctl = vpu_service_ioctl,
833         .open           = vpu_service_open,
834         .release        = vpu_service_release,
835         //.fasync       = vpu_service_fasync,
836 };
837
838 static struct miscdevice vpu_service_misc_device = {
839         .minor          = MISC_DYNAMIC_MINOR,
840         .name           = "vpu_service",
841         .fops           = &vpu_service_fops,
842 };
843
844 static void vpu_service_shutdown(struct platform_device *pdev)
845 {
846         pr_cont("shutdown...");
847         del_timer(&service.timer);
848         vpu_service_power_off();
849         pr_cont("done\n");
850 }
851
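/*
 * Suspend powers the blocks down but preserves service.enabled, so resume
 * knows whether to power the hardware back up and restart pending work via
 * try_set_reg().
 */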
852 static int vpu_service_suspend(struct platform_device *pdev, pm_message_t state)
853 {
854         bool enabled;
855         pr_info("suspend...");
856         del_timer(&service.timer);
857         enabled = service.enabled;
858         vpu_service_power_off();
859         service.enabled = enabled;
860         return 0;
861 }
862
863 static int vpu_service_resume(struct platform_device *pdev)
864 {
865         pr_info("resume...");
866         if (service.enabled) {
867                 service.enabled = false;
868                 vpu_service_power_on();
869                 try_set_reg();
870         }
871         return 0;
872 }
873
874 static struct platform_device vpu_service_device = {
875         .name              = "vpu_service",
876         .id                = -1,
877 };
878
879 static struct platform_driver vpu_service_driver = {
880         .driver    = {
881                 .name  = "vpu_service",
882                 .owner = THIS_MODULE,
883         },
884         .shutdown  = vpu_service_shutdown,
885         .suspend   = vpu_service_suspend,
886         .resume    = vpu_service_resume,
887 };
888
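/*
 * get_hw_info() reads the synthesis configuration and fuse registers of the
 * decoder and encoder register files and fills service.dec_config /
 * service.enc_config, which are what VPU_IOC_GET_HW_FUSE_STATUS returns to
 * user space.
 */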
889 static void get_hw_info(void)
890 {
891         VPUHwDecConfig_t *dec = &service.dec_config;
892         VPUHwEncConfig_t *enc = &service.enc_config;
893         u32 configReg   = dec_dev.hwregs[VPU_DEC_HWCFG0];
894         u32 asicID      = dec_dev.hwregs[0];
895
896         dec->h264Support    = (configReg >> DWL_H264_E) & 0x3U;
897         dec->jpegSupport    = (configReg >> DWL_JPEG_E) & 0x01U;
898         if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
899                 dec->jpegSupport = JPEG_PROGRESSIVE;
900         dec->mpeg4Support   = (configReg >> DWL_MPEG4_E) & 0x3U;
901         dec->vc1Support     = (configReg >> DWL_VC1_E) & 0x3U;
902         dec->mpeg2Support   = (configReg >> DWL_MPEG2_E) & 0x01U;
903         dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
904         dec->refBufSupport  = (configReg >> DWL_REF_BUFF_E) & 0x01U;
905         dec->vp6Support     = (configReg >> DWL_VP6_E) & 0x01U;
906         dec->maxDecPicWidth = configReg & 0x07FFU;
907
908         /* 2nd Config register */
909         configReg   = dec_dev.hwregs[VPU_DEC_HWCFG1];
910         if (dec->refBufSupport) {
911                 if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
912                         dec->refBufSupport |= 2;
913                 if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
914                         dec->refBufSupport |= 4;
915         }
916         dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
917         dec->vp7Support     = (configReg >> DWL_VP7_E) & 0x01U;
918         dec->vp8Support     = (configReg >> DWL_VP8_E) & 0x01U;
919         dec->avsSupport     = (configReg >> DWL_AVS_E) & 0x01U;
920
921         /* JPEG extensions */
922         if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
923                 dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
924         } else {
925                 dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
926         }
927
928         if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) ) {
929                 dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
930         } else {
931                 dec->rvSupport = RV_NOT_SUPPORTED;
932         }
933
934         dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;
935
936         if (dec->refBufSupport && (asicID >> 16) == 0x6731U ) {
937                 dec->refBufSupport |= 8; /* enable HW support for offset */
938         }
939
940         {
941         VPUHwFuseStatus_t hwFuseSts;
942         /* Decoder fuse configuration */
943         u32 fuseReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
944
945         hwFuseSts.h264SupportFuse = (fuseReg >> DWL_H264_FUSE_E) & 0x01U;
946         hwFuseSts.mpeg4SupportFuse = (fuseReg >> DWL_MPEG4_FUSE_E) & 0x01U;
947         hwFuseSts.mpeg2SupportFuse = (fuseReg >> DWL_MPEG2_FUSE_E) & 0x01U;
948         hwFuseSts.sorensonSparkSupportFuse = (fuseReg >> DWL_SORENSONSPARK_FUSE_E) & 0x01U;
949         hwFuseSts.jpegSupportFuse = (fuseReg >> DWL_JPEG_FUSE_E) & 0x01U;
950         hwFuseSts.vp6SupportFuse = (fuseReg >> DWL_VP6_FUSE_E) & 0x01U;
951         hwFuseSts.vc1SupportFuse = (fuseReg >> DWL_VC1_FUSE_E) & 0x01U;
952         hwFuseSts.jpegProgSupportFuse = (fuseReg >> DWL_PJPEG_FUSE_E) & 0x01U;
953         hwFuseSts.rvSupportFuse = (fuseReg >> DWL_RV_FUSE_E) & 0x01U;
954         hwFuseSts.avsSupportFuse = (fuseReg >> DWL_AVS_FUSE_E) & 0x01U;
955         hwFuseSts.vp7SupportFuse = (fuseReg >> DWL_VP7_FUSE_E) & 0x01U;
956         hwFuseSts.vp8SupportFuse = (fuseReg >> DWL_VP8_FUSE_E) & 0x01U;
957         hwFuseSts.customMpeg4SupportFuse = (fuseReg >> DWL_CUSTOM_MPEG4_FUSE_E) & 0x01U;
958         hwFuseSts.mvcSupportFuse = (fuseReg >> DWL_MVC_FUSE_E) & 0x01U;
959
960         /* check max. decoder output width */
961
962         if (fuseReg & 0x8000U)
963                 hwFuseSts.maxDecPicWidthFuse = 1920;
964         else if (fuseReg & 0x4000U)
965                 hwFuseSts.maxDecPicWidthFuse = 1280;
966         else if (fuseReg & 0x2000U)
967                 hwFuseSts.maxDecPicWidthFuse = 720;
968         else if (fuseReg & 0x1000U)
969                 hwFuseSts.maxDecPicWidthFuse = 352;
970         else    /* remove warning */
971                 hwFuseSts.maxDecPicWidthFuse = 352;
972
973         hwFuseSts.refBufSupportFuse = (fuseReg >> DWL_REF_BUFF_FUSE_E) & 0x01U;
974
975         /* Pp configuration */
976         configReg = dec_dev.hwregs[VPU_PP_HW_SYNTH_CFG];
977
978         if ((configReg >> DWL_PP_E) & 0x01U) {
979                 dec->ppSupport = 1;
980                 dec->maxPpOutPicWidth = configReg & 0x07FFU;
981                 /*pHwCfg->ppConfig = (configReg >> DWL_CFG_E) & 0x0FU; */
982                 dec->ppConfig = configReg;
983         } else {
984                 dec->ppSupport = 0;
985                 dec->maxPpOutPicWidth = 0;
986                 dec->ppConfig = 0;
987         }
988
989         /* check the HW version */
990         if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U)) {
991                 /* Pp configuration */
992                 configReg = dec_dev.hwregs[VPU_DEC_HW_FUSE_CFG];
993
994                 if ((configReg >> DWL_PP_E) & 0x01U) {
995                         /* Pp fuse configuration */
996                         u32 fuseRegPp = dec_dev.hwregs[VPU_PP_HW_FUSE_CFG];
997
998                         if ((fuseRegPp >> DWL_PP_FUSE_E) & 0x01U) {
999                                 hwFuseSts.ppSupportFuse = 1;
1000                                 /* check max. pp output width */
1001                                 if      (fuseRegPp & 0x8000U) hwFuseSts.maxPpOutPicWidthFuse = 1920;
1002                                 else if (fuseRegPp & 0x4000U) hwFuseSts.maxPpOutPicWidthFuse = 1280;
1003                                 else if (fuseRegPp & 0x2000U) hwFuseSts.maxPpOutPicWidthFuse = 720;
1004                                 else if (fuseRegPp & 0x1000U) hwFuseSts.maxPpOutPicWidthFuse = 352;
1005                                 else                          hwFuseSts.maxPpOutPicWidthFuse = 352;
1006                                 hwFuseSts.ppConfigFuse = fuseRegPp;
1007                         } else {
1008                                 hwFuseSts.ppSupportFuse = 0;
1009                                 hwFuseSts.maxPpOutPicWidthFuse = 0;
1010                                 hwFuseSts.ppConfigFuse = 0;
1011                         }
1012                 } else {
1013                         hwFuseSts.ppSupportFuse = 0;
1014                         hwFuseSts.maxPpOutPicWidthFuse = 0;
1015                         hwFuseSts.ppConfigFuse = 0;
1016                 }
1017
1018                 if (dec->maxDecPicWidth > hwFuseSts.maxDecPicWidthFuse)
1019                         dec->maxDecPicWidth = hwFuseSts.maxDecPicWidthFuse;
1020                 if (dec->maxPpOutPicWidth > hwFuseSts.maxPpOutPicWidthFuse)
1021                         dec->maxPpOutPicWidth = hwFuseSts.maxPpOutPicWidthFuse;
1022                 if (!hwFuseSts.h264SupportFuse) dec->h264Support = H264_NOT_SUPPORTED;
1023                 if (!hwFuseSts.mpeg4SupportFuse) dec->mpeg4Support = MPEG4_NOT_SUPPORTED;
1024                 if (!hwFuseSts.customMpeg4SupportFuse) dec->customMpeg4Support = MPEG4_CUSTOM_NOT_SUPPORTED;
1025                 if (!hwFuseSts.jpegSupportFuse) dec->jpegSupport = JPEG_NOT_SUPPORTED;
1026                 if ((dec->jpegSupport == JPEG_PROGRESSIVE) && !hwFuseSts.jpegProgSupportFuse)
1027                         dec->jpegSupport = JPEG_BASELINE;
1028                 if (!hwFuseSts.mpeg2SupportFuse) dec->mpeg2Support = MPEG2_NOT_SUPPORTED;
1029                 if (!hwFuseSts.vc1SupportFuse) dec->vc1Support = VC1_NOT_SUPPORTED;
1030                 if (!hwFuseSts.vp6SupportFuse) dec->vp6Support = VP6_NOT_SUPPORTED;
1031                 if (!hwFuseSts.vp7SupportFuse) dec->vp7Support = VP7_NOT_SUPPORTED;
1032                 if (!hwFuseSts.vp8SupportFuse) dec->vp8Support = VP8_NOT_SUPPORTED;
1033                 if (!hwFuseSts.ppSupportFuse) dec->ppSupport = PP_NOT_SUPPORTED;
1034
1035                 /* check the pp config vs fuse status */
1036                 if ((dec->ppConfig & 0xFC000000) && ((hwFuseSts.ppConfigFuse & 0xF0000000) >> 5)) {
1037                         u32 deInterlace = ((dec->ppConfig & PP_DEINTERLACING) >> 25);
1038                         u32 alphaBlend  = ((dec->ppConfig & PP_ALPHA_BLENDING) >> 24);
1039                         u32 deInterlaceFuse = (((hwFuseSts.ppConfigFuse >> 5) & PP_DEINTERLACING) >> 25);
1040                         u32 alphaBlendFuse  = (((hwFuseSts.ppConfigFuse >> 5) & PP_ALPHA_BLENDING) >> 24);
1041
1042                         if (deInterlace && !deInterlaceFuse) dec->ppConfig &= 0xFD000000;
1043                         if (alphaBlend && !alphaBlendFuse) dec->ppConfig &= 0xFE000000;
1044                 }
1045                 if (!hwFuseSts.sorensonSparkSupportFuse) dec->sorensonSparkSupport = SORENSON_SPARK_NOT_SUPPORTED;
1046                 if (!hwFuseSts.refBufSupportFuse)   dec->refBufSupport = REF_BUF_NOT_SUPPORTED;
1047                 if (!hwFuseSts.rvSupportFuse)       dec->rvSupport = RV_NOT_SUPPORTED;
1048                 if (!hwFuseSts.avsSupportFuse)      dec->avsSupport = AVS_NOT_SUPPORTED;
1049                 if (!hwFuseSts.mvcSupportFuse)      dec->mvcSupport = MVC_NOT_SUPPORTED;
1050         }
1051         }
1052         configReg = enc_dev.hwregs[63];
1053         enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
1054         enc->h264Enabled = (configReg >> 27) & 1;
1055         enc->mpeg4Enabled = (configReg >> 26) & 1;
1056         enc->jpegEnabled = (configReg >> 25) & 1;
1057         enc->vsEnabled = (configReg >> 24) & 1;
1058         enc->rgbEnabled = (configReg >> 28) & 1;
1059         enc->busType = (configReg >> 20) & 15;
1060         enc->synthesisLanguage = (configReg >> 16) & 15;
1061         enc->busWidth = (configReg >> 12) & 15;
1062 }
1063
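/*
 * Interrupt handling is split across request_threaded_irq(): the hard
 * handlers (vdpu_isr/vepu_isr) only acknowledge the interrupt bits and bump
 * the per-device isr counters, returning IRQ_WAKE_THREAD; the thread
 * handlers below then take service.lock (a mutex, hence the need for thread
 * context), move the finished register set to the done list and call
 * try_set_reg() to start the next job.
 */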
1064 static irqreturn_t vdpu_isr_thread(int irq, void *dev_id)
1065 {
1066         vpu_device *dev = (vpu_device *) dev_id;
1067
1068         pr_debug("vdpu_isr_thread dec %d pp %d\n", atomic_read(&dev->isr_codec), atomic_read(&dev->isr_pp));
1069
1070         if (likely(atomic_read(&dev->isr_codec))) {
1071                 atomic_sub(1, &dev->isr_codec);
1072                 pr_debug("DEC IRQ thread proc!\n");
1073                 mutex_lock(&service.lock);
1074                 if (NULL == service.reg_codec) {
1075                         pr_err("error: dec isr with no task waiting\n");
1076                 } else {
1077                         reg_from_run_to_done(service.reg_codec);
1078                 }
1079                 mutex_unlock(&service.lock);
1080         }
1081
1082         if (atomic_read(&dev->isr_pp)) {
1083                 atomic_sub(1, &dev->isr_pp);
1084                 pr_debug("PP IRQ thread proc!\n");
1085                 mutex_lock(&service.lock);
1086                 if (NULL == service.reg_pproc) {
1087                         pr_err("error: pp isr with no task waiting\n");
1088                 } else {
1089                         reg_from_run_to_done(service.reg_pproc);
1090                 }
1091                 mutex_unlock(&service.lock);
1092         }
1093         try_set_reg();
1094         return IRQ_HANDLED;
1095 }
1096
1097 static irqreturn_t vdpu_isr(int irq, void *dev_id)
1098 {
1099         vpu_device *dev = (vpu_device *) dev_id;
1100         u32 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1101         u32 irq_status_pp  = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
1102
1103         pr_debug("vdpu_isr dec %x pp %x\n", irq_status_dec, irq_status_pp);
1104
1105         if (irq_status_dec & DEC_INTERRUPT_BIT) {
1106                 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1107                 if ((irq_status_dec & 0x40001) == 0x40001)
1108                 {
1109                         do {
1110                                 irq_status_dec = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
1111                         } while ((irq_status_dec & 0x40001) == 0x40001);
1112                 }
1113                 /* clear dec IRQ */
1114                 writel(irq_status_dec & (~DEC_INTERRUPT_BIT), dev->hwregs + DEC_INTERRUPT_REGISTER);
1115                 pr_debug("DEC IRQ received!\n");
1116                 atomic_add(1, &dev->isr_codec);
1117         }
1118
1119         if (irq_status_pp & PP_INTERRUPT_BIT) {
1120                 /* clear pp IRQ */
1121                 writel(irq_status_pp & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
1122                 pr_debug("PP IRQ received!\n");
1123                 atomic_add(1, &dev->isr_pp);
1124         }
1125         return IRQ_WAKE_THREAD;
1126 }
1127
1128 static irqreturn_t vepu_isr_thread(int irq, void *dev_id)
1129 {
1130         struct vpu_device *dev = (struct vpu_device *) dev_id;
1131
1132         pr_debug("enc_isr_thread\n");
1133
1134         if (likely(atomic_read(&dev->isr_codec))) {
1135                 atomic_sub(1, &dev->isr_codec);
1136                 pr_debug("ENC IRQ thread proc!\n");
1137                 mutex_lock(&service.lock);
1138                 if (NULL == service.reg_codec) {
1139                         pr_err("error: enc isr with no task waiting\n");
1140                 } else {
1141                         reg_from_run_to_done(service.reg_codec);
1142                 }
1143                 mutex_unlock(&service.lock);
1144         }
1145         try_set_reg();
1146         return IRQ_HANDLED;
1147 }
1148
1149 static irqreturn_t vepu_isr(int irq, void *dev_id)
1150 {
1151         struct vpu_device *dev = (struct vpu_device *) dev_id;
1152         u32 irq_status = readl(dev->hwregs + ENC_INTERRUPT_REGISTER);
1153
1154         pr_debug("enc_isr\n");
1155
1156         if (likely(irq_status & ENC_INTERRUPT_BIT)) {
1157                 /* clear enc IRQ */
1158                 writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
1159                 pr_debug("ENC IRQ received!\n");
1160                 atomic_add(1, &dev->isr_codec);
1161         }
1162         return IRQ_WAKE_THREAD;
1163 }
1164
1165 static int __init vpu_service_proc_init(void);
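/*
 * Module init: map and verify the decoder/encoder register banks, install
 * the threaded interrupt handlers, register the misc device and the
 * platform driver, read the hardware configuration once, then power the
 * blocks off again until the next request powers them on.
 */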
1166 static int __init vpu_service_init(void)
1167 {
1168         int ret;
1169
1170         pr_debug("baseaddr = 0x%08x vdpu irq = %d vepu irq = %d\n", RK29_VCODEC_PHYS, IRQ_VDPU, IRQ_VEPU);
1171
1172         dec_dev.iobaseaddr      = RK29_VCODEC_PHYS + DEC_PHY_OFFSET;
1173         dec_dev.iosize          = DEC_IO_SIZE;
1174         atomic_set(&dec_dev.isr_codec, 0);
1175         atomic_set(&dec_dev.isr_pp, 0);
1176         enc_dev.iobaseaddr      = RK29_VCODEC_PHYS;
1177         enc_dev.iosize          = ENC_IO_SIZE;
1178         atomic_set(&enc_dev.isr_codec, 0);
1179         atomic_set(&enc_dev.isr_pp, 0);
1180
1181         INIT_LIST_HEAD(&service.waiting);
1182         INIT_LIST_HEAD(&service.running);
1183         INIT_LIST_HEAD(&service.done);
1184         INIT_LIST_HEAD(&service.session);
1185         mutex_init(&service.lock);
1186         service.reg_codec       = NULL;
1187         service.reg_pproc       = NULL;
1188         atomic_set(&service.total_running, 0);
1189         service.enabled         = false;
1190
1191         vpu_get_clk();
1192         init_timer(&service.timer);
1193         service.timer.expires = jiffies + POWER_OFF_DELAY;
1194         service.timer.function = vpu_service_power_off_work_func;
1195         vpu_service_power_on();
1196
1197         ret = vpu_service_reserve_io();
1198         if (ret < 0) {
1199                 pr_err("error: reserve io failed\n");
1200                 goto err_reserve_io;
1201         }
1202
1203         /* get the IRQ line */
1204         ret = request_threaded_irq(IRQ_VDPU, vdpu_isr, vdpu_isr_thread, IRQF_SHARED, "vdpu", (void *)&dec_dev);
1205         if (ret) {
1206                 pr_err("error: can't request vdpu irq %d\n", IRQ_VDPU);
1207                 goto err_req_vdpu_irq;
1208         }
1209
1210         ret = request_threaded_irq(IRQ_VEPU, vepu_isr, vepu_isr_thread, IRQF_SHARED, "vepu", (void *)&enc_dev);
1211         if (ret) {
1212                 pr_err("error: can't request vepu irq %d\n", IRQ_VEPU);
1213                 goto err_req_vepu_irq;
1214         }
1215
1216         ret = misc_register(&vpu_service_misc_device);
1217         if (ret) {
1218                 pr_err("error: misc_register failed\n");
1219                 goto err_register;
1220         }
1221
1222         platform_device_register(&vpu_service_device);
1223         platform_driver_probe(&vpu_service_driver, NULL);
1224         get_hw_info();
1225         del_timer(&service.timer);
1226         vpu_service_power_off();
1227         pr_info("init success\n");
1228
1229         vpu_service_proc_init();
1230         return 0;
1231
1232 err_register:
1233         free_irq(IRQ_VEPU, (void *)&enc_dev);
1234 err_req_vepu_irq:
1235         free_irq(IRQ_VDPU, (void *)&dec_dev);
1236 err_req_vdpu_irq:
1237         pr_info("init failed\n");
1238 err_reserve_io:
1239         del_timer(&service.timer);
1240         vpu_service_power_off();
1241         vpu_service_release_io();
1242         vpu_put_clk();
1243         pr_info("init failed\n");
1244         return ret;
1245 }
1246
1247 static void __exit vpu_service_exit(void)
1248 {
1249         del_timer(&service.timer);
1250         vpu_service_power_off();
1251         platform_device_unregister(&vpu_service_device);
1252         platform_driver_unregister(&vpu_service_driver);
1253         misc_deregister(&vpu_service_misc_device);
1254         free_irq(IRQ_VEPU, (void *)&enc_dev);
1255         free_irq(IRQ_VDPU, (void *)&dec_dev);
1256         vpu_put_clk();
1257 }
1258
1259 module_init(vpu_service_init);
1260 module_exit(vpu_service_exit);
1261
1262 #ifdef CONFIG_PROC_FS
1263 #include <linux/proc_fs.h>
1264 #include <linux/seq_file.h>
1265
1266 static int proc_vpu_service_show(struct seq_file *s, void *v)
1267 {
1268         unsigned int i, n;
1269         vpu_reg *reg, *reg_tmp;
1270         vpu_session *session, *session_tmp;
1271
1272         vpu_service_power_on();
1273         seq_printf(s, "\nENC Registers:\n");
1274         n = enc_dev.iosize >> 2;
1275         for (i = 0; i < n; i++) {
1276                 seq_printf(s, "\tswreg%.3d = %08X\n", i, readl(enc_dev.hwregs + i));
1277         }
1278         seq_printf(s, "\nDEC Registers:\n");
1279         n = dec_dev.iosize >> 2;
1280         for (i = 0; i < n; i++) {
1281                 seq_printf(s, "\tswreg%.3d = %08X\n", i, readl(dec_dev.hwregs + i));
1282         }
1283
1284         seq_printf(s, "\nvpu service status:\n");
1285         mutex_lock(&service.lock);
1286         list_for_each_entry_safe(session, session_tmp, &service.session, list_session) {
1287                 seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
1288                 //seq_printf(s, "waiting reg set %d\n");
1289                 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
1290                         seq_printf(s, "waiting register set\n");
1291                 }
1292                 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
1293                         seq_printf(s, "running register set\n");
1294                 }
1295                 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
1296                         seq_printf(s, "done    register set\n");
1297                 }
1298         }
1299         mutex_unlock(&service.lock);
1300
1301         return 0;
1302 }
1303
1304 static int proc_vpu_service_open(struct inode *inode, struct file *file)
1305 {
1306         return single_open(file, proc_vpu_service_show, NULL);
1307 }
1308
1309 static const struct file_operations proc_vpu_service_fops = {
1310         .open           = proc_vpu_service_open,
1311         .read           = seq_read,
1312         .llseek         = seq_lseek,
1313         .release        = single_release,
1314 };
1315
1316 static int __init vpu_service_proc_init(void)
1317 {
1318         proc_create("vpu_service", 0, NULL, &proc_vpu_service_fops);
1319         return 0;
1320
1321 }
1322 #endif /* CONFIG_PROC_FS */
1323