1 /**
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  * author: chenhengming chm@rock-chips.com
4  *         Alpha Lin, alpha.lin@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/clk.h>
20 #include <linux/compat.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/fs.h>
26 #include <linux/mm.h>
27 #include <linux/platform_device.h>
28 #include <linux/reset.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/wakelock.h>
32 #include <linux/cdev.h>
33 #include <linux/of.h>
34 #include <linux/of_platform.h>
35 #include <linux/of_irq.h>
36 #include <linux/regmap.h>
37 #include <linux/mfd/syscon.h>
38 #include <linux/uaccess.h>
39 #include <linux/debugfs.h>
40
41 #include <linux/rockchip/cpu.h>
42 #include <linux/rockchip/cru.h>
43 #include <linux/rockchip/pmu.h>
44 #include <linux/rockchip/grf.h>
45
46 #if defined(CONFIG_ION_ROCKCHIP)
47 #include <linux/rockchip_ion.h>
48 #endif
49
50 #if defined(CONFIG_RK_IOMMU) && defined(CONFIG_ION_ROCKCHIP)
51 #define CONFIG_VCODEC_MMU
52 #include <linux/rockchip-iovmm.h>
53 #include <linux/dma-buf.h>
54 #endif
55
56 #include "vcodec_hw_info.h"
57 #include "vcodec_hw_vpu.h"
58 #include "vcodec_hw_rkv.h"
59 #include "vcodec_hw_vpu2.h"
60
61 #include "vcodec_service.h"
62
63 /*
64  * debug flag usage:
65  * +------+-------------------+
66  * | 8bit |      24bit        |
67  * +------+-------------------+
68  * bits  0~23 select the type of information to print
69  * bits 24~31 control the print format
70  */
71
72 #define DEBUG_POWER                             0x00000001
73 #define DEBUG_CLOCK                             0x00000002
74 #define DEBUG_IRQ_STATUS                        0x00000004
75 #define DEBUG_IOMMU                             0x00000008
76 #define DEBUG_IOCTL                             0x00000010
77 #define DEBUG_FUNCTION                          0x00000020
78 #define DEBUG_REGISTER                          0x00000040
79 #define DEBUG_EXTRA_INFO                        0x00000080
80 #define DEBUG_TIMING                            0x00000100
81 #define DEBUG_TASK_INFO                         0x00000200
82
83 #define DEBUG_SET_REG                           0x00001000
84 #define DEBUG_GET_REG                           0x00002000
85 #define DEBUG_PPS_FILL                          0x00004000
86 #define DEBUG_IRQ_CHECK                         0x00008000
87 #define DEBUG_CACHE_32B                         0x00010000
88
89 #define PRINT_FUNCTION                          0x80000000
90 #define PRINT_LINE                              0x40000000
91
92 static int debug;
93 module_param(debug, int, S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(debug, "bit switch for vcodec_service debug information");
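/*
 * Example (illustrative): debug = DEBUG_IOMMU | DEBUG_TIMING | PRINT_FUNCTION
 * (0x80000108) traces iommu handling and task timing with a function/line
 * prefix on each message. The value can be passed on the kernel command line
 * (vcodec_service.debug=0x80000108, assuming the module is built under that
 * name) or changed at runtime via /sys/module/<module>/parameters/debug.
 */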
95
96 #define VCODEC_CLOCK_ENABLE     1
97
98 /*
99  * hardware information organization
100  *
101  * In order to support multiple hardware blocks with different versions, the
102  * hardware information is organized as follows:
103  *
104  * 1. First, index the hardware by register size / position.
105  *    This information is fixed for each hardware block and does not depend
106  *    on the runtime work flow; it is only used for resource allocation.
107  *    Descriptor: struct vpu_hw_info
108  *
109  * 2. Then, index the hardware by runtime configuration.
110  *    This information describes runtime setting behaviour, including the
111  *    enable register, irq register and other key control flags.
112  *    Descriptor: struct vpu_task_info
113  *
114  * 3. Finally, in the iommu case the fd translation is required.
115  *    Descriptor: struct vpu_trans_info
116  */
117
118 enum VPU_FREQ {
119         VPU_FREQ_200M,
120         VPU_FREQ_266M,
121         VPU_FREQ_300M,
122         VPU_FREQ_400M,
123         VPU_FREQ_500M,
124         VPU_FREQ_600M,
125         VPU_FREQ_DEFAULT,
126         VPU_FREQ_BUT,
127 };
128
129 struct extra_info_elem {
130         u32 index;
131         u32 offset;
132 };
133
134 #define EXTRA_INFO_MAGIC        0x4C4A46
135
136 struct extra_info_for_iommu {
137         u32 magic;
138         u32 cnt;
139         struct extra_info_elem elem[20];
140 };
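/*
 * Illustrative example (the register index and offset are made-up values):
 * the userspace decoder may append this block right after the register words
 * it submits, e.g.
 *
 *   struct extra_info_for_iommu ext = {
 *           .magic = EXTRA_INFO_MAGIC,
 *           .cnt   = 1,
 *           .elem  = { { .index = 63, .offset = 4096 } },
 *   };
 *
 * which asks the driver to add 4096 bytes to reg[63] after the fd in that
 * register has been translated to an iova (see vcodec_bufid_to_iova()).
 */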
141
142 #define MHZ                                     (1000*1000)
143 #define SIZE_REG(reg)                           ((reg)*4)
144
145 static struct vcodec_info vcodec_info_set[] = {
146         [0] = {
147                 .hw_id          = VPU_ID_8270,
148                 .hw_info        = &hw_vpu_8270,
149                 .task_info      = task_vpu,
150                 .trans_info     = trans_vpu,
151         },
152         [1] = {
153                 .hw_id          = VPU_ID_4831,
154                 .hw_info        = &hw_vpu_4831,
155                 .task_info      = task_vpu,
156                 .trans_info     = trans_vpu,
157         },
158         [2] = {
159                 .hw_id          = VPU_DEC_ID_9190,
160                 .hw_info        = &hw_vpu_9190,
161                 .task_info      = task_vpu,
162                 .trans_info     = trans_vpu,
163         },
164         [3] = {
165                 .hw_id          = HEVC_ID,
166                 .hw_info        = &hw_rkhevc,
167                 .task_info      = task_rkv,
168                 .trans_info     = trans_rkv,
169         },
170         [4] = {
171                 .hw_id          = RKV_DEC_ID,
172                 .hw_info        = &hw_rkvdec,
173                 .task_info      = task_rkv,
174                 .trans_info     = trans_rkv,
175         },
176         [5] = {
177                 .hw_id          = VPU2_ID,
178                 .hw_info        = &hw_vpu2,
179                 .task_info      = task_vpu2,
180                 .trans_info     = trans_vpu2,
181         },
182 };
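/*
 * Minimal sketch (illustration only, not referenced by the driver code
 * below): picking the vcodec_info entry that matches a hardware id read
 * back from the version register.
 */
static __maybe_unused const struct vcodec_info *
vcodec_info_lookup_example(enum VPU_HW_ID hw_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
                if (vcodec_info_set[i].hw_id == hw_id)
                        return &vcodec_info_set[i];
        }

        return NULL;
}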
183
184 #define DEBUG
185 #ifdef DEBUG
186 #define vpu_debug_func(type, fmt, args...)                      \
187         do {                                                    \
188                 if (unlikely(debug & type)) {                   \
189                         pr_info("%s:%d: " fmt,                  \
190                                  __func__, __LINE__, ##args);   \
191                 }                                               \
192         } while (0)
193 #define vpu_debug(type, fmt, args...)                           \
194         do {                                                    \
195                 if (unlikely(debug & type)) {                   \
196                         pr_info(fmt, ##args);                   \
197                 }                                               \
198         } while (0)
199 #else
200 #define vpu_debug_func(level, fmt, args...)
201 #define vpu_debug(level, fmt, args...)
202 #endif
203
204 #define vpu_debug_enter() vpu_debug_func(DEBUG_FUNCTION, "enter\n")
205 #define vpu_debug_leave() vpu_debug_func(DEBUG_FUNCTION, "leave\n")
206
207 #define vpu_err(fmt, args...)                           \
208                 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
209
210 enum VPU_DEC_FMT {
211         VPU_DEC_FMT_H264,
212         VPU_DEC_FMT_MPEG4,
213         VPU_DEC_FMT_H263,
214         VPU_DEC_FMT_JPEG,
215         VPU_DEC_FMT_VC1,
216         VPU_DEC_FMT_MPEG2,
217         VPU_DEC_FMT_MPEG1,
218         VPU_DEC_FMT_VP6,
219         VPU_DEC_FMT_RESERV0,
220         VPU_DEC_FMT_VP7,
221         VPU_DEC_FMT_VP8,
222         VPU_DEC_FMT_AVS,
223         VPU_DEC_FMT_RES
224 };
225
226 /**
227  * struct for a process session which connects to the vpu
228  *
229  * @author ChenHengming (2011-5-3)
230  */
231 struct vpu_session {
232         enum VPU_CLIENT_TYPE type;
233         /* a linked list of data so we can access them for debugging */
234         struct list_head list_session;
235         /* a linked list of register data waiting for process */
236         struct list_head waiting;
237         /* a linked list of register data in processing */
238         struct list_head running;
239         /* a linked list of register data processed */
240         struct list_head done;
241         wait_queue_head_t wait;
242         pid_t pid;
243         atomic_t task_running;
244 };
245
246 /**
247  * struct for a process register set
248  *
249  * @author ChenHengming (2011-5-4)
250  */
251 struct vpu_reg {
252         enum VPU_CLIENT_TYPE type;
253         enum VPU_FREQ freq;
254         struct vpu_session *session;
255         struct vpu_subdev_data *data;
256         struct vpu_task_info *task;
257         const struct vpu_trans_info *trans;
258
259         /* link to vpu service session */
260         struct list_head session_link;
261         /* link to register set list */
262         struct list_head status_link;
263
264         unsigned long size;
265         struct list_head mem_region_list;
266         u32 dec_base;
267         u32 *reg;
268 };
269
270 struct vpu_device {
271         atomic_t irq_count_codec;
272         atomic_t irq_count_pp;
273         unsigned int iosize;
274         u32 *regs;
275 };
276
277 enum vcodec_device_id {
278         VCODEC_DEVICE_ID_VPU,
279         VCODEC_DEVICE_ID_HEVC,
280         VCODEC_DEVICE_ID_COMBO,
281         VCODEC_DEVICE_ID_RKVDEC,
282         VCODEC_DEVICE_ID_BUTT
283 };
284
285 enum VCODEC_RUNNING_MODE {
286         VCODEC_RUNNING_MODE_NONE = -1,
287         VCODEC_RUNNING_MODE_VPU,
288         VCODEC_RUNNING_MODE_HEVC,
289         VCODEC_RUNNING_MODE_RKVDEC
290 };
291
292 struct vcodec_mem_region {
293         struct list_head srv_lnk;
294         struct list_head reg_lnk;
295         struct list_head session_lnk;
296         unsigned long iova;     /* virtual address for iommu */
297         unsigned long len;
298         u32 reg_idx;
299         struct ion_handle *hdl;
300 };
301
302 enum vpu_ctx_state {
303         MMU_ACTIVATED   = BIT(0)
304 };
305
306 struct vpu_subdev_data {
307         struct cdev cdev;
308         dev_t dev_t;
309         struct class *cls;
310         struct device *child_dev;
311
312         int irq_enc;
313         int irq_dec;
314         struct vpu_service_info *pservice;
315
316         u32 *regs;
317         enum VCODEC_RUNNING_MODE mode;
318         struct list_head lnk_service;
319
320         struct device *dev;
321
322         struct vpu_device enc_dev;
323         struct vpu_device dec_dev;
324
325         enum VPU_HW_ID hw_id;
326         struct vpu_hw_info *hw_info;
327         struct vpu_task_info *task_info;
328         const struct vpu_trans_info *trans_info;
329
330         u32 reg_size;
331         unsigned long state;
332
333 #ifdef CONFIG_DEBUG_FS
334         struct dentry *debugfs_dir;
335         struct dentry *debugfs_file_regs;
336 #endif
337
338         struct device *mmu_dev;
339 };
340
341 struct vpu_service_info {
342         struct wake_lock wake_lock;
343         struct delayed_work power_off_work;
344         /* vpu service structure global lock */
345         struct mutex lock;
346         /* link to status_link in struct vpu_reg */
347         struct list_head waiting;
348         /* link to status_link in struct vpu_reg */
349         struct list_head running;
350         /* link to status_link in struct vpu_reg */
351         struct list_head done;
352         /* link to list_session in struct vpu_session */
353         struct list_head session;
354         atomic_t total_running;
355         atomic_t enabled;
356         atomic_t power_on_cnt;
357         atomic_t power_off_cnt;
358         struct vpu_reg *reg_codec;
359         struct vpu_reg *reg_pproc;
360         struct vpu_reg *reg_resev;
361         struct vpu_dec_config dec_config;
362         struct vpu_enc_config enc_config;
363
364         bool auto_freq;
365         bool bug_dec_addr;
366         atomic_t freq_status;
367
368         struct clk *aclk_vcodec;
369         struct clk *hclk_vcodec;
370         struct clk *clk_core;
371         struct clk *clk_cabac;
372         struct clk *pd_video;
373
374 #ifdef CONFIG_RESET_CONTROLLER
375         struct reset_control *rst_a;
376         struct reset_control *rst_h;
377         struct reset_control *rst_v;
378 #endif
379         struct device *dev;
380
381         u32 irq_status;
382         atomic_t reset_request;
383         struct ion_client *ion_client;
384         struct list_head mem_region_list;
385
386         enum vcodec_device_id dev_id;
387
388         enum VCODEC_RUNNING_MODE curr_mode;
389         u32 prev_mode;
390
391         struct delayed_work simulate_work;
392
393         u32 mode_bit;
394         u32 mode_ctrl;
395         u32 *reg_base;
396         u32 ioaddr;
397         struct regmap *grf;
398         u32 *grf_base;
399
400         char *name;
401
402         u32 subcnt;
403         struct list_head subdev_list;
404 };
405
406 struct vpu_request {
407         u32 *req;
408         u32 size;
409 };
410
411 #ifdef CONFIG_COMPAT
412 struct compat_vpu_request {
413         compat_uptr_t req;
414         u32 size;
415 };
416 #endif
417
418 /* debugfs root directory for all devices (vpu, hevc). */
419 static struct dentry *parent;
420
421 #ifdef CONFIG_DEBUG_FS
422 static int vcodec_debugfs_init(void);
423 static void vcodec_debugfs_exit(void);
424 static struct dentry *vcodec_debugfs_create_device_dir(
425                 char *dirname, struct dentry *parent);
426 static int debug_vcodec_open(struct inode *inode, struct file *file);
427
428 static const struct file_operations debug_vcodec_fops = {
429         .open = debug_vcodec_open,
430         .read = seq_read,
431         .llseek = seq_lseek,
432         .release = single_release,
433 };
434 #endif
435
436 #define VDPU_SOFT_RESET_REG     101
437 #define VDPU_CLEAN_CACHE_REG    516
438 #define VEPU_CLEAN_CACHE_REG    772
439 #define HEVC_CLEAN_CACHE_REG    260
440
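/*
 * NOTE: the *_REG values above are u32 word indexes, not byte offsets; the
 * base passed to VPU_REG_ENABLE() below is a u32 *, so the write lands at
 * byte offset reg * 4 (compare SIZE_REG()).
 */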
441 #define VPU_REG_ENABLE(base, reg)       writel_relaxed(1, (base) + (reg))
442
443 #define VDPU_SOFT_RESET(base)   VPU_REG_ENABLE(base, VDPU_SOFT_RESET_REG)
444 #define VDPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VDPU_CLEAN_CACHE_REG)
445 #define VEPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VEPU_CLEAN_CACHE_REG)
446 #define HEVC_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, HEVC_CLEAN_CACHE_REG)
447
448 #define VPU_POWER_OFF_DELAY             (4 * HZ) /* 4s */
449 #define VPU_TIMEOUT_DELAY               (2 * HZ) /* 2s */
450
451 static void time_record(struct vpu_task_info *task, int is_end)
452 {
453         if (unlikely(debug & DEBUG_TIMING) && task)
454                 do_gettimeofday((is_end) ? (&task->end) : (&task->start));
455 }
456
457 static void time_diff(struct vpu_task_info *task)
458 {
459         vpu_debug(DEBUG_TIMING, "%s task: %ld ms\n", task->name,
460                   (task->end.tv_sec  - task->start.tv_sec)  * 1000 +
461                   (task->end.tv_usec - task->start.tv_usec) / 1000);
462 }
463
464 static void vcodec_enter_mode(struct vpu_subdev_data *data)
465 {
466         int bits;
467         u32 raw = 0;
468         struct vpu_service_info *pservice = data->pservice;
469 #if defined(CONFIG_VCODEC_MMU)
470         struct vpu_subdev_data *subdata, *n;
471 #endif
472         if (pservice->subcnt < 2) {
473 #if defined(CONFIG_VCODEC_MMU)
474                 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
475                         set_bit(MMU_ACTIVATED, &data->state);
476                         if (atomic_read(&pservice->enabled))
477                                 rockchip_iovmm_activate(data->dev);
478                         else
479                                 BUG_ON(!atomic_read(&pservice->enabled));
480                 }
481 #endif
482                 return;
483         }
484
485         if (pservice->curr_mode == data->mode)
486                 return;
487
488         vpu_debug(DEBUG_IOMMU, "vcodec enter mode %d\n", data->mode);
489 #if defined(CONFIG_VCODEC_MMU)
490         list_for_each_entry_safe(subdata, n,
491                                  &pservice->subdev_list, lnk_service) {
492                 if (data != subdata && subdata->mmu_dev &&
493                     test_bit(MMU_ACTIVATED, &subdata->state)) {
494                         clear_bit(MMU_ACTIVATED, &subdata->state);
495                         rockchip_iovmm_deactivate(subdata->dev);
496                 }
497         }
498 #endif
499         bits = 1 << pservice->mode_bit;
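        /*
         * Rockchip GRF registers carry a write-enable mask in their upper 16
         * bits: writing bits | (bits << 16) sets the mode bit, while writing
         * (bits << 16) with the low half cleared clears it, leaving all other
         * fields of the register untouched.
         */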
500 #ifdef CONFIG_MFD_SYSCON
501         if (pservice->grf) {
502                 regmap_read(pservice->grf, pservice->mode_ctrl, &raw);
503
504                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
505                         regmap_write(pservice->grf, pservice->mode_ctrl,
506                                      raw | bits | (bits << 16));
507                 else
508                         regmap_write(pservice->grf, pservice->mode_ctrl,
509                                      (raw & (~bits)) | (bits << 16));
510         } else if (pservice->grf_base) {
511                 u32 *grf_base = pservice->grf_base;
512
513                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
514                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
515                         writel_relaxed(raw | bits | (bits << 16),
516                                        grf_base + pservice->mode_ctrl / 4);
517                 else
518                         writel_relaxed((raw & (~bits)) | (bits << 16),
519                                        grf_base + pservice->mode_ctrl / 4);
520         } else {
521                 vpu_err("no grf resource defined, switching decoder failed\n");
522                 return;
523         }
524 #else
525         if (pservice->grf_base) {
526                 u32 *grf_base = pservice->grf_base;
527
528                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
529                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
530                         writel_relaxed(raw | bits | (bits << 16),
531                                        grf_base + pservice->mode_ctrl / 4);
532                 else
533                         writel_relaxed((raw & (~bits)) | (bits << 16),
534                                        grf_base + pservice->mode_ctrl / 4);
535         } else {
536                 vpu_err("no grf resource defined, switching decoder failed\n");
537                 return;
538         }
539 #endif
540 #if defined(CONFIG_VCODEC_MMU)
541         if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
542                 set_bit(MMU_ACTIVATED, &data->state);
543                 if (atomic_read(&pservice->enabled))
544                         rockchip_iovmm_activate(data->dev);
545                 else
546                         BUG_ON(!atomic_read(&pservice->enabled));
547         }
548 #endif
549         pservice->prev_mode = pservice->curr_mode;
550         pservice->curr_mode = data->mode;
551 }
552
553 static void vcodec_exit_mode(struct vpu_subdev_data *data)
554 {
555         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
556                 clear_bit(MMU_ACTIVATED, &data->state);
557 #if defined(CONFIG_VCODEC_MMU)
558                 rockchip_iovmm_deactivate(data->dev);
559 #endif
560                 data->pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
561         }
562 }
563
564 static int vpu_get_clk(struct vpu_service_info *pservice)
565 {
566 #if VCODEC_CLOCK_ENABLE
567         struct device *dev = pservice->dev;
568
569         switch (pservice->dev_id) {
570         case VCODEC_DEVICE_ID_HEVC:
571                 pservice->pd_video = devm_clk_get(dev, "pd_hevc");
572                 if (IS_ERR(pservice->pd_video)) {
573                         dev_err(dev, "failed on clk_get pd_hevc\n");
574                         return -1;
575                 }
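                /* fall through: the hevc device also needs the cabac/core clocks */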
576         case VCODEC_DEVICE_ID_COMBO:
577         case VCODEC_DEVICE_ID_RKVDEC:
578                 pservice->clk_cabac = devm_clk_get(dev, "clk_cabac");
579                 if (IS_ERR(pservice->clk_cabac)) {
580                         dev_err(dev, "failed on clk_get clk_cabac\n");
581                         pservice->clk_cabac = NULL;
582                 }
583                 pservice->clk_core = devm_clk_get(dev, "clk_core");
584                 if (IS_ERR(pservice->clk_core)) {
585                         dev_err(dev, "failed on clk_get clk_core\n");
586                         return -1;
587                 }
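                /* fall through: all of the above also need the aclk/hclk pair */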
588         case VCODEC_DEVICE_ID_VPU:
589                 pservice->aclk_vcodec = devm_clk_get(dev, "aclk_vcodec");
590                 if (IS_ERR(pservice->aclk_vcodec)) {
591                         dev_err(dev, "failed on clk_get aclk_vcodec\n");
592                         return -1;
593                 }
594
595                 pservice->hclk_vcodec = devm_clk_get(dev, "hclk_vcodec");
596                 if (IS_ERR(pservice->hclk_vcodec)) {
597                         dev_err(dev, "failed on clk_get hclk_vcodec\n");
598                         return -1;
599                 }
600                 if (pservice->pd_video == NULL) {
601                         pservice->pd_video = devm_clk_get(dev, "pd_video");
602                         if (IS_ERR(pservice->pd_video)) {
603                                 pservice->pd_video = NULL;
604                                 dev_info(dev, "do not have pd_video\n");
605                         }
606                 }
607                 break;
608         default:
609                 break;
610         }
611
612         return 0;
613 #else
614         return 0;
615 #endif
616 }
617
618 static void vpu_put_clk(struct vpu_service_info *pservice)
619 {
620 #if VCODEC_CLOCK_ENABLE
621         if (pservice->pd_video)
622                 devm_clk_put(pservice->dev, pservice->pd_video);
623         if (pservice->aclk_vcodec)
624                 devm_clk_put(pservice->dev, pservice->aclk_vcodec);
625         if (pservice->hclk_vcodec)
626                 devm_clk_put(pservice->dev, pservice->hclk_vcodec);
627         if (pservice->clk_core)
628                 devm_clk_put(pservice->dev, pservice->clk_core);
629         if (pservice->clk_cabac)
630                 devm_clk_put(pservice->dev, pservice->clk_cabac);
631 #endif
632 }
633
634 static void vpu_reset(struct vpu_subdev_data *data)
635 {
636         struct vpu_service_info *pservice = data->pservice;
637         enum pmu_idle_req type = IDLE_REQ_VIDEO;
638
639         if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC)
640                 type = IDLE_REQ_HEVC;
641
642         pr_info("%s: resetting...", dev_name(pservice->dev));
643
644 #if defined(CONFIG_ARCH_RK29)
645         clk_disable(aclk_ddr_vepu);
646         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
647         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
648         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
649         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
650         mdelay(10);
651         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
652         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
653         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
654         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
655         clk_enable(aclk_ddr_vepu);
656 #elif defined(CONFIG_ARCH_RK30)
657         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
658         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
659         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
660         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
661         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
662         mdelay(1);
663         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
664         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
665         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
666         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
667         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
668 #else
669 #endif
670         WARN_ON(pservice->reg_codec != NULL);
671         WARN_ON(pservice->reg_pproc != NULL);
672         WARN_ON(pservice->reg_resev != NULL);
673         pservice->reg_codec = NULL;
674         pservice->reg_pproc = NULL;
675         pservice->reg_resev = NULL;
676
677         pr_info("for 3288/3368...");
678 #if 0 //def CONFIG_RESET_CONTROLLER
679         if (pservice->rst_a && pservice->rst_h) {
680                 if (rockchip_pmu_ops.set_idle_request)
681                         rockchip_pmu_ops.set_idle_request(type, true);
682                 pr_info("reset in\n");
683                 if (pservice->rst_v)
684                         reset_control_assert(pservice->rst_v);
685                 reset_control_assert(pservice->rst_a);
686                 reset_control_assert(pservice->rst_h);
687                 udelay(5);
688                 reset_control_deassert(pservice->rst_h);
689                 reset_control_deassert(pservice->rst_a);
690                 if (pservice->rst_v)
691                         reset_control_deassert(pservice->rst_v);
692                 if (rockchip_pmu_ops.set_idle_request)
693                         rockchip_pmu_ops.set_idle_request(type, false);
694         }
695 #endif
696
697 #if defined(CONFIG_VCODEC_MMU)
698         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
699                 clear_bit(MMU_ACTIVATED, &data->state);
700                 if (atomic_read(&pservice->enabled))
701                         rockchip_iovmm_deactivate(data->dev);
702                 else
703                         BUG_ON(!atomic_read(&pservice->enabled));
704         }
705 #endif
706         atomic_set(&pservice->reset_request, 0);
707         pr_info("done\n");
708 }
709
710 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg);
711 static void vpu_service_session_clear(struct vpu_subdev_data *data,
712                                       struct vpu_session *session)
713 {
714         struct vpu_reg *reg, *n;
715
716         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
717                 reg_deinit(data, reg);
718         }
719         list_for_each_entry_safe(reg, n, &session->running, session_link) {
720                 reg_deinit(data, reg);
721         }
722         list_for_each_entry_safe(reg, n, &session->done, session_link) {
723                 reg_deinit(data, reg);
724         }
725 }
726
727 static void vpu_service_dump(struct vpu_service_info *pservice)
728 {
729 }
730
731
732 static void vpu_service_power_off(struct vpu_service_info *pservice)
733 {
734         int total_running;
735         struct vpu_subdev_data *data = NULL, *n;
736         int ret = atomic_add_unless(&pservice->enabled, -1, 0);
737
738         if (!ret)
739                 return;
740
741         total_running = atomic_read(&pservice->total_running);
742         if (total_running) {
743                 pr_alert("alert: power off while %d tasks are running!!\n",
744                          total_running);
745                 mdelay(50);
746                 pr_alert("alert: delayed 50 ms for running tasks\n");
747                 vpu_service_dump(pservice);
748         }
749
750         pr_info("%s: power off...", dev_name(pservice->dev));
751
752         udelay(5);
753
754 #if defined(CONFIG_VCODEC_MMU)
755         list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
756                 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
757                         clear_bit(MMU_ACTIVATED, &data->state);
758                         rockchip_iovmm_deactivate(data->dev);
759                 }
760         }
761         pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
762 #endif
763
764 #if VCODEC_CLOCK_ENABLE
765         if (pservice->pd_video)
766                 clk_disable_unprepare(pservice->pd_video);
767         if (pservice->hclk_vcodec)
768                 clk_disable_unprepare(pservice->hclk_vcodec);
769         if (pservice->aclk_vcodec)
770                 clk_disable_unprepare(pservice->aclk_vcodec);
771         if (pservice->clk_core)
772                 clk_disable_unprepare(pservice->clk_core);
773         if (pservice->clk_cabac)
774                 clk_disable_unprepare(pservice->clk_cabac);
775 #endif
776
777         atomic_add(1, &pservice->power_off_cnt);
778         wake_unlock(&pservice->wake_lock);
779         pr_info("done\n");
780 }
781
782 static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
783 {
784         queue_delayed_work(system_wq, &pservice->power_off_work,
785                            VPU_POWER_OFF_DELAY);
786 }
787
788 static void vpu_power_off_work(struct work_struct *work_s)
789 {
790         struct delayed_work *dlwork = container_of(work_s,
791                         struct delayed_work, work);
792         struct vpu_service_info *pservice = container_of(dlwork,
793                         struct vpu_service_info, power_off_work);
794
795         if (mutex_trylock(&pservice->lock)) {
796                 vpu_service_power_off(pservice);
797                 mutex_unlock(&pservice->lock);
798         } else {
799                 /* Come back later if the device is busy... */
800                 vpu_queue_power_off_work(pservice);
801         }
802 }
803
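/*
 * Power management scheme: each power-on (re)arms a delayed power-off at most
 * once per second; vpu_power_off_work() then powers the hardware down after
 * VPU_POWER_OFF_DELAY of inactivity, or re-queues itself when the service
 * lock is currently held.
 */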
804 static void vpu_service_power_on(struct vpu_service_info *pservice)
805 {
806         int ret;
807         static ktime_t last;
808         ktime_t now = ktime_get();
809
810         if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
811                 cancel_delayed_work_sync(&pservice->power_off_work);
812                 vpu_queue_power_off_work(pservice);
813                 last = now;
814         }
815         ret = atomic_add_unless(&pservice->enabled, 1, 1);
816         if (!ret)
817                 return;
818
819         pr_info("%s: power on\n", dev_name(pservice->dev));
820
821 #define BIT_VCODEC_CLK_SEL      (1<<10)
822         if (cpu_is_rk312x())
823                 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1)
824                         | BIT_VCODEC_CLK_SEL | (BIT_VCODEC_CLK_SEL << 16),
825                         RK_GRF_VIRT + RK312X_GRF_SOC_CON1);
826
827 #if VCODEC_CLOCK_ENABLE
828         if (pservice->aclk_vcodec)
829                 clk_prepare_enable(pservice->aclk_vcodec);
830         if (pservice->hclk_vcodec)
831                 clk_prepare_enable(pservice->hclk_vcodec);
832         if (pservice->clk_core)
833                 clk_prepare_enable(pservice->clk_core);
834         if (pservice->clk_cabac)
835                 clk_prepare_enable(pservice->clk_cabac);
836         if (pservice->pd_video)
837                 clk_prepare_enable(pservice->pd_video);
838 #endif
839
840         udelay(5);
841         atomic_add(1, &pservice->power_on_cnt);
842         wake_lock(&pservice->wake_lock);
843 }
844
845 static inline bool reg_check_interlace(struct vpu_reg *reg)
846 {
847         u32 type = (reg->reg[3] & (1 << 23));
848
849         return (type > 0);
850 }
851
852 static inline enum VPU_DEC_FMT reg_check_fmt(struct vpu_reg *reg)
853 {
854         enum VPU_DEC_FMT type = (enum VPU_DEC_FMT)((reg->reg[3] >> 28) & 0xf);
855
856         return type;
857 }
858
859 static inline int reg_probe_width(struct vpu_reg *reg)
860 {
861         int width_in_mb = reg->reg[4] >> 23;
862
863         return width_in_mb * 16;
864 }
865
866 static inline int reg_probe_hevc_y_stride(struct vpu_reg *reg)
867 {
868         int y_virstride = reg->reg[8];
869
870         return y_virstride;
871 }
872
873 static int vcodec_fd_to_iova(struct vpu_subdev_data *data,
874                              struct vpu_reg *reg, int fd)
875 {
876         struct vpu_service_info *pservice = data->pservice;
877         struct ion_handle *hdl;
878         int ret = 0;
879         struct vcodec_mem_region *mem_region;
880
881         hdl = ion_import_dma_buf(pservice->ion_client, fd);
882         if (IS_ERR(hdl)) {
883                 vpu_err("import dma-buf from fd %d failed\n", fd);
884                 return PTR_ERR(hdl);
885         }
886         mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
887
888         if (mem_region == NULL) {
889                 vpu_err("allocate memory for iommu memory region failed\n");
890                 ion_free(pservice->ion_client, hdl);
891                 return -1;
892         }
893
894         mem_region->hdl = hdl;
895         if (data->mmu_dev)
896                 ret = ion_map_iommu(data->dev, pservice->ion_client,
897                                     mem_region->hdl, &mem_region->iova,
898                                     &mem_region->len);
899         else
900                 ret = ion_phys(pservice->ion_client,
901                                mem_region->hdl,
902                                (ion_phys_addr_t *)&mem_region->iova,
903                                (size_t *)&mem_region->len);
904
905         if (ret < 0) {
906                 vpu_err("fd %d ion map iommu failed\n", fd);
907                 kfree(mem_region);
908                 ion_free(pservice->ion_client, hdl);
909                 return ret;
910         }
911         INIT_LIST_HEAD(&mem_region->reg_lnk);
912         list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
913         return mem_region->iova;
914 }
915
916 /*
917  * NOTE: rkvdec/rkhevc put the scaling list address in the pps buffer; the
918  * hardware reads it by the pps id found in the video stream data.
919  *
920  * So we need to translate that address in the iommu case. The address uses
921  * the same 10bit fd + 22bit offset format.
922  * Because the userspace decoder does not give the pps id in the register file
923  * set, the kernel driver has to translate every scaling list address in the
924  * pps buffer, which means 256 pps entries for H.264 and 64 for H.265.
925  *
926  * To optimize performance the kernel driver asks the userspace decoder to set
927  * all scaling list addresses in the pps buffer to the same one that will be
928  * used by the current decoding task. The driver then only translates the
929  * first address and copies the result to the whole pps buffer.
930  */
931 static void fill_scaling_list_addr_in_pps(
932                 struct vpu_subdev_data *data,
933                 struct vpu_reg *reg,
934                 char *pps,
935                 int pps_info_count,
936                 int pps_info_size,
937                 int scaling_list_addr_offset)
938 {
939         int base = scaling_list_addr_offset;
940         int scaling_fd = 0;
941         u32 scaling_offset;
942
943         scaling_offset  = (u32)pps[base + 0];
944         scaling_offset += (u32)pps[base + 1] << 8;
945         scaling_offset += (u32)pps[base + 2] << 16;
946         scaling_offset += (u32)pps[base + 3] << 24;
947
948         scaling_fd = scaling_offset & 0x3ff;
949         scaling_offset = scaling_offset >> 10;
950
951         if (scaling_fd > 0) {
952                 int i = 0;
953                 u32 tmp = vcodec_fd_to_iova(data, reg, scaling_fd);
954                 tmp += scaling_offset;
955
956                 for (i = 0; i < pps_info_count; i++, base += pps_info_size) {
957                         pps[base + 0] = (tmp >>  0) & 0xff;
958                         pps[base + 1] = (tmp >>  8) & 0xff;
959                         pps[base + 2] = (tmp >> 16) & 0xff;
960                         pps[base + 3] = (tmp >> 24) & 0xff;
961                 }
962         }
963 }
964
965 static int vcodec_bufid_to_iova(struct vpu_subdev_data *data, const u8 *tbl,
966                                 int size, struct vpu_reg *reg,
967                                 struct extra_info_for_iommu *ext_inf)
968 {
969         struct vpu_service_info *pservice = data->pservice;
970         struct vpu_task_info *task = reg->task;
971         enum FORMAT_TYPE type;
972         struct ion_handle *hdl;
973         int ret = 0;
974         struct vcodec_mem_region *mem_region;
975         int i;
976         int offset = 0;
977
978         if (tbl == NULL || size <= 0) {
979                 dev_err(pservice->dev, "invalid input arguments\n");
980                 return -1;
981         }
982
983         if (task->get_fmt)
984                 type = task->get_fmt(reg->reg);
985         else {
986                 pr_err("invalid task with NULL get_fmt\n");
987                 return -1;
988         }
989
990         for (i = 0; i < size; i++) {
991                 int usr_fd = reg->reg[tbl[i]] & 0x3FF;
992
993                 /* if userspace do not set the fd at this register, skip */
994                 if (usr_fd == 0)
995                         continue;
996
997                 /*
998                  * special offset scale case
999                  *
1000                  * This translation handles fd + offset pairs.
1001                  * One register has 32 bits. We need to transfer both the buffer
1002                  * file handle and the start address offset, so we pack the file
1003                  * handle and the offset together using the format below:
1004                  *
1005                  *  bits  0~9  buffer file handle, range 0 ~ 1023
1006                  *  bits 10~31 offset, range 0 ~ 4M
1007                  *
1008                  * But in the 4K case the offset can be larger than 4M.
1009                  * So for the H.264 4K vpu/vpu2 decoder we scale the offset by 16.
1010                  * MPEG4 uses the same register for colmv and does not need the
1011                  * scaling.
1012                  *
1013                  * RKVdec does not have this issue.
1014                  */
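                /*
                 * Worked example with made-up numbers: a register value of
                 * 0x00400005 encodes fd = 5 and an offset field of 0x1000,
                 * i.e. an offset of 4096 bytes normally, or 4096 * 16 = 65536
                 * bytes when this is the H.264/VP9 dir_mv register described
                 * above.
                 */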
1015                 if ((type == FMT_H264D || type == FMT_VP9D) &&
1016                     task->reg_dir_mv > 0 && task->reg_dir_mv == tbl[i])
1017                         offset = reg->reg[tbl[i]] >> 10 << 4;
1018                 else
1019                         offset = reg->reg[tbl[i]] >> 10;
1020
1021                 vpu_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d\n",
1022                           tbl[i], usr_fd, offset);
1023
1024                 hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
1025                 if (IS_ERR(hdl)) {
1026                         dev_err(pservice->dev,
1027                                 "import dma-buf from fd %d failed, reg[%d]\n",
1028                                 usr_fd, tbl[i]);
1029                         return PTR_ERR(hdl);
1030                 }
1031
1032                 if (task->reg_pps > 0 && task->reg_pps == tbl[i]) {
1033                         int pps_info_offset;
1034                         int pps_info_count;
1035                         int pps_info_size;
1036                         int scaling_list_addr_offset;
1037
1038                         switch (type) {
1039                         case FMT_H264D: {
1040                                 pps_info_offset = offset;
1041                                 pps_info_count = 256;
1042                                 pps_info_size = 32;
1043                                 scaling_list_addr_offset = 23;
1044                         } break;
1045                         case FMT_H265D: {
1046                                 pps_info_offset = 0;
1047                                 pps_info_count = 64;
1048                                 pps_info_size = 80;
1049                                 scaling_list_addr_offset = 74;
1050                         } break;
1051                         default: {
1052                                 pps_info_offset = 0;
1053                                 pps_info_count = 0;
1054                                 pps_info_size = 0;
1055                                 scaling_list_addr_offset = 0;
1056                         } break;
1057                         }
1058
1059                         vpu_debug(DEBUG_PPS_FILL,
1060                                   "scaling list filling parameter:\n");
1061                         vpu_debug(DEBUG_PPS_FILL,
1062                                   "pps_info_offset %d\n", pps_info_offset);
1063                         vpu_debug(DEBUG_PPS_FILL,
1064                                   "pps_info_count  %d\n", pps_info_count);
1065                         vpu_debug(DEBUG_PPS_FILL,
1066                                   "pps_info_size   %d\n", pps_info_size);
1067                         vpu_debug(DEBUG_PPS_FILL,
1068                                   "scaling_list_addr_offset %d\n",
1069                                   scaling_list_addr_offset);
1070
1071                         if (pps_info_count) {
1072                                 char *pps = (char *)ion_map_kernel(
1073                                                 pservice->ion_client, hdl);
1074                                 vpu_debug(DEBUG_PPS_FILL,
1075                                           "scaling list setting pps %p\n", pps);
1076                                 pps += pps_info_offset;
1077
1078                                 fill_scaling_list_addr_in_pps(
1079                                                 data, reg, pps,
1080                                                 pps_info_count,
1081                                                 pps_info_size,
1082                                                 scaling_list_addr_offset);

                                 /* the kernel mapping is only needed while patching the pps */
                                 ion_unmap_kernel(pservice->ion_client, hdl);
1083                         }
1084                 }
1085
1086                 mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
1087
1088                 if (!mem_region) {
1089                         ion_free(pservice->ion_client, hdl);
1090                         return -1;
1091                 }
1092
1093                 mem_region->hdl = hdl;
1094                 mem_region->reg_idx = tbl[i];
1095
1096                 if (data->mmu_dev)
1097                         ret = ion_map_iommu(data->dev,
1098                                             pservice->ion_client,
1099                                             mem_region->hdl,
1100                                             &mem_region->iova,
1101                                             &mem_region->len);
1102                 else
1103                         ret = ion_phys(pservice->ion_client,
1104                                        mem_region->hdl,
1105                                        (ion_phys_addr_t *)&mem_region->iova,
1106                                        (size_t *)&mem_region->len);
1107
1108                 if (ret < 0) {
1109                         dev_err(pservice->dev, "reg %d fd %d ion map iommu failed\n",
1110                                 tbl[i], usr_fd);
1111                         kfree(mem_region);
1112                         ion_free(pservice->ion_client, hdl);
1113                         return ret;
1114                 }
1115
1116                 /*
1117                  * special case for vpu dec register 12: record the decoded length
1118                  * hack for the decoded length
1119                  * NOTE: not a perfect fix, the fd is not recorded
1120                  */
1121                 if (task->reg_len > 0 && task->reg_len == tbl[i]) {
1122                         reg->dec_base = mem_region->iova + offset;
1123                         vpu_debug(DEBUG_REGISTER, "dec_set %08x\n",
1124                                   reg->dec_base);
1125                 }
1126
1127                 reg->reg[tbl[i]] = mem_region->iova + offset;
1128                 INIT_LIST_HEAD(&mem_region->reg_lnk);
1129                 list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
1130         }
1131
1132         if (ext_inf != NULL && ext_inf->magic == EXTRA_INFO_MAGIC) {
1133                 for (i = 0; i < ext_inf->cnt; i++) {
1134                         vpu_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1135                                   ext_inf->elem[i].index,
1136                                   ext_inf->elem[i].offset);
1137                         reg->reg[ext_inf->elem[i].index] +=
1138                                 ext_inf->elem[i].offset;
1139                 }
1140         }
1141
1142         return 0;
1143 }
1144
1145 static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
1146                                         struct vpu_reg *reg,
1147                                         struct extra_info_for_iommu *ext_inf)
1148 {
1149         enum FORMAT_TYPE type = reg->task->get_fmt(reg->reg);
1150
1151         if (type < FMT_TYPE_BUTT) {
1152                 const struct vpu_trans_info *info = &reg->trans[type];
1153                 const u8 *tbl = info->table;
1154                 int size = info->count;
1155
1156                 return vcodec_bufid_to_iova(data, tbl, size, reg, ext_inf);
1157         }
1158         pr_err("found invalid format type!\n");
1159         return -1;
1160 }
1161
1162 static void get_reg_freq(struct vpu_subdev_data *data, struct vpu_reg *reg)
1163 {
1164
1165         if (!soc_is_rk2928g()) {
1166                 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1167                         if (reg_check_fmt(reg) == VPU_DEC_FMT_H264) {
1168                                 if (reg_probe_width(reg) > 3200) {
1169                                         /*raise frequency for 4k avc.*/
1170                                         reg->freq = VPU_FREQ_600M;
1171                                 }
1172                         } else {
1173                                 if (reg_check_interlace(reg))
1174                                         reg->freq = VPU_FREQ_400M;
1175                         }
1176                 }
1177                 if (data->hw_id == HEVC_ID) {
1178                         if (reg_probe_hevc_y_stride(reg) > 60000)
1179                                 reg->freq = VPU_FREQ_400M;
1180                 }
1181                 if (reg->type == VPU_PP)
1182                         reg->freq = VPU_FREQ_400M;
1183         }
1184 }
1185
1186 static struct vpu_reg *reg_init(struct vpu_subdev_data *data,
1187                                 struct vpu_session *session,
1188                                 void __user *src, u32 size)
1189 {
1190         struct vpu_service_info *pservice = data->pservice;
1191         int extra_size = 0;
1192         struct extra_info_for_iommu extra_info;
1193         struct vpu_reg *reg = kzalloc(sizeof(*reg) + data->reg_size,
1194                                       GFP_KERNEL);
1195
1196         vpu_debug_enter();
1197
1198         if (!reg) {
1199                 vpu_err("error: kzalloc failed in reg_init\n");
1200                 return NULL;
1201         }
1202
1203         if (size > data->reg_size) {
1204                 pr_err("vpu reg size %u is larger than hw reg size %u\n",
1205                        size, data->reg_size);
1206                 extra_size = size - data->reg_size;
1207                 size = data->reg_size;
1208         }
1209         reg->session = session;
1210         reg->data = data;
1211         reg->type = session->type;
1212         reg->size = size;
1213         reg->freq = VPU_FREQ_DEFAULT;
1214         reg->task = &data->task_info[session->type];
1215         reg->trans = data->trans_info;
1216         reg->reg = (u32 *)&reg[1];
1217         INIT_LIST_HEAD(&reg->session_link);
1218         INIT_LIST_HEAD(&reg->status_link);
1219
1220         INIT_LIST_HEAD(&reg->mem_region_list);
1221
1222         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
1223                 vpu_err("error: copy_from_user failed in reg_init\n");
1224                 kfree(reg);
1225                 return NULL;
1226         }
1227
1228         if (copy_from_user(&extra_info, (u8 __user *)src + size,
                                min_t(size_t, extra_size, sizeof(extra_info)))) {
1229                 vpu_err("error: copy_from_user failed in reg_init\n");
1230                 kfree(reg);
1231                 return NULL;
1232         }
1233
1234         if (0 > vcodec_reg_address_translate(data, reg, &extra_info)) {
1235                 int i = 0;
1236
1237                 vpu_err("error: translate reg address failed, dumping regs\n");
1238                 for (i = 0; i < size >> 2; i++)
1239                         pr_err("reg[%02d]: %08x\n", i, reg->reg[i]);
1240
1241                 kfree(reg);
1242                 return NULL;
1243         }
1244
1245         mutex_lock(&pservice->lock);
1246         list_add_tail(&reg->status_link, &pservice->waiting);
1247         list_add_tail(&reg->session_link, &session->waiting);
1248         mutex_unlock(&pservice->lock);
1249
1250         if (pservice->auto_freq)
1251                 get_reg_freq(data, reg);
1252
1253         vpu_debug_leave();
1254         return reg;
1255 }
1256
1257 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg)
1258 {
1259         struct vpu_service_info *pservice = data->pservice;
1260         struct vcodec_mem_region *mem_region = NULL, *n;
1261
1262         list_del_init(&reg->session_link);
1263         list_del_init(&reg->status_link);
1264         if (reg == pservice->reg_codec)
1265                 pservice->reg_codec = NULL;
1266         if (reg == pservice->reg_pproc)
1267                 pservice->reg_pproc = NULL;
1268
1269         /* release memory regions attached to this register table. */
1270         list_for_each_entry_safe(mem_region, n,
1271                         &reg->mem_region_list, reg_lnk) {
1272                 ion_free(pservice->ion_client, mem_region->hdl);
1273                 list_del_init(&mem_region->reg_lnk);
1274                 kfree(mem_region);
1275         }
1276
1277         kfree(reg);
1278 }
1279
1280 static void reg_from_wait_to_run(struct vpu_service_info *pservice,
1281                                  struct vpu_reg *reg)
1282 {
1283         vpu_debug_enter();
1284         list_del_init(&reg->status_link);
1285         list_add_tail(&reg->status_link, &pservice->running);
1286
1287         list_del_init(&reg->session_link);
1288         list_add_tail(&reg->session_link, &reg->session->running);
1289         vpu_debug_leave();
1290 }
1291
1292 static void reg_copy_from_hw(struct vpu_reg *reg, u32 *src, u32 count)
1293 {
1294         int i;
1295         u32 *dst = reg->reg;
1296
1297         vpu_debug_enter();
1298         for (i = 0; i < count; i++, src++)
1299                 *dst++ = readl_relaxed(src);
1300
1301         dst = (u32 *)&reg->reg[0];
1302         for (i = 0; i < count; i++)
1303                 vpu_debug(DEBUG_GET_REG, "get reg[%02d] %08x\n", i, dst[i]);
1304
1305         vpu_debug_leave();
1306 }
1307
1308 static void reg_from_run_to_done(struct vpu_subdev_data *data,
1309                                  struct vpu_reg *reg)
1310 {
1311         struct vpu_service_info *pservice = data->pservice;
1312         struct vpu_hw_info *hw_info = data->hw_info;
1313         struct vpu_task_info *task = reg->task;
1314
1315         vpu_debug_enter();
1316
1317         list_del_init(&reg->status_link);
1318         list_add_tail(&reg->status_link, &pservice->done);
1319
1320         list_del_init(&reg->session_link);
1321         list_add_tail(&reg->session_link, &reg->session->done);
1322
1323         switch (reg->type) {
1324         case VPU_ENC: {
1325                 pservice->reg_codec = NULL;
1326                 reg_copy_from_hw(reg, data->enc_dev.regs, hw_info->enc_reg_num);
1327                 reg->reg[task->reg_irq] = pservice->irq_status;
1328         } break;
1329         case VPU_DEC: {
1330                 pservice->reg_codec = NULL;
1331                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1332
1333                 /* revert hack for decoded length */
1334                 if (task->reg_len > 0) {
1335                         int reg_len = task->reg_len;
1336                         u32 dec_get = reg->reg[reg_len];
1337                         s32 dec_length = dec_get - reg->dec_base;
1338
1339                         vpu_debug(DEBUG_REGISTER,
1340                                   "dec_get %08x dec_length %d\n",
1341                                   dec_get, dec_length);
1342                         reg->reg[reg_len] = dec_length << 10;
1343                 }
1344
1345                 reg->reg[task->reg_irq] = pservice->irq_status;
1346         } break;
1347         case VPU_PP: {
1348                 pservice->reg_pproc = NULL;
1349                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1350                 writel_relaxed(0, data->dec_dev.regs + task->reg_irq);
1351         } break;
1352         case VPU_DEC_PP: {
1353                 u32 pipe_mode;
1354                 u32 *regs = data->dec_dev.regs;
1355
1356                 pservice->reg_codec = NULL;
1357                 pservice->reg_pproc = NULL;
1358
1359                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1360
1361                 /* NOTE: remove pp pipeline mode flag first */
1362                 pipe_mode = readl_relaxed(regs + task->reg_pipe);
1363                 pipe_mode &= ~task->pipe_mask;
1364                 writel_relaxed(pipe_mode, regs + task->reg_pipe);
1365
1366                 /* revert hack for decoded length */
1367                 if (task->reg_len > 0) {
1368                         int reg_len = task->reg_len;
1369                         u32 dec_get = reg->reg[reg_len];
1370                         s32 dec_length = dec_get - reg->dec_base;
1371
1372                         vpu_debug(DEBUG_REGISTER,
1373                                   "dec_get %08x dec_length %d\n",
1374                                   dec_get, dec_length);
1375                         reg->reg[reg_len] = dec_length << 10;
1376                 }
1377
1378                 reg->reg[task->reg_irq] = pservice->irq_status;
1379         } break;
1380         default: {
1381                 vpu_err("error: copy reg from hw with unknown type %d\n",
1382                         reg->type);
1383         } break;
1384         }
1385         vcodec_exit_mode(data);
1386
1387         atomic_sub(1, &reg->session->task_running);
1388         atomic_sub(1, &pservice->total_running);
1389         wake_up(&reg->session->wait);
1390
1391         vpu_debug_leave();
1392 }
1393
1394 static void vpu_service_set_freq(struct vpu_service_info *pservice,
1395                                  struct vpu_reg *reg)
1396 {
1397         enum VPU_FREQ curr = atomic_read(&pservice->freq_status);
1398
1399         if (curr == reg->freq)
1400                 return;
1401
1402         atomic_set(&pservice->freq_status, reg->freq);
1403         switch (reg->freq) {
1404         case VPU_FREQ_200M: {
1405                 clk_set_rate(pservice->aclk_vcodec, 200*MHZ);
1406         } break;
1407         case VPU_FREQ_266M: {
1408                 clk_set_rate(pservice->aclk_vcodec, 266*MHZ);
1409         } break;
1410         case VPU_FREQ_300M: {
1411                 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1412         } break;
1413         case VPU_FREQ_400M: {
1414                 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1415         } break;
1416         case VPU_FREQ_500M: {
1417                 clk_set_rate(pservice->aclk_vcodec, 500*MHZ);
1418         } break;
1419         case VPU_FREQ_600M: {
1420                 clk_set_rate(pservice->aclk_vcodec, 600*MHZ);
1421         } break;
1422         default: {
1423                 unsigned long rate = 300*MHZ;
1424
1425                 if (soc_is_rk2928g())
1426                         rate = 400*MHZ;
1427
1428                 clk_set_rate(pservice->aclk_vcodec, rate);
1429         } break;
1430         }
1431 }
1432
1433 static void reg_copy_to_hw(struct vpu_subdev_data *data, struct vpu_reg *reg)
1434 {
1435         struct vpu_service_info *pservice = data->pservice;
1436         struct vpu_task_info *task = reg->task;
1437         struct vpu_hw_info *hw_info = data->hw_info;
1438         int i;
1439         u32 *src = (u32 *)&reg->reg[0];
1440         u32 enable_mask = task->enable_mask;
1441         u32 gating_mask = task->gating_mask;
1442         u32 reg_en = task->reg_en;
1443
1444         vpu_debug_enter();
1445
1446         atomic_add(1, &pservice->total_running);
1447         atomic_add(1, &reg->session->task_running);
1448
1449         if (pservice->auto_freq)
1450                 vpu_service_set_freq(pservice, reg);
1451
1452         vcodec_enter_mode(data);
1453
1454         switch (reg->type) {
1455         case VPU_ENC: {
1456                 u32 *dst = data->enc_dev.regs;
1457                 u32 base = 0;
1458                 u32 end  = hw_info->enc_reg_num;
1459                 /* u32 reg_gating = task->reg_gating; */
1460
1461                 pservice->reg_codec = reg;
1462
1463                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1464                           base, end, reg_en, enable_mask, gating_mask);
1465
1466                 VEPU_CLEAN_CACHE(dst);
1467
1468                 if (debug & DEBUG_SET_REG)
1469                         for (i = base; i < end; i++)
1470                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1471                                           i, src[i]);
1472
1473                 /*
1474                  * NOTE: the encoder needs to have its mode set up first
1475                  */
1476                 writel_relaxed(src[reg_en] & enable_mask, dst + reg_en);
1477
1478                 /* NOTE: encoder gating is not on enable register */
1479                 /* src[reg_gating] |= gating_mask; */
1480
1481                 for (i = base; i < end; i++) {
1482                         if (i != reg_en)
1483                                 writel_relaxed(src[i], dst + i);
1484                 }
1485
1486                 writel(src[reg_en], dst + reg_en);
1487                 dsb(sy);
1488
1489                 time_record(reg->task, 0);
1490         } break;
1491         case VPU_DEC: {
1492                 u32 *dst = data->dec_dev.regs;
1493                 u32 len = hw_info->dec_reg_num;
1494                 u32 base = hw_info->base_dec;
1495                 u32 end  = hw_info->end_dec;
1496
1497                 pservice->reg_codec = reg;
1498
1499                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1500                           base, end, reg_en, enable_mask, gating_mask);
1501
1502                 VDPU_CLEAN_CACHE(dst);
1503
1504                 /* on rkvdec set the cache line size to 64 bytes */
1505                 if (pservice->dev_id == VCODEC_DEVICE_ID_RKVDEC) {
1506                         u32 *cache_base = dst + 0x100;
1507                         u32 val = (debug & DEBUG_CACHE_32B) ? (0x3) : (0x13);
1508                         writel_relaxed(val, cache_base + 0x07);
1509                         writel_relaxed(val, cache_base + 0x17);
1510                 }
1511
1512                 if (debug & DEBUG_SET_REG)
1513                         for (i = 0; i < len; i++)
1514                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1515                                           i, src[i]);
1516
1517                 /*
1518                  * NOTE: The end register is invalid; do NOT write to it.
1519                  *       The base register, however, must be written.
1520                  */
1521                 for (i = base; i < end; i++) {
1522                         if (i != reg_en)
1523                                 writel_relaxed(src[i], dst + i);
1524                 }
1525
1526                 writel(src[reg_en] | gating_mask, dst + reg_en);
1527                 dsb(sy);
1528
1529                 time_record(reg->task, 0);
1530         } break;
1531         case VPU_PP: {
1532                 u32 *dst = data->dec_dev.regs;
1533                 u32 base = hw_info->base_pp;
1534                 u32 end  = hw_info->end_pp;
1535
1536                 pservice->reg_pproc = reg;
1537
1538                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1539                           base, end, reg_en, enable_mask, gating_mask);
1540
1541                 if (debug & DEBUG_SET_REG)
1542                         for (i = base; i < end; i++)
1543                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1544                                           i, src[i]);
1545
1546                 for (i = base; i < end; i++) {
1547                         if (i != reg_en)
1548                                 writel_relaxed(src[i], dst + i);
1549                 }
1550
1551                 writel(src[reg_en] | gating_mask, dst + reg_en);
1552                 dsb(sy);
1553
1554                 time_record(reg->task, 0);
1555         } break;
1556         case VPU_DEC_PP: {
1557                 u32 *dst = data->dec_dev.regs;
1558                 u32 base = hw_info->base_dec_pp;
1559                 u32 end  = hw_info->end_dec_pp;
1560
1561                 pservice->reg_codec = reg;
1562                 pservice->reg_pproc = reg;
1563
1564                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1565                           base, end, reg_en, enable_mask, gating_mask);
1566
1567                 /* VDPU_SOFT_RESET(dst); */
1568                 VDPU_CLEAN_CACHE(dst);
1569
1570                 if (debug & DEBUG_SET_REG)
1571                         for (i = base; i < end; i++)
1572                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1573                                           i, src[i]);
1574
1575                 for (i = base; i < end; i++) {
1576                         if (i != reg_en)
1577                                 writel_relaxed(src[i], dst + i);
1578                 }
1579
1580                 /* NOTE: dec output must be disabled */
1581
1582                 writel(src[reg_en] | gating_mask, dst + reg_en);
1583                 dsb(sy);
1584
1585                 time_record(reg->task, 0);
1586         } break;
1587         default: {
1588                 vpu_err("error: unsupported session type %d\n", reg->type);
1589                 atomic_sub(1, &pservice->total_running);
1590                 atomic_sub(1, &reg->session->task_running);
1591         } break;
1592         }
1593
1594         vpu_debug_leave();
1595 }
1596
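/*
 * Take the first register set from the waiting list and, when the
 * required hardware block is idle, move it to the running state and
 * program it into the hardware.  A pending reset request is serviced
 * first when both codec and pp are idle.  With auto_freq enabled a
 * task is held back rather than changing the clock while the other
 * block is busy.  Callers take pservice->lock around this call.
 */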
1597 static void try_set_reg(struct vpu_subdev_data *data)
1598 {
1599         struct vpu_service_info *pservice = data->pservice;
1600
1601         vpu_debug_enter();
1602         if (!list_empty(&pservice->waiting)) {
1603                 struct vpu_reg *reg_codec = pservice->reg_codec;
1604                 struct vpu_reg *reg_pproc = pservice->reg_pproc;
1605                 int can_set = 0;
1606                 bool change_able = (reg_codec == NULL) && (reg_pproc == NULL);
1607                 int reset_request = atomic_read(&pservice->reset_request);
1608                 struct vpu_reg *reg = list_entry(pservice->waiting.next,
1609                                 struct vpu_reg, status_link);
1610
1611                 vpu_service_power_on(pservice);
1612
1613                 if (change_able || !reset_request) {
1614                         switch (reg->type) {
1615                         case VPU_ENC: {
1616                                 if (change_able)
1617                                         can_set = 1;
1618                         } break;
1619                         case VPU_DEC: {
1620                                 if (reg_codec == NULL)
1621                                         can_set = 1;
1622                                 if (pservice->auto_freq && (reg_pproc != NULL))
1623                                         can_set = 0;
1624                         } break;
1625                         case VPU_PP: {
1626                                 if (reg_codec == NULL) {
1627                                         if (reg_pproc == NULL)
1628                                                 can_set = 1;
1629                                 } else {
1630                                         if ((reg_codec->type == VPU_DEC) &&
1631                                             (reg_pproc == NULL))
1632                                                 can_set = 1;
1633
1634                                         /*
1635                                          * NOTE:
1636                                          * cannot change the frequency
1637                                          * while the vpu is working
1638                                          */
1639                                         if (pservice->auto_freq)
1640                                                 can_set = 0;
1641                                 }
1642                         } break;
1643                         case VPU_DEC_PP: {
1644                                 if (change_able)
1645                                         can_set = 1;
1646                         } break;
1647                         default: {
1648                                 pr_err("undefined reg type %d\n", reg->type);
1649                         } break;
1650                         }
1651                 }
1652
1653                 /* then check reset request */
1654                 if (reset_request && !change_able)
1655                         reset_request = 0;
1656
1657                 /* do reset before setting registers */
1658                 if (reset_request)
1659                         vpu_reset(data);
1660
1661                 if (can_set) {
1662                         reg_from_wait_to_run(pservice, reg);
1663                         reg_copy_to_hw(reg->data, reg);
1664                 }
1665         }
1666         vpu_debug_leave();
1667 }
1668
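/*
 * Copy a finished register set back to user space, starting from the
 * register base that matches the task type, then release the vpu_reg.
 */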
1669 static int return_reg(struct vpu_subdev_data *data,
1670                       struct vpu_reg *reg, u32 __user *dst)
1671 {
1672         struct vpu_hw_info *hw_info = data->hw_info;
1673         size_t size = reg->size;
1674         u32 base;
1675
1676         vpu_debug_enter();
1677         switch (reg->type) {
1678         case VPU_ENC: {
1679                 base = 0;
1680         } break;
1681         case VPU_DEC: {
1682                 base = hw_info->base_dec_pp;
1683         } break;
1684         case VPU_PP: {
1685                 base = hw_info->base_pp;
1686         } break;
1687         case VPU_DEC_PP: {
1688                 base = hw_info->base_dec_pp;
1689         } break;
1690         default: {
1691                 vpu_err("error: copy reg to user with unknown type %d\n",
1692                         reg->type);
1693                 return -EFAULT;
1694         } break;
1695         }
1696
1697         if (copy_to_user(dst, &reg->reg[base], size)) {
1698                 vpu_err("error: return_reg copy_to_user failed\n");
1699                 return -EFAULT;
1700         }
1701
1702         reg_deinit(data, reg);
1703         vpu_debug_leave();
1704         return 0;
1705 }
1706
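/*
 * Main ioctl handler: selects the client type, reports the decoder or
 * encoder capability block, queues register sets (VPU_IOC_SET_REG) and
 * returns completed ones (VPU_IOC_GET_REG).  A GET_REG timeout dumps
 * the service state, resets the hardware and clears the session.
 */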
1707 static long vpu_service_ioctl(struct file *filp, unsigned int cmd,
1708                               unsigned long arg)
1709 {
1710         struct vpu_subdev_data *data =
1711                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1712                              struct vpu_subdev_data, cdev);
1713         struct vpu_service_info *pservice = data->pservice;
1714         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1715
1716         vpu_debug_enter();
1717         if (NULL == session)
1718                 return -EINVAL;
1719
1720         switch (cmd) {
1721         case VPU_IOC_SET_CLIENT_TYPE: {
1722                 session->type = (enum VPU_CLIENT_TYPE)arg;
1723                 vpu_debug(DEBUG_IOCTL, "pid %d set client type %d\n",
1724                           session->pid, session->type);
1725         } break;
1726         case VPU_IOC_GET_HW_FUSE_STATUS: {
1727                 struct vpu_request req;
1728
1729                 vpu_debug(DEBUG_IOCTL, "pid %d get hw status %d\n",
1730                           session->pid, session->type);
1731                 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
1732                         vpu_err("error: get hw status copy_from_user failed\n");
1733                         return -EFAULT;
1734                 } else {
1735                         void *config = (session->type != VPU_ENC) ?
1736                                        ((void *)&pservice->dec_config) :
1737                                        ((void *)&pservice->enc_config);
1738                         size_t size = (session->type != VPU_ENC) ?
1739                                       (sizeof(struct vpu_dec_config)) :
1740                                       (sizeof(struct vpu_enc_config));
1741                         if (copy_to_user((void __user *)req.req,
1742                                          config, size)) {
1743                                 vpu_err("error: get hw status copy_to_user failed type %d\n",
1744                                         session->type);
1745                                 return -EFAULT;
1746                         }
1747                 }
1748         } break;
1749         case VPU_IOC_SET_REG: {
1750                 struct vpu_request req;
1751                 struct vpu_reg *reg;
1752
1753                 vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
1754                           session->pid, session->type);
1755                 if (copy_from_user(&req, (void __user *)arg,
1756                                    sizeof(struct vpu_request))) {
1757                         vpu_err("error: set reg copy_from_user failed\n");
1758                         return -EFAULT;
1759                 }
1760                 reg = reg_init(data, session, (void __user *)req.req, req.size);
1761                 if (NULL == reg) {
1762                         return -EFAULT;
1763                 } else {
1764                         mutex_lock(&pservice->lock);
1765                         try_set_reg(data);
1766                         mutex_unlock(&pservice->lock);
1767                 }
1768         } break;
1769         case VPU_IOC_GET_REG: {
1770                 struct vpu_request req;
1771                 struct vpu_reg *reg;
1772                 int ret;
1773
1774                 vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
1775                           session->pid, session->type);
1776                 if (copy_from_user(&req, (void __user *)arg,
1777                                    sizeof(struct vpu_request))) {
1778                         vpu_err("error: get reg copy_from_user failed\n");
1779                         return -EFAULT;
1780                 }
1781
1782                 ret = wait_event_timeout(session->wait,
1783                                          !list_empty(&session->done),
1784                                          VPU_TIMEOUT_DELAY);
1785
1786                 if (!list_empty(&session->done)) {
1787                         if (ret < 0)
1788                                 vpu_err("warning: pid %d wait task error ret %d\n",
1789                                         session->pid, ret);
1790                         ret = 0;
1791                 } else {
1792                         if (unlikely(ret < 0)) {
1793                                 vpu_err("error: pid %d wait task ret %d\n",
1794                                         session->pid, ret);
1795                         } else if (ret == 0) {
1796                                 vpu_err("error: pid %d wait %d task done timeout\n",
1797                                         session->pid,
1798                                         atomic_read(&session->task_running));
1799                                 ret = -ETIMEDOUT;
1800                         }
1801                 }
1802
1803                 if (ret < 0) {
1804                         int task_running = atomic_read(&session->task_running);
1805
1806                         mutex_lock(&pservice->lock);
1807                         vpu_service_dump(pservice);
1808                         if (task_running) {
1809                                 atomic_set(&session->task_running, 0);
1810                                 atomic_sub(task_running,
1811                                            &pservice->total_running);
1812                                 pr_err("%d task(s) running but not returned, resetting hardware...",
1813                                        task_running);
1814                                 vpu_reset(data);
1815                                 pr_err("done\n");
1816                         }
1817                         vpu_service_session_clear(data, session);
1818                         mutex_unlock(&pservice->lock);
1819                         return ret;
1820                 }
1821
1822                 mutex_lock(&pservice->lock);
1823                 reg = list_entry(session->done.next,
1824                                  struct vpu_reg, session_link);
1825                 return_reg(data, reg, (u32 __user *)req.req);
1826                 mutex_unlock(&pservice->lock);
1827         } break;
1828         case VPU_IOC_PROBE_IOMMU_STATUS: {
1829                 int iommu_enable = 1;
1830
1831                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_PROBE_IOMMU_STATUS\n");
1832
1833                 if (copy_to_user((void __user *)arg,
1834                                  &iommu_enable, sizeof(int))) {
1835                         vpu_err("error: iommu status copy_to_user failed\n");
1836                         return -EFAULT;
1837                 }
1838         } break;
1839         default: {
1840                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1841         } break;
1842         }
1843         vpu_debug_leave();
1844         return 0;
1845 }
1846
1847 #ifdef CONFIG_COMPAT
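/*
 * 32-bit compat ioctl path: same logic as vpu_service_ioctl(), but
 * user pointers arrive as compat_uptr_t and requests use
 * struct compat_vpu_request.
 */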
1848 static long compat_vpu_service_ioctl(struct file *filp, unsigned int cmd,
1849                                      unsigned long arg)
1850 {
1851         struct vpu_subdev_data *data =
1852                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1853                              struct vpu_subdev_data, cdev);
1854         struct vpu_service_info *pservice = data->pservice;
1855         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1856
1857         vpu_debug_enter();
1858         vpu_debug(3, "cmd %x, COMPAT_VPU_IOC_SET_CLIENT_TYPE %x\n", cmd,
1859                   (u32)COMPAT_VPU_IOC_SET_CLIENT_TYPE);
1860         if (NULL == session)
1861                 return -EINVAL;
1862
1863         switch (cmd) {
1864         case COMPAT_VPU_IOC_SET_CLIENT_TYPE: {
1865                 session->type = (enum VPU_CLIENT_TYPE)arg;
1866                 vpu_debug(DEBUG_IOCTL, "compat set client type %d\n",
1867                           session->type);
1868         } break;
1869         case COMPAT_VPU_IOC_GET_HW_FUSE_STATUS: {
1870                 struct compat_vpu_request req;
1871
1872                 vpu_debug(DEBUG_IOCTL, "compat get hw status %d\n",
1873                           session->type);
1874                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1875                                    sizeof(struct compat_vpu_request))) {
1876                         vpu_err("error: compat get hw status copy_from_user failed\n");
1877                         return -EFAULT;
1878                 } else {
1879                         void *config = (session->type != VPU_ENC) ?
1880                                        ((void *)&pservice->dec_config) :
1881                                        ((void *)&pservice->enc_config);
1882                         size_t size = (session->type != VPU_ENC) ?
1883                                       (sizeof(struct vpu_dec_config)) :
1884                                       (sizeof(struct vpu_enc_config));
1885
1886                         if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1887                                          config, size)) {
1888                                 vpu_err("error: compat get hw status copy_to_user failed type %d\n",
1889                                         session->type);
1890                                 return -EFAULT;
1891                         }
1892                 }
1893         } break;
1894         case COMPAT_VPU_IOC_SET_REG: {
1895                 struct compat_vpu_request req;
1896                 struct vpu_reg *reg;
1897
1898                 vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
1899                           session->type);
1900                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1901                                    sizeof(struct compat_vpu_request))) {
1902                         vpu_err("compat set_reg copy_from_user failed\n");
1903                         return -EFAULT;
1904                 }
1905                 reg = reg_init(data, session,
1906                                compat_ptr((compat_uptr_t)req.req), req.size);
1907                 if (NULL == reg) {
1908                         return -EFAULT;
1909                 } else {
1910                         mutex_lock(&pservice->lock);
1911                         try_set_reg(data);
1912                         mutex_unlock(&pservice->lock);
1913                 }
1914         } break;
1915         case COMPAT_VPU_IOC_GET_REG: {
1916                 struct compat_vpu_request req;
1917                 struct vpu_reg *reg;
1918                 int ret;
1919
1920                 vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
1921                           session->type);
1922                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1923                                    sizeof(struct compat_vpu_request))) {
1924                         vpu_err("compat get reg copy_from_user failed\n");
1925                         return -EFAULT;
1926                 }
1927
1928                 ret = wait_event_timeout(session->wait,
1929                                          !list_empty(&session->done),
1930                                          VPU_TIMEOUT_DELAY);
1931
1932                 if (!list_empty(&session->done)) {
1933                         if (ret < 0)
1934                                 vpu_err("warning: pid %d wait task error ret %d\n",
1935                                         session->pid, ret);
1936                         ret = 0;
1937                 } else {
1938                         if (unlikely(ret < 0)) {
1939                                 vpu_err("error: pid %d wait task ret %d\n",
1940                                         session->pid, ret);
1941                         } else if (ret == 0) {
1942                                 vpu_err("error: pid %d wait %d task done timeout\n",
1943                                         session->pid,
1944                                         atomic_read(&session->task_running));
1945                                 ret = -ETIMEDOUT;
1946                         }
1947                 }
1948
1949                 if (ret < 0) {
1950                         int task_running = atomic_read(&session->task_running);
1951
1952                         mutex_lock(&pservice->lock);
1953                         vpu_service_dump(pservice);
1954                         if (task_running) {
1955                                 atomic_set(&session->task_running, 0);
1956                                 atomic_sub(task_running,
1957                                            &pservice->total_running);
1958                                 pr_err("%d task(s) running but not returned, resetting hardware...",
1959                                        task_running);
1960                                 vpu_reset(data);
1961                                 pr_err("done\n");
1962                         }
1963                         vpu_service_session_clear(data, session);
1964                         mutex_unlock(&pservice->lock);
1965                         return ret;
1966                 }
1967
1968                 mutex_lock(&pservice->lock);
1969                 reg = list_entry(session->done.next,
1970                                  struct vpu_reg, session_link);
1971                 return_reg(data, reg, compat_ptr((compat_uptr_t)req.req));
1972                 mutex_unlock(&pservice->lock);
1973         } break;
1974         case COMPAT_VPU_IOC_PROBE_IOMMU_STATUS: {
1975                 int iommu_enable = 1;
1976
1977                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_PROBE_IOMMU_STATUS\n");
1978
1979                 if (copy_to_user(compat_ptr((compat_uptr_t)arg),
1980                                  &iommu_enable, sizeof(int))) {
1981                         vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
1982                         return -EFAULT;
1983                 }
1984         } break;
1985         default: {
1986                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1987         } break;
1988         }
1989         vpu_debug_leave();
1990         return 0;
1991 }
1992 #endif
1993
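/*
 * Identify the hardware from the ID in its first register and pick the
 * matching entry from vcodec_info_set to fill in the hw/task/translate
 * tables.  Returns -EINVAL for an unknown ID.
 */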
1994 static int vpu_service_check_hw(struct vpu_subdev_data *data)
1995 {
1996         int ret = -EINVAL, i = 0;
1997         u32 hw_id = readl_relaxed(data->regs);
1998
1999         hw_id = (hw_id >> 16) & 0xFFFF;
2000         pr_info("checking hw id %x\n", hw_id);
2001         data->hw_info = NULL;
2002         for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
2003                 struct vcodec_info *info = &vcodec_info_set[i];
2004
2005                 if (hw_id == info->hw_id) {
2006                         data->hw_id = info->hw_id;
2007                         data->hw_info = info->hw_info;
2008                         data->task_info = info->task_info;
2009                         data->trans_info = info->trans_info;
2010                         ret = 0;
2011                         break;
2012                 }
2013         }
2014         return ret;
2015 }
2016
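/*
 * Per-open setup: allocate a vpu_session, initialise its task lists and
 * wait queue, add it to the service session list and stash it in
 * filp->private_data.
 */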
2017 static int vpu_service_open(struct inode *inode, struct file *filp)
2018 {
2019         struct vpu_subdev_data *data = container_of(
2020                         inode->i_cdev, struct vpu_subdev_data, cdev);
2021         struct vpu_service_info *pservice = data->pservice;
2022         struct vpu_session *session = kmalloc(sizeof(*session), GFP_KERNEL);
2023
2024         vpu_debug_enter();
2025
2026         if (NULL == session) {
2027                 vpu_err("error: unable to allocate memory for vpu_session\n");
2028                 return -ENOMEM;
2029         }
2030
2031         session->type   = VPU_TYPE_BUTT;
2032         session->pid    = current->pid;
2033         INIT_LIST_HEAD(&session->waiting);
2034         INIT_LIST_HEAD(&session->running);
2035         INIT_LIST_HEAD(&session->done);
2036         INIT_LIST_HEAD(&session->list_session);
2037         init_waitqueue_head(&session->wait);
2038         atomic_set(&session->task_running, 0);
2039         mutex_lock(&pservice->lock);
2040         list_add_tail(&session->list_session, &pservice->session);
2041         filp->private_data = (void *)session;
2042         mutex_unlock(&pservice->lock);
2043
2044         pr_debug("dev opened\n");
2045         vpu_debug_leave();
2046         return nonseekable_open(inode, filp);
2047 }
2048
2049 static int vpu_service_release(struct inode *inode, struct file *filp)
2050 {
2051         struct vpu_subdev_data *data = container_of(
2052                         inode->i_cdev, struct vpu_subdev_data, cdev);
2053         struct vpu_service_info *pservice = data->pservice;
2054         int task_running;
2055         struct vpu_session *session = (struct vpu_session *)filp->private_data;
2056
2057         vpu_debug_enter();
2058         if (NULL == session)
2059                 return -EINVAL;
2060
2061         task_running = atomic_read(&session->task_running);
2062         if (task_running) {
2063                 pr_err("error: session %d still has %d task running when closing\n",
2064                        session->pid, task_running);
2065                 msleep(50);
2066         }
2067         wake_up(&session->wait);
2068
2069         mutex_lock(&pservice->lock);
2070         /* remove this filp from the asynchronously notified filps */
2071         list_del_init(&session->list_session);
2072         vpu_service_session_clear(data, session);
2073         kfree(session);
2074         filp->private_data = NULL;
2075         mutex_unlock(&pservice->lock);
2076
2077         pr_debug("dev closed\n");
2078         vpu_debug_leave();
2079         return 0;
2080 }
2081
2082 static const struct file_operations vpu_service_fops = {
2083         .unlocked_ioctl = vpu_service_ioctl,
2084         .open           = vpu_service_open,
2085         .release        = vpu_service_release,
2086 #ifdef CONFIG_COMPAT
2087         .compat_ioctl   = compat_vpu_service_ioctl,
2088 #endif
2089 };
2090
2091 static irqreturn_t vdpu_irq(int irq, void *dev_id);
2092 static irqreturn_t vdpu_isr(int irq, void *dev_id);
2093 static irqreturn_t vepu_irq(int irq, void *dev_id);
2094 static irqreturn_t vepu_isr(int irq, void *dev_id);
2095 static void get_hw_info(struct vpu_subdev_data *data);
2096
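/*
 * Resolve the IOMMU platform device from its DT compatible string and
 * return its struct device, or NULL when the node or device is absent.
 */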
2097 static struct device *rockchip_get_sysmmu_dev(const char *compt)
2098 {
2099         struct device_node *dn = NULL;
2100         struct platform_device *pd = NULL;
2101         struct device *ret = NULL;
2102
2103         dn = of_find_compatible_node(NULL, NULL, compt);
2104         if (!dn) {
2105                 pr_err("can't find device node %s\n", compt);
2106                 return NULL;
2107         }
2108
2109         pd = of_find_device_by_node(dn);
2110         if (!pd) {
2111                 pr_err("can't find platform device in device node %s\n", compt);
2112                 return  NULL;
2113         }
2114         ret = &pd->dev;
2115
2116         return ret;
2117 }
2118
2119 #ifdef CONFIG_IOMMU_API
2120 static inline void platform_set_sysmmu(struct device *iommu,
2121                                        struct device *dev)
2122 {
2123         dev->archdata.iommu = iommu;
2124 }
2125 #else
2126 static inline void platform_set_sysmmu(struct device *iommu,
2127                                        struct device *dev)
2128 {
2129 }
2130 #endif
2131
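/*
 * IOMMU page fault handler: dump the mapped memory regions and the
 * register file of the task currently on the hardware for debugging,
 * then reset the hardware so the service can recover.
 */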
2132 int vcodec_sysmmu_fault_hdl(struct device *dev,
2133                             enum rk_iommu_inttype itype,
2134                             unsigned long pgtable_base,
2135                             unsigned long fault_addr, unsigned int status)
2136 {
2137         struct platform_device *pdev;
2138         struct vpu_service_info *pservice;
2139         struct vpu_subdev_data *data;
2140
2141         vpu_debug_enter();
2142
2143         if (dev == NULL) {
2144                 pr_err("invalid NULL dev\n");
2145                 return 0;
2146         }
2147
2148         pdev = container_of(dev, struct platform_device, dev);
2149         if (pdev == NULL) {
2150                 pr_err("invalid NULL platform_device\n");
2151                 return 0;
2152         }
2153
2154         data = platform_get_drvdata(pdev);
2155         if (data == NULL) {
2156                 pr_err("invalid NULL vpu_subdev_data\n");
2157                 return 0;
2158         }
2159
2160         pservice = data->pservice;
2161         if (pservice == NULL) {
2162                 pr_err("invalid NULL vpu_service_info\n");
2163                 return 0;
2164         }
2165
2166         if (pservice->reg_codec) {
2167                 struct vpu_reg *reg = pservice->reg_codec;
2168                 struct vcodec_mem_region *mem, *n;
2169                 int i = 0;
2170
2171                 pr_err("vcodec, fault addr 0x%08lx\n", fault_addr);
2172                 if (!list_empty(&reg->mem_region_list)) {
2173                         list_for_each_entry_safe(mem, n, &reg->mem_region_list,
2174                                                  reg_lnk) {
2175                                 pr_err("vcodec, reg[%02u] mem region [%02d] 0x%lx %lx\n",
2176                                        mem->reg_idx, i, mem->iova, mem->len);
2177                                 i++;
2178                         }
2179                 } else {
2180                         pr_err("no memory region mapped\n");
2181                 }
2182
2183                 if (reg->data) {
2184                         struct vpu_subdev_data *data = reg->data;
2185                         u32 *base = (u32 *)data->dec_dev.regs;
2186                         u32 len = data->hw_info->dec_reg_num;
2187
2188                         pr_err("current error register set:\n");
2189
2190                         for (i = 0; i < len; i++)
2191                                 pr_err("reg[%02d] %08x\n",
2192                                        i, readl_relaxed(base + i));
2193                 }
2194
2195                 pr_alert("vcodec, page fault occurred, reset hw\n");
2196
2197                 /* reg->reg[101] = 1; */
2198                 vpu_reset(data);
2199         }
2200
2201         return 0;
2202 }
2203
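/*
 * Probe one codec sub-device: map (or reuse) its register window,
 * identify the hardware, request the encoder/decoder interrupts,
 * optionally bind the IOMMU named in the DT, and create the character
 * device node plus its debugfs entries.
 */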
2204 static int vcodec_subdev_probe(struct platform_device *pdev,
2205                                struct vpu_service_info *pservice)
2206 {
2207         int ret = 0;
2208         struct resource *res = NULL;
2209         u32 ioaddr = 0;
2210         u8 *regs = NULL;
2211         struct vpu_hw_info *hw_info = NULL;
2212         struct device *dev = &pdev->dev;
2213         char *name = (char *)dev_name(dev);
2214         struct device_node *np = pdev->dev.of_node;
2215         struct vpu_subdev_data *data =
2216                 devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
2217         u32 iommu_en = 0;
2218         char mmu_dev_dts_name[40];
2219
        if (!data)
                return -ENOMEM;

2220         of_property_read_u32(np, "iommu_enabled", &iommu_en);
2221
2222         pr_info("probe device %s\n", dev_name(dev));
2223
2224         data->pservice = pservice;
2225         data->dev = dev;
2226
2227         of_property_read_string(np, "name", (const char **)&name);
2228         of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
2229
2230         if (pservice->reg_base == 0) {
2231                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2232                 data->regs = devm_ioremap_resource(dev, res);
2233                 if (IS_ERR(data->regs)) {
2234                         ret = PTR_ERR(data->regs);
2235                         goto err;
2236                 }
2237                 ioaddr = res->start;
2238         } else {
2239                 data->regs = pservice->reg_base;
2240                 ioaddr = pservice->ioaddr;
2241         }
2242
2243         clear_bit(MMU_ACTIVATED, &data->state);
2244         vcodec_enter_mode(data);
2245         ret = vpu_service_check_hw(data);
2246         if (ret < 0) {
2247                 vpu_err("error: hw info check failed\n");
2248                 goto err;
2249         }
2250
2251         hw_info = data->hw_info;
2252         regs = (u8 *)data->regs;
2253
2254         if (hw_info->dec_reg_num) {
2255                 data->dec_dev.iosize = hw_info->dec_io_size;
2256                 data->dec_dev.regs = (u32 *)(regs + hw_info->dec_offset);
2257         }
2258
2259         if (hw_info->enc_reg_num) {
2260                 data->enc_dev.iosize = hw_info->enc_io_size;
2261                 data->enc_dev.regs = (u32 *)(regs + hw_info->enc_offset);
2262         }
2263
2264         data->reg_size = max(hw_info->dec_io_size, hw_info->enc_io_size);
2265
2266         data->irq_enc = platform_get_irq_byname(pdev, "irq_enc");
2267         if (data->irq_enc > 0) {
2268                 ret = devm_request_threaded_irq(dev, data->irq_enc,
2269                                                 vepu_irq, vepu_isr,
2270                                                 IRQF_SHARED, dev_name(dev),
2271                                                 (void *)data);
2272                 if (ret) {
2273                         dev_err(dev, "error: can't request vepu irq %d\n",
2274                                 data->irq_enc);
2275                         goto err;
2276                 }
2277         }
2278         data->irq_dec = platform_get_irq_byname(pdev, "irq_dec");
2279         if (data->irq_dec > 0) {
2280                 ret = devm_request_threaded_irq(dev, data->irq_dec,
2281                                                 vdpu_irq, vdpu_isr,
2282                                                 IRQF_SHARED, dev_name(dev),
2283                                                 (void *)data);
2284                 if (ret) {
2285                         dev_err(dev, "error: can't request vdpu irq %d\n",
2286                                 data->irq_dec);
2287                         goto err;
2288                 }
2289         }
2290         atomic_set(&data->dec_dev.irq_count_codec, 0);
2291         atomic_set(&data->dec_dev.irq_count_pp, 0);
2292         atomic_set(&data->enc_dev.irq_count_codec, 0);
2293         atomic_set(&data->enc_dev.irq_count_pp, 0);
2294
2295         if (iommu_en) {
2296                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
2297                         sprintf(mmu_dev_dts_name,
2298                                 HEVC_IOMMU_COMPATIBLE_NAME);
2299                 else if (data->mode == VCODEC_RUNNING_MODE_VPU)
2300                         sprintf(mmu_dev_dts_name,
2301                                 VPU_IOMMU_COMPATIBLE_NAME);
2302                 else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2303                         sprintf(mmu_dev_dts_name, VDEC_IOMMU_COMPATIBLE_NAME);
2304                 else
2305                         sprintf(mmu_dev_dts_name,
2306                                 HEVC_IOMMU_COMPATIBLE_NAME);
2307
2308                 data->mmu_dev =
2309                         rockchip_get_sysmmu_dev(mmu_dev_dts_name);
2310
2311                 if (data->mmu_dev)
2312                         platform_set_sysmmu(data->mmu_dev, dev);
2313
2314                 rockchip_iovmm_set_fault_handler(dev, vcodec_sysmmu_fault_hdl);
2315         }
2316
2317         get_hw_info(data);
2318         pservice->auto_freq = true;
2319
2320         vcodec_exit_mode(data);
2321         /* create device node */
2322         ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
2323         if (ret) {
2324                 dev_err(dev, "alloc dev_t failed\n");
2325                 goto err;
2326         }
2327
2328         cdev_init(&data->cdev, &vpu_service_fops);
2329
2330         data->cdev.owner = THIS_MODULE;
2331         data->cdev.ops = &vpu_service_fops;
2332
2333         ret = cdev_add(&data->cdev, data->dev_t, 1);
2334
2335         if (ret) {
2336                 dev_err(dev, "add dev_t failed\n");
2337                 goto err;
2338         }
2339
2340         data->cls = class_create(THIS_MODULE, name);
2341
2342         if (IS_ERR(data->cls)) {
2343                 ret = PTR_ERR(data->cls);
2344                 dev_err(dev, "class_create err:%d\n", ret);
2345                 goto err;
2346         }
2347
2348         data->child_dev = device_create(data->cls, dev,
2349                 data->dev_t, NULL, name);
2350
2351         platform_set_drvdata(pdev, data);
2352
2353         INIT_LIST_HEAD(&data->lnk_service);
2354         list_add_tail(&data->lnk_service, &pservice->subdev_list);
2355
2356 #ifdef CONFIG_DEBUG_FS
2357         data->debugfs_dir = vcodec_debugfs_create_device_dir(name, parent);
2358         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2359                 data->debugfs_file_regs =
2360                         debugfs_create_file("regs", 0664, data->debugfs_dir,
2361                                         data, &debug_vcodec_fops);
2362         else
2363                 vpu_err("create debugfs dir %s failed\n", name);
2364 #endif
2365         return 0;
2366 err:
2367         if (data->child_dev) {
2368                 device_destroy(data->cls, data->dev_t);
2369                 cdev_del(&data->cdev);
2370                 unregister_chrdev_region(data->dev_t, 1);
2371         }
2372
2373         if (data->cls)
2374                 class_destroy(data->cls);
2375         return -1;
2376 }
2377
2378 static void vcodec_subdev_remove(struct vpu_subdev_data *data)
2379 {
2380         struct vpu_service_info *pservice = data->pservice;
2381
2382         mutex_lock(&pservice->lock);
2383         cancel_delayed_work_sync(&pservice->power_off_work);
2384         vpu_service_power_off(pservice);
2385         mutex_unlock(&pservice->lock);
2386
2387         device_destroy(data->cls, data->dev_t);
2388         class_destroy(data->cls);
2389         cdev_del(&data->cdev);
2390         unregister_chrdev_region(data->dev_t, 1);
2391
2392 #ifdef CONFIG_DEBUG_FS
2393         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2394                 debugfs_remove_recursive(data->debugfs_dir);
2395 #endif
2396 }
2397
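/*
 * Parse the service-level DT properties: sub-device count, the mode
 * switch bits used for combo devices, the GRF regmap (with a static
 * mapping as the ARM fallback) and the optional reset controls.
 */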
2398 static void vcodec_read_property(struct device_node *np,
2399                                  struct vpu_service_info *pservice)
2400 {
2401         pservice->mode_bit = 0;
2402         pservice->mode_ctrl = 0;
2403         pservice->subcnt = 0;
2404         pservice->grf_base = NULL;
2405
2406         of_property_read_u32(np, "subcnt", &pservice->subcnt);
2407
2408         if (pservice->subcnt > 1) {
2409                 of_property_read_u32(np, "mode_bit", &pservice->mode_bit);
2410                 of_property_read_u32(np, "mode_ctrl", &pservice->mode_ctrl);
2411         }
2412 #ifdef CONFIG_MFD_SYSCON
2413         pservice->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
2414         if (IS_ERR_OR_NULL(pservice->grf)) {
2415                 pservice->grf = NULL;
2416 #ifdef CONFIG_ARM
2417                 pservice->grf_base = RK_GRF_VIRT;
2418 #else
2419                 vpu_err("can't find vpu grf property\n");
2420                 return;
2421 #endif
2422         }
2423 #else
2424 #ifdef CONFIG_ARM
2425         pservice->grf_base = RK_GRF_VIRT;
2426 #else
2427         vpu_err("can't find vpu grf property\n");
2428         return;
2429 #endif
2430 #endif
2431
2432 #ifdef CONFIG_RESET_CONTROLLER
2433         pservice->rst_a = devm_reset_control_get(pservice->dev, "video_a");
2434         pservice->rst_h = devm_reset_control_get(pservice->dev, "video_h");
2435         pservice->rst_v = devm_reset_control_get(pservice->dev, "video");
2436
2437         if (IS_ERR_OR_NULL(pservice->rst_a)) {
2438                 pr_warn("No aclk reset resource defined\n");
2439                 pservice->rst_a = NULL;
2440         }
2441
2442         if (IS_ERR_OR_NULL(pservice->rst_h)) {
2443                 pr_warn("No hclk reset resource defined\n");
2444                 pservice->rst_h = NULL;
2445         }
2446
2447         if (IS_ERR_OR_NULL(pservice->rst_v)) {
2448                 pr_warn("No core reset resource defined\n");
2449                 pservice->rst_v = NULL;
2450         }
2451 #endif
2452
2453         of_property_read_string(np, "name", (const char **)&pservice->name);
2454 }
2455
2456 static void vcodec_init_drvdata(struct vpu_service_info *pservice)
2457 {
2458         pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2459         pservice->curr_mode = -1;
2460
2461         wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
2462         INIT_LIST_HEAD(&pservice->waiting);
2463         INIT_LIST_HEAD(&pservice->running);
2464         mutex_init(&pservice->lock);
2465
2466         INIT_LIST_HEAD(&pservice->done);
2467         INIT_LIST_HEAD(&pservice->session);
2468         INIT_LIST_HEAD(&pservice->subdev_list);
2469
2470         pservice->reg_pproc     = NULL;
2471         atomic_set(&pservice->total_running, 0);
2472         atomic_set(&pservice->enabled,       0);
2473         atomic_set(&pservice->power_on_cnt,  0);
2474         atomic_set(&pservice->power_off_cnt, 0);
2475         atomic_set(&pservice->reset_request, 0);
2476
2477         INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
2478
2479         pservice->ion_client = rockchip_ion_client_create("vpu");
2480         if (IS_ERR(pservice->ion_client)) {
2481                 vpu_err("failed to create ion client for vcodec ret %ld\n",
2482                         PTR_ERR(pservice->ion_client));
2483         } else {
2484                 vpu_debug(DEBUG_IOMMU, "vcodec ion client create success!\n");
2485         }
2486 }
2487
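/*
 * Top-level platform probe: set up the shared vpu_service_info, derive
 * the device ID from the service name, get the clocks, power the block
 * on, then probe either the sub-devices listed in the DT (combo mode)
 * or this device itself before powering back off.
 */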
2488 static int vcodec_probe(struct platform_device *pdev)
2489 {
2490         int i;
2491         int ret = 0;
2492         struct resource *res = NULL;
2493         struct device *dev = &pdev->dev;
2494         struct device_node *np = pdev->dev.of_node;
2495         struct vpu_service_info *pservice =
2496                 devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
2497
        if (!pservice)
                return -ENOMEM;

2498         pservice->dev = dev;
2499
2500         vcodec_read_property(np, pservice);
2501         vcodec_init_drvdata(pservice);
2502
2503         if (strncmp(pservice->name, "hevc_service", 12) == 0)
2504                 pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
2505         else if (strncmp(pservice->name, "vpu_service", 11) == 0)
2506                 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2507         else if (strncmp(pservice->name, "rkvdec", 6) == 0)
2508                 pservice->dev_id = VCODEC_DEVICE_ID_RKVDEC;
2509         else
2510                 pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
2511
2512         if (0 > vpu_get_clk(pservice))
2513                 goto err;
2514
2515         vpu_service_power_on(pservice);
2516
2517         if (of_property_read_bool(np, "reg")) {
2518                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2519
2520                 pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
2521                 if (IS_ERR(pservice->reg_base)) {
2522                         vpu_err("ioremap registers base failed\n");
2523                         ret = PTR_ERR(pservice->reg_base);
2524                         goto err;
2525                 }
2526                 pservice->ioaddr = res->start;
2527         } else {
2528                 pservice->reg_base = 0;
2529         }
2530
2531         if (of_property_read_bool(np, "subcnt")) {
2532                 for (i = 0; i < pservice->subcnt; i++) {
2533                         struct device_node *sub_np;
2534                         struct platform_device *sub_pdev;
2535
2536                         sub_np = of_parse_phandle(np, "rockchip,sub", i);
2537                         sub_pdev = of_find_device_by_node(sub_np);
2538
2539                         vcodec_subdev_probe(sub_pdev, pservice);
2540                 }
2541         } else {
2542                 vcodec_subdev_probe(pdev, pservice);
2543         }
2544
2545         vpu_service_power_off(pservice);
2546
2547         pr_info("init success\n");
2548
2549         return 0;
2550
2551 err:
2552         pr_info("init failed\n");
2553         vpu_service_power_off(pservice);
2554         vpu_put_clk(pservice);
2555         wake_lock_destroy(&pservice->wake_lock);
2556
2557         return ret;
2558 }
2559
2560 static int vcodec_remove(struct platform_device *pdev)
2561 {
2562         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2563
2564         vcodec_subdev_remove(data);
2565         return 0;
2566 }
2567
2568 #if defined(CONFIG_OF)
2569 static const struct of_device_id vcodec_service_dt_ids[] = {
2570         {.compatible = "rockchip,vpu_service",},
2571         {.compatible = "rockchip,hevc_service",},
2572         {.compatible = "rockchip,vpu_combo",},
2573         {.compatible = "rockchip,rkvdec",},
2574         {},
2575 };
2576 #endif
2577
2578 static struct platform_driver vcodec_driver = {
2579         .probe = vcodec_probe,
2580         .remove = vcodec_remove,
2581         .driver = {
2582                 .name = "vcodec",
2583                 .owner = THIS_MODULE,
2584 #if defined(CONFIG_OF)
2585                 .of_match_table = of_match_ptr(vcodec_service_dt_ids),
2586 #endif
2587         },
2588 };
2589
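/*
 * Fill in the decoder/encoder capability blocks reported through
 * VPU_IOC_GET_HW_FUSE_STATUS based on the SoC and, in VPU mode, the
 * encoder configuration register, and decide whether automatic
 * frequency scaling is used for this device.
 */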
2590 static void get_hw_info(struct vpu_subdev_data *data)
2591 {
2592         struct vpu_service_info *pservice = data->pservice;
2593         struct vpu_dec_config *dec = &pservice->dec_config;
2594         struct vpu_enc_config *enc = &pservice->enc_config;
2595
2596         if (cpu_is_rk2928() || cpu_is_rk3036() ||
2597             cpu_is_rk30xx() || cpu_is_rk312x() ||
2598             cpu_is_rk3188())
2599                 dec->max_dec_pic_width = 1920;
2600         else
2601                 dec->max_dec_pic_width = 4096;
2602
2603         if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2604                 dec->h264_support = 3;
2605                 dec->jpeg_support = 1;
2606                 dec->mpeg4_support = 2;
2607                 dec->vc1_support = 3;
2608                 dec->mpeg2_support = 1;
2609                 dec->pp_support = 1;
2610                 dec->sorenson_support = 1;
2611                 dec->ref_buf_support = 3;
2612                 dec->vp6_support = 1;
2613                 dec->vp7_support = 1;
2614                 dec->vp8_support = 1;
2615                 dec->avs_support = 1;
2616                 dec->jpeg_ext_support = 0;
2617                 dec->custom_mpeg4_support = 1;
2618                 dec->reserve = 0;
2619                 dec->mvc_support = 1;
2620
2621                 if (!cpu_is_rk3036()) {
2622                         u32 config_reg = readl_relaxed(data->enc_dev.regs + 63);
2623
2624                         enc->max_encoded_width = config_reg & ((1 << 11) - 1);
2625                         enc->h264_enabled = 1;
2626                         enc->mpeg4_enabled = (config_reg >> 26) & 1;
2627                         enc->jpeg_enabled = 1;
2628                         enc->vs_enabled = (config_reg >> 24) & 1;
2629                         enc->rgb_enabled = (config_reg >> 28) & 1;
2630                         enc->reg_size = data->reg_size;
2631                         enc->reserv[0] = 0;
2632                         enc->reserv[1] = 0;
2633                 }
2634
2635                 pservice->auto_freq = true;
2636                 vpu_debug(DEBUG_EXTRA_INFO, "vpu_service set to auto frequency mode\n");
2637                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2638
2639                 pservice->bug_dec_addr = cpu_is_rk30xx();
2640         } else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC) {
2641                 pservice->auto_freq = true;
2642                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2643         } else {
2644                 /* disable frequency switching in hevc. */
2645                 pservice->auto_freq = false;
2646         }
2647 }
2648
2649 static bool check_irq_err(struct vpu_task_info *task, u32 irq_status)
2650 {
2651         vpu_debug(DEBUG_IRQ_CHECK, "task %s status %08x mask %08x\n",
2652                   task->name, irq_status, task->error_mask);
2653
2654         return (task->error_mask & irq_status) ? true : false;
2655 }
2656
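/*
 * Decoder hard IRQ: read and clear the decoder (and, on VPU-style
 * hardware, post-processor) interrupt status, request a reset when an
 * error bit is set, and wake the threaded handler once a task has
 * completed.
 */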
2657 static irqreturn_t vdpu_irq(int irq, void *dev_id)
2658 {
2659         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2660         struct vpu_service_info *pservice = data->pservice;
2661         struct vpu_task_info *task = NULL;
2662         struct vpu_device *dev = &data->dec_dev;
2663         u32 hw_id = data->hw_info->hw_id;
2664         u32 raw_status;
2665         u32 dec_status;
2666
2667         task = &data->task_info[TASK_DEC];
2668
2669         raw_status = readl_relaxed(dev->regs + task->reg_irq);
2670         dec_status = raw_status;
2671
2672         vpu_debug(DEBUG_TASK_INFO, "vdpu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2673                   task->reg_irq, dec_status,
2674                   task->irq_mask, task->ready_mask, task->error_mask);
2675
2676         if (dec_status & task->irq_mask) {
2677                 time_record(task, 1);
2678                 vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq dec status %08x\n",
2679                           dec_status);
2680                 if ((dec_status & 0x40001) == 0x40001) {
2681                         do {
2682                                 dec_status =
2683                                         readl_relaxed(dev->regs +
2684                                                 task->reg_irq);
2685                         } while ((dec_status & 0x40001) == 0x40001);
2686                 }
2687
2688                 if (check_irq_err(task, dec_status))
2689                         atomic_add(1, &pservice->reset_request);
2690
2691                 writel_relaxed(0, dev->regs + task->reg_irq);
2692
2693                 /*
2694                  * NOTE: rkvdec needs a reset after each task to avoid a timeout
2695                  *       error when switching from H.264 to H.265
2696                  */
2697                 if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2698                         writel(0x100000, dev->regs + task->reg_irq);
2699
2700                 /* set clock gating to save power */
2701                 writel(task->gating_mask, dev->regs + task->reg_irq);
2702
2703                 atomic_add(1, &dev->irq_count_codec);
2704                 time_diff(task);
2705         }
2706
2707         task = &data->task_info[TASK_PP];
2708         if (hw_id != HEVC_ID && hw_id != RKV_DEC_ID) {
2709                 u32 pp_status = readl_relaxed(dev->regs + task->irq_mask);
2710
2711                 if (pp_status & task->irq_mask) {
2712                         time_record(task, 1);
2713                         vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq pp status %08x\n",
2714                                   pp_status);
2715
2716                         if (check_irq_err(task, dec_status))
2717                                 atomic_add(1, &pservice->reset_request);
2718
2719                         /* clear pp IRQ */
2720                         writel_relaxed(pp_status & (~task->reg_irq),
2721                                        dev->regs + task->irq_mask);
2722                         atomic_add(1, &dev->irq_count_pp);
2723                         time_diff(task);
2724                 }
2725         }
2726
2727         pservice->irq_status = raw_status;
2728
2729         if (atomic_read(&dev->irq_count_pp) ||
2730             atomic_read(&dev->irq_count_codec))
2731                 return IRQ_WAKE_THREAD;
2732         else
2733                 return IRQ_NONE;
2734 }
2735
2736 static irqreturn_t vdpu_isr(int irq, void *dev_id)
2737 {
2738         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2739         struct vpu_service_info *pservice = data->pservice;
2740         struct vpu_device *dev = &data->dec_dev;
2741
2742         mutex_lock(&pservice->lock);
2743         if (atomic_read(&dev->irq_count_codec)) {
2744                 atomic_sub(1, &dev->irq_count_codec);
2745                 if (pservice->reg_codec == NULL) {
2746                         vpu_err("error: dec isr with no task waiting\n");
2747                 } else {
2748                         reg_from_run_to_done(data, pservice->reg_codec);
2749                         /* avoid the vpu timing out and being unable to recover */
2750                         VDPU_SOFT_RESET(data->regs);
2751                 }
2752         }
2753
2754         if (atomic_read(&dev->irq_count_pp)) {
2755                 atomic_sub(1, &dev->irq_count_pp);
2756                 if (pservice->reg_pproc == NULL)
2757                         vpu_err("error: pp isr with no task waiting\n");
2758                 else
2759                         reg_from_run_to_done(data, pservice->reg_pproc);
2760         }
2761         try_set_reg(data);
2762         mutex_unlock(&pservice->lock);
2763         return IRQ_HANDLED;
2764 }
2765
2766 static irqreturn_t vepu_irq(int irq, void *dev_id)
2767 {
2768         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2769         struct vpu_service_info *pservice = data->pservice;
2770         struct vpu_task_info *task = &data->task_info[TASK_ENC];
2771         struct vpu_device *dev = &data->enc_dev;
2772         u32 irq_status;
2773
2774         irq_status = readl_relaxed(dev->regs + task->reg_irq);
2775
2776         vpu_debug(DEBUG_TASK_INFO, "vepu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2777                   task->reg_irq, irq_status,
2778                   task->irq_mask, task->ready_mask, task->error_mask);
2779
2780         vpu_debug(DEBUG_IRQ_STATUS, "vepu_irq enc status %08x\n", irq_status);
2781
2782         if (likely(irq_status & task->irq_mask)) {
2783                 time_record(task, 1);
2784
2785                 if (check_irq_err(task, irq_status))
2786                         atomic_add(1, &pservice->reset_request);
2787
2788                 /* clear enc IRQ */
2789                 writel_relaxed(irq_status & (~task->irq_mask),
2790                                dev->regs + task->reg_irq);
2791
2792                 atomic_add(1, &dev->irq_count_codec);
2793                 time_diff(task);
2794         }
2795
2796         pservice->irq_status = irq_status;
2797
2798         if (atomic_read(&dev->irq_count_codec))
2799                 return IRQ_WAKE_THREAD;
2800         else
2801                 return IRQ_NONE;
2802 }
2803
2804 static irqreturn_t vepu_isr(int irq, void *dev_id)
2805 {
2806         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2807         struct vpu_service_info *pservice = data->pservice;
2808         struct vpu_device *dev = &data->enc_dev;
2809
2810         mutex_lock(&pservice->lock);
2811         if (atomic_read(&dev->irq_count_codec)) {
2812                 atomic_sub(1, &dev->irq_count_codec);
2813                 if (pservice->reg_codec == NULL)
2814                         vpu_err("error: enc isr with no task waiting\n");
2815                 else
2816                         reg_from_run_to_done(data, pservice->reg_codec);
2817         }
2818         try_set_reg(data);
2819         mutex_unlock(&pservice->lock);
2820         return IRQ_HANDLED;
2821 }
2822
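/*
 * Module entry point: register the platform driver and, when
 * CONFIG_DEBUG_FS is enabled, create the "vcodec" debugfs root used by
 * the dump code at the end of this file.
 */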
2823 static int __init vcodec_service_init(void)
2824 {
2825         int ret = platform_driver_register(&vcodec_driver);
2826
2827         if (ret) {
2828                 vpu_err("Platform driver register failed (%d).\n", ret);
2829                 return ret;
2830         }
2831
2832 #ifdef CONFIG_DEBUG_FS
2833         vcodec_debugfs_init();
2834 #endif
2835
2836         return ret;
2837 }
2838
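/* Module exit: tear down debugfs (if built) and unregister the driver. */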
2839 static void __exit vcodec_service_exit(void)
2840 {
2841 #ifdef CONFIG_DEBUG_FS
2842         vcodec_debugfs_exit();
2843 #endif
2844
2845         platform_driver_unregister(&vcodec_driver);
2846 }
2847
2848 module_init(vcodec_service_init);
2849 module_exit(vcodec_service_exit);
2850 MODULE_LICENSE("GPL v2");
2851
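/*
 * debugfs support: vcodec_debugfs_init() creates the top-level
 * "vcodec" directory, vcodec_debugfs_create_device_dir() adds one
 * sub-directory per sub-device, and debug_vcodec_show() dumps the
 * encoder/decoder register files together with the per-session task
 * queues.
 */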
2852 #ifdef CONFIG_DEBUG_FS
2853 #include <linux/seq_file.h>
2854
2855 static int vcodec_debugfs_init(void)
2856 {
2857         parent = debugfs_create_dir("vcodec", NULL);
2858         if (!parent)
2859                 return -1;
2860
2861         return 0;
2862 }
2863
2864 static void vcodec_debugfs_exit(void)
2865 {
2866         debugfs_remove(parent);
2867 }
2868
2869 static struct dentry *vcodec_debugfs_create_device_dir(
2870                 char *dirname, struct dentry *parent)
2871 {
2872         return debugfs_create_dir(dirname, parent);
2873 }
2874
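/*
 * Produce the seq_file contents for a sub-device debugfs node. The
 * hardware is powered on around the register reads so that the dump
 * stays valid while the codec is otherwise idle. A typical read from
 * user space (the sub-path is an assumption, it depends on the
 * directory name chosen at probe time):
 *
 *     cat /sys/kernel/debug/vcodec/<device>/<node>
 */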
2875 static int debug_vcodec_show(struct seq_file *s, void *unused)
2876 {
2877         struct vpu_subdev_data *data = s->private;
2878         struct vpu_service_info *pservice = data->pservice;
2879         unsigned int i, n;
2880         struct vpu_reg *reg, *reg_tmp;
2881         struct vpu_session *session, *session_tmp;
2882
2883         mutex_lock(&pservice->lock);
2884         vpu_service_power_on(pservice);
2885         if (data->hw_info->hw_id != HEVC_ID) {
2886                 seq_puts(s, "\nENC Registers:\n");
2887                 n = data->enc_dev.iosize >> 2;
2888
2889                 for (i = 0; i < n; i++)
2890                         seq_printf(s, "\tswreg%d = %08X\n", i,
2891                                    readl_relaxed(data->enc_dev.regs + i));
2892         }
2893
2894         seq_puts(s, "\nDEC Registers:\n");
2895
2896         n = data->dec_dev.iosize >> 2;
2897         for (i = 0; i < n; i++)
2898                 seq_printf(s, "\tswreg%d = %08X\n", i,
2899                            readl_relaxed(data->dec_dev.regs + i));
2900
2901         seq_puts(s, "\nvpu service status:\n");
2902
2903         list_for_each_entry_safe(session, session_tmp,
2904                                  &pservice->session, list_session) {
2905                 seq_printf(s, "session pid %d type %d:\n",
2906                            session->pid, session->type);
2907
2908                 list_for_each_entry_safe(reg, reg_tmp,
2909                                          &session->waiting, session_link) {
2910                         seq_printf(s, "waiting register set %p\n", reg);
2911                 }
2912                 list_for_each_entry_safe(reg, reg_tmp,
2913                                          &session->running, session_link) {
2914                         seq_printf(s, "running register set %p\n", reg);
2915                 }
2916                 list_for_each_entry_safe(reg, reg_tmp,
2917                                          &session->done, session_link) {
2918                         seq_printf(s, "done    register set %p\n", reg);
2919                 }
2920         }
2921
2922         seq_printf(s, "\npower counter: on %d off %d\n",
2923                    atomic_read(&pservice->power_on_cnt),
2924                    atomic_read(&pservice->power_off_cnt));
2925
2926         mutex_unlock(&pservice->lock);
2927         vpu_service_power_off(pservice);
2928
2929         return 0;
2930 }
2931
2932 static int debug_vcodec_open(struct inode *inode, struct file *file)
2933 {
2934         return single_open(file, debug_vcodec_show, inode->i_private);
2935 }
2936
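/*
 * debug_vcodec_open() is hooked up to debugfs through a file_operations
 * table declared elsewhere in this file; a minimal sketch of that
 * wiring, with the struct name assumed for illustration:
 *
 *     static const struct file_operations debug_vcodec_fops = {
 *             .open    = debug_vcodec_open,
 *             .read    = seq_read,
 *             .llseek  = seq_lseek,
 *             .release = single_release,
 *     };
 *
 * Each sub-device directory created by vcodec_debugfs_create_device_dir()
 * then gets a node registered against it with debugfs_create_file().
 */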
2937 #endif
2938