1 /**
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  * author: chenhengming chm@rock-chips.com
4  *         Alpha Lin, alpha.lin@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/clk.h>
20 #include <linux/compat.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/fs.h>
26 #include <linux/mm.h>
27 #include <linux/platform_device.h>
28 #include <linux/reset.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/wakelock.h>
32 #include <linux/cdev.h>
33 #include <linux/of.h>
34 #include <linux/of_platform.h>
35 #include <linux/of_irq.h>
36 #include <linux/regmap.h>
37 #include <linux/mfd/syscon.h>
38 #include <linux/uaccess.h>
39 #include <linux/debugfs.h>
40 #include <linux/pm_runtime.h>
41
42 #include <linux/rockchip/cpu.h>
43 #include <linux/rockchip/cru.h>
44 #include <linux/rockchip/pmu.h>
45 #include <linux/rockchip/grf.h>
46
47 #if defined(CONFIG_ION_ROCKCHIP)
48 #include <linux/rockchip_ion.h>
49 #endif
50
51 #include <linux/rockchip-iovmm.h>
52 #include <linux/dma-buf.h>
53
54 #include "vcodec_hw_info.h"
55 #include "vcodec_hw_vpu.h"
56 #include "vcodec_hw_rkv.h"
57 #include "vcodec_hw_vpu2.h"
58
59 #include "vcodec_service.h"
60
61 /*
62  * debug flag usage:
63  * +------+-------------------+
64  * | 8bit |      24bit        |
65  * +------+-------------------+
66  * bits  0~23 select the information type
67  * bits 24~31 select the information print format
68  */
69
70 #define DEBUG_POWER                             0x00000001
71 #define DEBUG_CLOCK                             0x00000002
72 #define DEBUG_IRQ_STATUS                        0x00000004
73 #define DEBUG_IOMMU                             0x00000008
74 #define DEBUG_IOCTL                             0x00000010
75 #define DEBUG_FUNCTION                          0x00000020
76 #define DEBUG_REGISTER                          0x00000040
77 #define DEBUG_EXTRA_INFO                        0x00000080
78 #define DEBUG_TIMING                            0x00000100
79 #define DEBUG_TASK_INFO                         0x00000200
80
81 #define DEBUG_SET_REG                           0x00001000
82 #define DEBUG_GET_REG                           0x00002000
83 #define DEBUG_PPS_FILL                          0x00004000
84 #define DEBUG_IRQ_CHECK                         0x00008000
85 #define DEBUG_CACHE_32B                         0x00010000
86
87 #define PRINT_FUNCTION                          0x80000000
88 #define PRINT_LINE                              0x40000000
89
90 static int debug;
91 module_param(debug, int, S_IRUGO | S_IWUSR);
92 MODULE_PARM_DESC(debug, "bit switch for vcodec_service debug information");
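/*
 * Illustrative example (not from the original source): to trace iommu and
 * irq status handling together, combine the DEBUG_* flags above, e.g.
 *   debug = DEBUG_IOMMU | DEBUG_IRQ_STATUS = 0x00000008 | 0x00000004 = 0x0000000c
 * and write that value to the writable module parameter at runtime,
 * typically via /sys/module/<module name>/parameters/debug.
 */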
93
94 #define VCODEC_CLOCK_ENABLE     1
95
96 /*
97  * hardware information organization
98  *
99  * In order to support multiple hardware blocks with different versions, the
100  * hardware information is organized as follows:
101  *
102  * 1. First, index the hardware by register size / position.
103  *    This information is fixed for each hardware block and does not relate to
104  *    the runtime work flow; it is only related to resource allocation.
105  *    Descriptor: struct vpu_hw_info
106  *
107  * 2. Then, index the hardware by runtime configuration.
108  *    This information is related to runtime setting behavior, including the
109  *    enable register, irq register and other key control flags.
110  *    Descriptor: struct vpu_task_info
111  *
112  * 3. Finally, in the iommu case the fd translation is required.
113  *    Descriptor: struct vpu_trans_info
114  */
115
116 enum VPU_FREQ {
117         VPU_FREQ_200M,
118         VPU_FREQ_266M,
119         VPU_FREQ_300M,
120         VPU_FREQ_400M,
121         VPU_FREQ_500M,
122         VPU_FREQ_600M,
123         VPU_FREQ_DEFAULT,
124         VPU_FREQ_BUT,
125 };
126
127 struct extra_info_elem {
128         u32 index;
129         u32 offset;
130 };
131
132 #define EXTRA_INFO_MAGIC        0x4C4A46
133
134 struct extra_info_for_iommu {
135         u32 magic;
136         u32 cnt;
137         struct extra_info_elem elem[20];
138 };
139
140 #define MHZ                                     (1000*1000)
141 #define SIZE_REG(reg)                           ((reg)*4)
142
143 static struct vcodec_info vcodec_info_set[] = {
144         [0] = {
145                 .hw_id          = VPU_ID_8270,
146                 .hw_info        = &hw_vpu_8270,
147                 .task_info      = task_vpu,
148                 .trans_info     = trans_vpu,
149         },
150         [1] = {
151                 .hw_id          = VPU_ID_4831,
152                 .hw_info        = &hw_vpu_4831,
153                 .task_info      = task_vpu,
154                 .trans_info     = trans_vpu,
155         },
156         [2] = {
157                 .hw_id          = VPU_DEC_ID_9190,
158                 .hw_info        = &hw_vpu_9190,
159                 .task_info      = task_vpu,
160                 .trans_info     = trans_vpu,
161         },
162         [3] = {
163                 .hw_id          = HEVC_ID,
164                 .hw_info        = &hw_rkhevc,
165                 .task_info      = task_rkv,
166                 .trans_info     = trans_rkv,
167         },
168         [4] = {
169                 .hw_id          = RKV_DEC_ID,
170                 .hw_info        = &hw_rkvdec,
171                 .task_info      = task_rkv,
172                 .trans_info     = trans_rkv,
173         },
174         [5] = {
175                 .hw_id          = VPU2_ID,
176                 .hw_info        = &hw_vpu2,
177                 .task_info      = task_vpu2,
178                 .trans_info     = trans_vpu2,
179         },
180 };
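/*
 * Illustrative sketch (not part of the original driver): given the table
 * above, a probe path could resolve the descriptor set for a detected
 * hardware id roughly as follows. The helper name is hypothetical and it is
 * not referenced anywhere else in this file.
 */
static inline struct vcodec_info *vcodec_find_info(u32 hw_id)
{
        unsigned int i;

        /* walk vcodec_info_set until the hardware id matches */
        for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
                if (vcodec_info_set[i].hw_id == hw_id)
                        return &vcodec_info_set[i];
        }

        return NULL;
}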
181
182 #define DEBUG
183 #ifdef DEBUG
184 #define vpu_debug_func(type, fmt, args...)                      \
185         do {                                                    \
186                 if (unlikely(debug & type)) {                   \
187                         pr_info("%s:%d: " fmt,                  \
188                                  __func__, __LINE__, ##args);   \
189                 }                                               \
190         } while (0)
191 #define vpu_debug(type, fmt, args...)                           \
192         do {                                                    \
193                 if (unlikely(debug & type)) {                   \
194                         pr_info(fmt, ##args);                   \
195                 }                                               \
196         } while (0)
197 #else
198 #define vpu_debug_func(level, fmt, args...)
199 #define vpu_debug(level, fmt, args...)
200 #endif
201
202 #define vpu_debug_enter() vpu_debug_func(DEBUG_FUNCTION, "enter\n")
203 #define vpu_debug_leave() vpu_debug_func(DEBUG_FUNCTION, "leave\n")
204
205 #define vpu_err(fmt, args...)                           \
206                 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
207
208 enum VPU_DEC_FMT {
209         VPU_DEC_FMT_H264,
210         VPU_DEC_FMT_MPEG4,
211         VPU_DEC_FMT_H263,
212         VPU_DEC_FMT_JPEG,
213         VPU_DEC_FMT_VC1,
214         VPU_DEC_FMT_MPEG2,
215         VPU_DEC_FMT_MPEG1,
216         VPU_DEC_FMT_VP6,
217         VPU_DEC_FMT_RESERV0,
218         VPU_DEC_FMT_VP7,
219         VPU_DEC_FMT_VP8,
220         VPU_DEC_FMT_AVS,
221         VPU_DEC_FMT_RES
222 };
223
224 /**
225  * per-process session structure which connects to the vpu
226  *
227  * @author ChenHengming (2011-5-3)
228  */
229 struct vpu_session {
230         enum VPU_CLIENT_TYPE type;
231         /* a linked list of data so we can access them for debugging */
232         struct list_head list_session;
233         /* a linked list of register data waiting for process */
234         struct list_head waiting;
235         /* a linked list of register data in processing */
236         struct list_head running;
237         /* a linked list of register data processed */
238         struct list_head done;
239         wait_queue_head_t wait;
240         pid_t pid;
241         atomic_t task_running;
242 };
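/*
 * A register set (struct vpu_reg below) travels through these lists in
 * order: reg_init() queues it on 'waiting', reg_from_wait_to_run() moves it
 * to 'running' when it is handed to the hardware, and reg_from_run_to_done()
 * moves it to 'done' once the interrupt result has been copied back and the
 * waiter on 'wait' is woken.
 */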
243
244 /**
245  * structure for a register set submitted by a process
246  *
247  * @author ChenHengming (2011-5-4)
248  */
249 struct vpu_reg {
250         enum VPU_CLIENT_TYPE type;
251         enum VPU_FREQ freq;
252         struct vpu_session *session;
253         struct vpu_subdev_data *data;
254         struct vpu_task_info *task;
255         const struct vpu_trans_info *trans;
256
257         /* link to vpu service session */
258         struct list_head session_link;
259         /* link to register set list */
260         struct list_head status_link;
261
262         unsigned long size;
263         struct list_head mem_region_list;
264         u32 dec_base;
265         u32 *reg;
266 };
267
268 struct vpu_device {
269         atomic_t irq_count_codec;
270         atomic_t irq_count_pp;
271         unsigned int iosize;
272         u32 *regs;
273 };
274
275 enum vcodec_device_id {
276         VCODEC_DEVICE_ID_VPU,
277         VCODEC_DEVICE_ID_HEVC,
278         VCODEC_DEVICE_ID_COMBO,
279         VCODEC_DEVICE_ID_RKVDEC,
280         VCODEC_DEVICE_ID_BUTT
281 };
282
283 enum VCODEC_RUNNING_MODE {
284         VCODEC_RUNNING_MODE_NONE = -1,
285         VCODEC_RUNNING_MODE_VPU,
286         VCODEC_RUNNING_MODE_HEVC,
287         VCODEC_RUNNING_MODE_RKVDEC
288 };
289
290 struct vcodec_mem_region {
291         struct list_head srv_lnk;
292         struct list_head reg_lnk;
293         struct list_head session_lnk;
294         unsigned long iova;     /* virtual address for iommu */
295         unsigned long len;
296         u32 reg_idx;
297         struct ion_handle *hdl;
298 };
299
300 enum vpu_ctx_state {
301         MMU_ACTIVATED   = BIT(0)
302 };
303
304 struct vpu_subdev_data {
305         struct cdev cdev;
306         dev_t dev_t;
307         struct class *cls;
308         struct device *child_dev;
309
310         int irq_enc;
311         int irq_dec;
312         struct vpu_service_info *pservice;
313
314         u32 *regs;
315         enum VCODEC_RUNNING_MODE mode;
316         struct list_head lnk_service;
317
318         struct device *dev;
319
320         struct vpu_device enc_dev;
321         struct vpu_device dec_dev;
322
323         enum VPU_HW_ID hw_id;
324         struct vpu_hw_info *hw_info;
325         struct vpu_task_info *task_info;
326         const struct vpu_trans_info *trans_info;
327
328         u32 reg_size;
329         unsigned long state;
330
331 #ifdef CONFIG_DEBUG_FS
332         struct dentry *debugfs_dir;
333         struct dentry *debugfs_file_regs;
334 #endif
335
336         struct device *mmu_dev;
337 };
338
339 struct vpu_service_info {
340         struct wake_lock wake_lock;
341         struct delayed_work power_off_work;
342         ktime_t last; /* record previous power-on time */
343         /* vpu service structure global lock */
344         struct mutex lock;
345         /* link to link_reg in struct vpu_reg */
346         struct list_head waiting;
347         /* link to link_reg in struct vpu_reg */
348         struct list_head running;
349         /* link to link_reg in struct vpu_reg */
350         struct list_head done;
351         /* link to list_session in struct vpu_session */
352         struct list_head session;
353         atomic_t total_running;
354         atomic_t enabled;
355         atomic_t power_on_cnt;
356         atomic_t power_off_cnt;
357         struct vpu_reg *reg_codec;
358         struct vpu_reg *reg_pproc;
359         struct vpu_reg *reg_resev;
360         struct vpu_dec_config dec_config;
361         struct vpu_enc_config enc_config;
362
363         bool auto_freq;
364         bool bug_dec_addr;
365         atomic_t freq_status;
366
367         struct clk *aclk_vcodec;
368         struct clk *hclk_vcodec;
369         struct clk *clk_core;
370         struct clk *clk_cabac;
371         struct clk *pd_video;
372
373 #ifdef CONFIG_RESET_CONTROLLER
374         struct reset_control *rst_a;
375         struct reset_control *rst_h;
376         struct reset_control *rst_v;
377 #endif
378         struct device *dev;
379
380         u32 irq_status;
381         atomic_t reset_request;
382         struct ion_client *ion_client;
383         struct list_head mem_region_list;
384
385         enum vcodec_device_id dev_id;
386
387         enum VCODEC_RUNNING_MODE curr_mode;
388         u32 prev_mode;
389
390         struct delayed_work simulate_work;
391
392         u32 mode_bit;
393         u32 mode_ctrl;
394         u32 *reg_base;
395         u32 ioaddr;
396         struct regmap *grf;
397         u32 *grf_base;
398
399         char *name;
400
401         u32 subcnt;
402         struct list_head subdev_list;
403 };
404
405 struct vpu_request {
406         u32 *req;
407         u32 size;
408 };
409
410 #ifdef CONFIG_COMPAT
411 struct compat_vpu_request {
412         compat_uptr_t req;
413         u32 size;
414 };
415 #endif
416
417 /* debugfs root directory for all devices (vpu, hevc). */
418 static struct dentry *parent;
419
420 #ifdef CONFIG_DEBUG_FS
421 static int vcodec_debugfs_init(void);
422 static void vcodec_debugfs_exit(void);
423 static struct dentry *vcodec_debugfs_create_device_dir(
424                 char *dirname, struct dentry *parent);
425 static int debug_vcodec_open(struct inode *inode, struct file *file);
426
427 static const struct file_operations debug_vcodec_fops = {
428         .open = debug_vcodec_open,
429         .read = seq_read,
430         .llseek = seq_lseek,
431         .release = single_release,
432 };
433 #endif
434
435 #define VDPU_SOFT_RESET_REG     101
436 #define VDPU_CLEAN_CACHE_REG    516
437 #define VEPU_CLEAN_CACHE_REG    772
438 #define HEVC_CLEAN_CACHE_REG    260
439
440 #define VPU_REG_ENABLE(base, reg)       writel_relaxed(1, base + reg)
441
442 #define VDPU_SOFT_RESET(base)   VPU_REG_ENABLE(base, VDPU_SOFT_RESET_REG)
443 #define VDPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VDPU_CLEAN_CACHE_REG)
444 #define VEPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VEPU_CLEAN_CACHE_REG)
445 #define HEVC_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, HEVC_CLEAN_CACHE_REG)
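/*
 * Note: 'base' is a u32 *, so the register constants above are 32-bit word
 * indices rather than byte offsets; the pointer arithmetic scales them by 4.
 */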
446
447 #define VPU_POWER_OFF_DELAY             (4 * HZ) /* 4s */
448 #define VPU_TIMEOUT_DELAY               (2 * HZ) /* 2s */
449
450 static void time_record(struct vpu_task_info *task, int is_end)
451 {
452         if (unlikely(debug & DEBUG_TIMING) && task)
453                 do_gettimeofday((is_end) ? (&task->end) : (&task->start));
454 }
455
456 static void time_diff(struct vpu_task_info *task)
457 {
458         vpu_debug(DEBUG_TIMING, "%s task: %ld ms\n", task->name,
459                   (task->end.tv_sec  - task->start.tv_sec)  * 1000 +
460                   (task->end.tv_usec - task->start.tv_usec) / 1000);
461 }
462
463 static void vcodec_enter_mode(struct vpu_subdev_data *data)
464 {
465         int bits;
466         u32 raw = 0;
467         struct vpu_service_info *pservice = data->pservice;
468         struct vpu_subdev_data *subdata, *n;
469
470         if (pservice->subcnt < 2) {
471                 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
472                         set_bit(MMU_ACTIVATED, &data->state);
473                         if (atomic_read(&pservice->enabled))
474                                 rockchip_iovmm_activate(data->dev);
475                         else
476                                 BUG_ON(!atomic_read(&pservice->enabled));
477                 }
478                 return;
479         }
480
481         if (pservice->curr_mode == data->mode)
482                 return;
483
484         vpu_debug(DEBUG_IOMMU, "vcodec enter mode %d\n", data->mode);
485         list_for_each_entry_safe(subdata, n,
486                                  &pservice->subdev_list, lnk_service) {
487                 if (data != subdata && subdata->mmu_dev &&
488                     test_bit(MMU_ACTIVATED, &subdata->state)) {
489                         clear_bit(MMU_ACTIVATED, &subdata->state);
490                         rockchip_iovmm_deactivate(subdata->dev);
491                 }
492         }
493         bits = 1 << pservice->mode_bit;
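        /*
         * The GRF mode control register uses the usual Rockchip write-mask
         * layout: the upper 16 bits enable writes to the matching lower 16
         * bits. For example, if mode_bit were 15 then bits == 0x8000 and
         *   HEVC mode:   write raw | 0x8000 | (0x8000 << 16)
         *   other mode:  write (raw & ~0x8000) | (0x8000 << 16)
         * so only the selected bit is changed by the write.
         */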
494 #ifdef CONFIG_MFD_SYSCON
495         if (pservice->grf) {
496                 regmap_read(pservice->grf, pservice->mode_ctrl, &raw);
497
498                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
499                         regmap_write(pservice->grf, pservice->mode_ctrl,
500                                      raw | bits | (bits << 16));
501                 else
502                         regmap_write(pservice->grf, pservice->mode_ctrl,
503                                      (raw & (~bits)) | (bits << 16));
504         } else if (pservice->grf_base) {
505                 u32 *grf_base = pservice->grf_base;
506
507                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
508                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
509                         writel_relaxed(raw | bits | (bits << 16),
510                                        grf_base + pservice->mode_ctrl / 4);
511                 else
512                         writel_relaxed((raw & (~bits)) | (bits << 16),
513                                        grf_base + pservice->mode_ctrl / 4);
514         } else {
515                 vpu_err("no grf resource defined, failed to switch decoder\n");
516                 return;
517         }
518 #else
519         if (pservice->grf_base) {
520                 u32 *grf_base = pservice->grf_base;
521
522                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
523                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
524                         writel_relaxed(raw | bits | (bits << 16),
525                                        grf_base + pservice->mode_ctrl / 4);
526                 else
527                         writel_relaxed((raw & (~bits)) | (bits << 16),
528                                        grf_base + pservice->mode_ctrl / 4);
529         } else {
530                 vpu_err("no grf resource defined, failed to switch decoder\n");
531                 return;
532         }
533 #endif
534         if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
535                 set_bit(MMU_ACTIVATED, &data->state);
536                 if (atomic_read(&pservice->enabled))
537                         rockchip_iovmm_activate(data->dev);
538                 else
539                         BUG_ON(!atomic_read(&pservice->enabled));
540         }
541
542         pservice->prev_mode = pservice->curr_mode;
543         pservice->curr_mode = data->mode;
544 }
545
546 static void vcodec_exit_mode(struct vpu_subdev_data *data)
547 {
548         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
549                 clear_bit(MMU_ACTIVATED, &data->state);
550                 rockchip_iovmm_deactivate(data->dev);
551                 data->pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
552         }
553 }
554
555 static int vpu_get_clk(struct vpu_service_info *pservice)
556 {
557 #if VCODEC_CLOCK_ENABLE
558         struct device *dev = pservice->dev;
559
560         switch (pservice->dev_id) {
561         case VCODEC_DEVICE_ID_HEVC:
562                 pservice->pd_video = devm_clk_get(dev, "pd_hevc");
563                 if (IS_ERR(pservice->pd_video)) {
564                         dev_err(dev, "failed on clk_get pd_hevc\n");
565                         return -1;
566                 }
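                /* no break: hevc also needs the cabac/core clocks below */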
567         case VCODEC_DEVICE_ID_COMBO:
568         case VCODEC_DEVICE_ID_RKVDEC:
569                 pservice->clk_cabac = devm_clk_get(dev, "clk_cabac");
570                 if (IS_ERR(pservice->clk_cabac)) {
571                         dev_err(dev, "failed on clk_get clk_cabac\n");
572                         pservice->clk_cabac = NULL;
573                 }
574                 pservice->clk_core = devm_clk_get(dev, "clk_core");
575                 if (IS_ERR(pservice->clk_core)) {
576                         dev_err(dev, "failed on clk_get clk_core\n");
577                         return -1;
578                 }
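                /* no break: all devices also need the aclk/hclk handles below */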
579         case VCODEC_DEVICE_ID_VPU:
580                 pservice->aclk_vcodec = devm_clk_get(dev, "aclk_vcodec");
581                 if (IS_ERR(pservice->aclk_vcodec)) {
582                         dev_err(dev, "failed on clk_get aclk_vcodec\n");
583                         return -1;
584                 }
585
586                 pservice->hclk_vcodec = devm_clk_get(dev, "hclk_vcodec");
587                 if (IS_ERR(pservice->hclk_vcodec)) {
588                         dev_err(dev, "failed on clk_get hclk_vcodec\n");
589                         return -1;
590                 }
591                 if (pservice->pd_video == NULL) {
592                         pservice->pd_video = devm_clk_get(dev, "pd_video");
593                         if (IS_ERR(pservice->pd_video)) {
594                                 pservice->pd_video = NULL;
595                                 dev_info(dev, "do not have pd_video\n");
596                         }
597                 }
598                 break;
599         default:
600                 break;
601         }
602
603         return 0;
604 #else
605         return 0;
606 #endif
607 }
608
609 static void vpu_put_clk(struct vpu_service_info *pservice)
610 {
611 #if VCODEC_CLOCK_ENABLE
612         if (pservice->pd_video)
613                 devm_clk_put(pservice->dev, pservice->pd_video);
614         if (pservice->aclk_vcodec)
615                 devm_clk_put(pservice->dev, pservice->aclk_vcodec);
616         if (pservice->hclk_vcodec)
617                 devm_clk_put(pservice->dev, pservice->hclk_vcodec);
618         if (pservice->clk_core)
619                 devm_clk_put(pservice->dev, pservice->clk_core);
620         if (pservice->clk_cabac)
621                 devm_clk_put(pservice->dev, pservice->clk_cabac);
622 #endif
623 }
624
625 static void vpu_reset(struct vpu_subdev_data *data)
626 {
627         struct vpu_service_info *pservice = data->pservice;
628         enum pmu_idle_req type = IDLE_REQ_VIDEO;
629
630         if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC)
631                 type = IDLE_REQ_HEVC;
632
633         pr_info("%s: resetting...", dev_name(pservice->dev));
634
635 #if defined(CONFIG_ARCH_RK29)
636         clk_disable(aclk_ddr_vepu);
637         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
638         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
639         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
640         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
641         mdelay(10);
642         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
643         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
644         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
645         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
646         clk_enable(aclk_ddr_vepu);
647 #elif defined(CONFIG_ARCH_RK30)
648         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
649         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
650         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
651         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
652         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
653         mdelay(1);
654         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
655         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
656         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
657         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
658         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
659 #else
660 #endif
661         WARN_ON(pservice->reg_codec != NULL);
662         WARN_ON(pservice->reg_pproc != NULL);
663         WARN_ON(pservice->reg_resev != NULL);
664         pservice->reg_codec = NULL;
665         pservice->reg_pproc = NULL;
666         pservice->reg_resev = NULL;
667
668         pr_info("for 3288/3368...");
669 #if 0 //def CONFIG_RESET_CONTROLLER
670         if (pservice->rst_a && pservice->rst_h) {
671                 if (rockchip_pmu_ops.set_idle_request)
672                         rockchip_pmu_ops.set_idle_request(type, true);
673                 pr_info("reset in\n");
674                 if (pservice->rst_v)
675                         reset_control_assert(pservice->rst_v);
676                 reset_control_assert(pservice->rst_a);
677                 reset_control_assert(pservice->rst_h);
678                 udelay(5);
679                 reset_control_deassert(pservice->rst_h);
680                 reset_control_deassert(pservice->rst_a);
681                 if (pservice->rst_v)
682                         reset_control_deassert(pservice->rst_v);
683                 if (rockchip_pmu_ops.set_idle_request)
684                         rockchip_pmu_ops.set_idle_request(type, false);
685         }
686 #endif
687
688         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
689                 clear_bit(MMU_ACTIVATED, &data->state);
690                 if (atomic_read(&pservice->enabled))
691                         rockchip_iovmm_deactivate(data->dev);
692                 else
693                         BUG_ON(!atomic_read(&pservice->enabled));
694         }
695
696         atomic_set(&pservice->reset_request, 0);
697         pr_info("done\n");
698 }
699
700 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg);
701 static void vpu_service_session_clear(struct vpu_subdev_data *data,
702                                       struct vpu_session *session)
703 {
704         struct vpu_reg *reg, *n;
705
706         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
707                 reg_deinit(data, reg);
708         }
709         list_for_each_entry_safe(reg, n, &session->running, session_link) {
710                 reg_deinit(data, reg);
711         }
712         list_for_each_entry_safe(reg, n, &session->done, session_link) {
713                 reg_deinit(data, reg);
714         }
715 }
716
717 static void vpu_service_dump(struct vpu_service_info *pservice)
718 {
719 }
720
721
722 static void vpu_service_power_off(struct vpu_service_info *pservice)
723 {
724         int total_running;
725         struct vpu_subdev_data *data = NULL, *n;
726         int ret = atomic_add_unless(&pservice->enabled, -1, 0);
727
728         if (!ret)
729                 return;
730
731         total_running = atomic_read(&pservice->total_running);
732         if (total_running) {
733                 pr_alert("alert: power off when %d task running!!\n",
734                          total_running);
735                 mdelay(50);
736                 pr_alert("alert: delay 50 ms for running task\n");
737                 vpu_service_dump(pservice);
738         }
739
740         pr_info("%s: power off...", dev_name(pservice->dev));
741
742         udelay(5);
743
744         list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
745                 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
746                         clear_bit(MMU_ACTIVATED, &data->state);
747                         rockchip_iovmm_deactivate(data->dev);
748                 }
749         }
750         pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
751
752 #if VCODEC_CLOCK_ENABLE
753         if (pservice->pd_video)
754                 clk_disable_unprepare(pservice->pd_video);
755         if (pservice->hclk_vcodec)
756                 clk_disable_unprepare(pservice->hclk_vcodec);
757         if (pservice->aclk_vcodec)
758                 clk_disable_unprepare(pservice->aclk_vcodec);
759         if (pservice->clk_core)
760                 clk_disable_unprepare(pservice->clk_core);
761         if (pservice->clk_cabac)
762                 clk_disable_unprepare(pservice->clk_cabac);
763 #endif
764         pm_runtime_put(pservice->dev);
765
766         atomic_add(1, &pservice->power_off_cnt);
767         wake_unlock(&pservice->wake_lock);
768         pr_info("done\n");
769 }
770
771 static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
772 {
773         queue_delayed_work(system_wq, &pservice->power_off_work,
774                            VPU_POWER_OFF_DELAY);
775 }
776
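/*
 * Auto power-off scheme: vpu_service_power_on() re-arms the delayed work
 * above (at most once per second), so the hardware is powered down
 * VPU_POWER_OFF_DELAY after the last power-on request. The work handler
 * below simply re-queues itself when it cannot take the service lock
 * because a task is still running.
 */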
777 static void vpu_power_off_work(struct work_struct *work_s)
778 {
779         struct delayed_work *dlwork = container_of(work_s,
780                         struct delayed_work, work);
781         struct vpu_service_info *pservice = container_of(dlwork,
782                         struct vpu_service_info, power_off_work);
783
784         if (mutex_trylock(&pservice->lock)) {
785                 vpu_service_power_off(pservice);
786                 mutex_unlock(&pservice->lock);
787         } else {
788                 /* Come back later if the device is busy... */
789                 vpu_queue_power_off_work(pservice);
790         }
791 }
792
793 static void vpu_service_power_on(struct vpu_service_info *pservice)
794 {
795         int ret;
796         ktime_t now = ktime_get();
797
798         if (ktime_to_ns(ktime_sub(now, pservice->last)) > NSEC_PER_SEC) {
799                 cancel_delayed_work_sync(&pservice->power_off_work);
800                 vpu_queue_power_off_work(pservice);
801                 pservice->last = now;
802         }
803         ret = atomic_add_unless(&pservice->enabled, 1, 1);
804         if (!ret)
805                 return;
806
807         pr_info("%s: power on\n", dev_name(pservice->dev));
808
809 #define BIT_VCODEC_CLK_SEL      (1<<10)
810         if (cpu_is_rk312x())
811                 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1)
812                         | BIT_VCODEC_CLK_SEL | (BIT_VCODEC_CLK_SEL << 16),
813                         RK_GRF_VIRT + RK312X_GRF_SOC_CON1);
814
815 #if VCODEC_CLOCK_ENABLE
816         if (pservice->aclk_vcodec)
817                 clk_prepare_enable(pservice->aclk_vcodec);
818         if (pservice->hclk_vcodec)
819                 clk_prepare_enable(pservice->hclk_vcodec);
820         if (pservice->clk_core)
821                 clk_prepare_enable(pservice->clk_core);
822         if (pservice->clk_cabac)
823                 clk_prepare_enable(pservice->clk_cabac);
824         if (pservice->pd_video)
825                 clk_prepare_enable(pservice->pd_video);
826 #endif
827         pm_runtime_get_sync(pservice->dev);
828
829         udelay(5);
830         atomic_add(1, &pservice->power_on_cnt);
831         wake_lock(&pservice->wake_lock);
832 }
833
834 static inline bool reg_check_interlace(struct vpu_reg *reg)
835 {
836         u32 type = (reg->reg[3] & (1 << 23));
837
838         return (type > 0);
839 }
840
841 static inline enum VPU_DEC_FMT reg_check_fmt(struct vpu_reg *reg)
842 {
843         enum VPU_DEC_FMT type = (enum VPU_DEC_FMT)((reg->reg[3] >> 28) & 0xf);
844
845         return type;
846 }
847
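/*
 * Worked example (illustrative): reg[4] >> 23 holds the picture width in
 * macroblocks, so a value of 120 decodes to 120 * 16 = 1920 pixels.
 */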
848 static inline int reg_probe_width(struct vpu_reg *reg)
849 {
850         int width_in_mb = reg->reg[4] >> 23;
851
852         return width_in_mb * 16;
853 }
854
855 static inline int reg_probe_hevc_y_stride(struct vpu_reg *reg)
856 {
857         int y_virstride = reg->reg[8];
858
859         return y_virstride;
860 }
861
862 static int vcodec_fd_to_iova(struct vpu_subdev_data *data,
863                              struct vpu_reg *reg, int fd)
864 {
865         struct vpu_service_info *pservice = data->pservice;
866         struct ion_handle *hdl;
867         int ret = 0;
868         struct vcodec_mem_region *mem_region;
869
870         hdl = ion_import_dma_buf(pservice->ion_client, fd);
871         if (IS_ERR(hdl)) {
872                 vpu_err("import dma-buf from fd %d failed\n", fd);
873                 return PTR_ERR(hdl);
874         }
875         mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
876
877         if (mem_region == NULL) {
878                 vpu_err("allocate memory for iommu memory region failed\n");
879                 ion_free(pservice->ion_client, hdl);
880                 return -1;
881         }
882
883         mem_region->hdl = hdl;
884         if (data->mmu_dev)
885                 ret = ion_map_iommu(data->dev, pservice->ion_client,
886                                     mem_region->hdl, &mem_region->iova,
887                                     &mem_region->len);
888         else
889                 ret = ion_phys(pservice->ion_client,
890                                mem_region->hdl,
891                                (ion_phys_addr_t *)&mem_region->iova,
892                                (size_t *)&mem_region->len);
893
894         if (ret < 0) {
895                 vpu_err("fd %d ion map iommu failed\n", fd);
896                 kfree(mem_region);
897                 ion_free(pservice->ion_client, hdl);
898                 return ret;
899         }
900         INIT_LIST_HEAD(&mem_region->reg_lnk);
901         list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
902         return mem_region->iova;
903 }
904
905 /*
906  * NOTE: rkvdec/rkhevc put the scaling list address in the pps buffer, and the
907  * hardware reads it by pps id taken from the video stream data.
908  *
909  * So in the iommu case we need to translate that address as well. The address
910  * is packed in the same 10-bit fd + 22-bit offset format.
911  * Because the userspace decoder does not pass the pps id in the register file
912  * set, the kernel driver would have to translate every scaling list address in
913  * the pps buffer, which means 256 pps entries for H.264 and 64 for H.265.
914  *
915  * To optimize performance, the kernel driver asks the userspace decoder to set
916  * all scaling list addresses in the pps buffer to the one that will be used by
917  * the current decoding task. Then the kernel driver only has to translate the
918  * first address and copy it to all pps entries.
919  */
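/*
 * Worked example (illustrative): a packed address word of 0x0000a405 splits
 * into fd = 0x0000a405 & 0x3ff = 5 and offset = 0x0000a405 >> 10 = 41, i.e.
 * dma-buf fd 5 at a byte offset of 41.
 */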
920 static void fill_scaling_list_addr_in_pps(
921                 struct vpu_subdev_data *data,
922                 struct vpu_reg *reg,
923                 char *pps,
924                 int pps_info_count,
925                 int pps_info_size,
926                 int scaling_list_addr_offset)
927 {
928         int base = scaling_list_addr_offset;
929         int scaling_fd = 0;
930         u32 scaling_offset;
931
932         scaling_offset  = (u32)pps[base + 0];
933         scaling_offset += (u32)pps[base + 1] << 8;
934         scaling_offset += (u32)pps[base + 2] << 16;
935         scaling_offset += (u32)pps[base + 3] << 24;
936
937         scaling_fd = scaling_offset & 0x3ff;
938         scaling_offset = scaling_offset >> 10;
939
940         if (scaling_fd > 0) {
941                 int i = 0;
942                 u32 tmp = vcodec_fd_to_iova(data, reg, scaling_fd);
943                 tmp += scaling_offset;
944
945                 for (i = 0; i < pps_info_count; i++, base += pps_info_size) {
946                         pps[base + 0] = (tmp >>  0) & 0xff;
947                         pps[base + 1] = (tmp >>  8) & 0xff;
948                         pps[base + 2] = (tmp >> 16) & 0xff;
949                         pps[base + 3] = (tmp >> 24) & 0xff;
950                 }
951         }
952 }
953
954 static int vcodec_bufid_to_iova(struct vpu_subdev_data *data, const u8 *tbl,
955                                 int size, struct vpu_reg *reg,
956                                 struct extra_info_for_iommu *ext_inf)
957 {
958         struct vpu_service_info *pservice = data->pservice;
959         struct vpu_task_info *task = reg->task;
960         enum FORMAT_TYPE type;
961         struct ion_handle *hdl;
962         int ret = 0;
963         struct vcodec_mem_region *mem_region;
964         int i;
965         int offset = 0;
966
967         if (tbl == NULL || size <= 0) {
968                 dev_err(pservice->dev, "invalid input arguments\n");
969                 return -1;
970         }
971
972         if (task->get_fmt) {
973                 type = task->get_fmt(reg->reg);
974         } else {
975                 pr_err("invalid task with NULL get_fmt\n");
976                 return -1;
977         }
978
979         for (i = 0; i < size; i++) {
980                 int usr_fd = reg->reg[tbl[i]] & 0x3FF;
981
982                 /* if userspace did not set an fd at this register, skip it */
983                 if (usr_fd == 0)
984                         continue;
985
986                 /*
987                  * special offset scale case
988                  *
989                  * This translation handles fd + offset pairs.
990                  * One register is 32 bits wide, but we need to pass both the
991                  * buffer file handle and the start address offset, so they are
992                  * packed together in the following format:
993                  *
994                  *  bits  0~9  buffer file handle, range 0 ~ 1023
995                  *  bits 10~31 offset, range 0 ~ 4M
996                  *
997                  * In the 4K case the offset can be larger than 4M, so for the
998                  * H.264 4K vpu/vpu2 decoder the offset is scaled by 16.
999                  * MPEG4 uses the same register for colmv and does not need
1000                  * the scaling.
1001                  *
1002                  * RKVdec does not have this issue.
1003                  */
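                /*
                 * Worked example (illustrative): for the H.264 dir_mv
                 * register a value of 0x00001005 decodes to fd = 5 and
                 * offset = (0x00001005 >> 10) << 4 = 64 bytes.
                 */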
1004                 if ((type == FMT_H264D || type == FMT_VP9D) &&
1005                     task->reg_dir_mv > 0 && task->reg_dir_mv == tbl[i])
1006                         offset = reg->reg[tbl[i]] >> 10 << 4;
1007                 else
1008                         offset = reg->reg[tbl[i]] >> 10;
1009
1010                 vpu_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d\n",
1011                           tbl[i], usr_fd, offset);
1012
1013                 hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
1014                 if (IS_ERR(hdl)) {
1015                         dev_err(pservice->dev,
1016                                 "import dma-buf from fd %d failed, reg[%d]\n",
1017                                 usr_fd, tbl[i]);
1018                         return PTR_ERR(hdl);
1019                 }
1020
1021                 if (task->reg_pps > 0 && task->reg_pps == tbl[i]) {
1022                         int pps_info_offset;
1023                         int pps_info_count;
1024                         int pps_info_size;
1025                         int scaling_list_addr_offset;
1026
1027                         switch (type) {
1028                         case FMT_H264D: {
1029                                 pps_info_offset = offset;
1030                                 pps_info_count = 256;
1031                                 pps_info_size = 32;
1032                                 scaling_list_addr_offset = 23;
1033                         } break;
1034                         case FMT_H265D: {
1035                                 pps_info_offset = 0;
1036                                 pps_info_count = 64;
1037                                 pps_info_size = 80;
1038                                 scaling_list_addr_offset = 74;
1039                         } break;
1040                         default: {
1041                                 pps_info_offset = 0;
1042                                 pps_info_count = 0;
1043                                 pps_info_size = 0;
1044                                 scaling_list_addr_offset = 0;
1045                         } break;
1046                         }
1047
1048                         vpu_debug(DEBUG_PPS_FILL,
1049                                   "scaling list filling parameter:\n");
1050                         vpu_debug(DEBUG_PPS_FILL,
1051                                   "pps_info_offset %d\n", pps_info_offset);
1052                         vpu_debug(DEBUG_PPS_FILL,
1053                                   "pps_info_count  %d\n", pps_info_count);
1054                         vpu_debug(DEBUG_PPS_FILL,
1055                                   "pps_info_size   %d\n", pps_info_size);
1056                         vpu_debug(DEBUG_PPS_FILL,
1057                                   "scaling_list_addr_offset %d\n",
1058                                   scaling_list_addr_offset);
1059
1060                         if (pps_info_count) {
1061                                 char *pps = (char *)ion_map_kernel(
1062                                                 pservice->ion_client, hdl);
1063                                 vpu_debug(DEBUG_PPS_FILL,
1064                                           "scaling list setting pps %p\n", pps);
1065                                 pps += pps_info_offset;
1066
1067                                 fill_scaling_list_addr_in_pps(
1068                                                 data, reg, pps,
1069                                                 pps_info_count,
1070                                                 pps_info_size,
1071                                                 scaling_list_addr_offset);
1072                         }
1073                 }
1074
1075                 mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
1076
1077                 if (!mem_region) {
1078                         ion_free(pservice->ion_client, hdl);
1079                         return -1;
1080                 }
1081
1082                 mem_region->hdl = hdl;
1083                 mem_region->reg_idx = tbl[i];
1084
1085                 if (data->mmu_dev)
1086                         ret = ion_map_iommu(data->dev,
1087                                             pservice->ion_client,
1088                                             mem_region->hdl,
1089                                             &mem_region->iova,
1090                                             &mem_region->len);
1091                 else
1092                         ret = ion_phys(pservice->ion_client,
1093                                        mem_region->hdl,
1094                                        (ion_phys_addr_t *)&mem_region->iova,
1095                                        (size_t *)&mem_region->len);
1096
1097                 if (ret < 0) {
1098                         dev_err(pservice->dev, "reg %d fd %d ion map iommu failed\n",
1099                                 tbl[i], usr_fd);
1100                         kfree(mem_region);
1101                         ion_free(pservice->ion_client, hdl);
1102                         return ret;
1103                 }
1104
1105                 /*
1106                  * special for vpu dec num 12: record the base address so the
1107                  * decoded length can be computed later (decoded length hack)
1108                  * NOTE: not a perfect fix, the fd is not recorded
1109                  */
1110                 if (task->reg_len > 0 && task->reg_len == tbl[i]) {
1111                         reg->dec_base = mem_region->iova + offset;
1112                         vpu_debug(DEBUG_REGISTER, "dec_set %08x\n",
1113                                   reg->dec_base);
1114                 }
1115
1116                 reg->reg[tbl[i]] = mem_region->iova + offset;
1117                 INIT_LIST_HEAD(&mem_region->reg_lnk);
1118                 list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
1119         }
1120
1121         if (ext_inf != NULL && ext_inf->magic == EXTRA_INFO_MAGIC) {
1122                 for (i = 0; i < ext_inf->cnt; i++) {
1123                         vpu_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1124                                   ext_inf->elem[i].index,
1125                                   ext_inf->elem[i].offset);
1126                         reg->reg[ext_inf->elem[i].index] +=
1127                                 ext_inf->elem[i].offset;
1128                 }
1129         }
1130
1131         return 0;
1132 }
1133
1134 static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
1135                                         struct vpu_reg *reg,
1136                                         struct extra_info_for_iommu *ext_inf)
1137 {
1138         enum FORMAT_TYPE type = reg->task->get_fmt(reg->reg);
1139
1140         if (type < FMT_TYPE_BUTT) {
1141                 const struct vpu_trans_info *info = &reg->trans[type];
1142                 const u8 *tbl = info->table;
1143                 int size = info->count;
1144
1145                 return vcodec_bufid_to_iova(data, tbl, size, reg, ext_inf);
1146         }
1147         pr_err("found invalid format type!\n");
1148         return -1;
1149 }
1150
1151 static void get_reg_freq(struct vpu_subdev_data *data, struct vpu_reg *reg)
1152 {
1153
1154         if (!soc_is_rk2928g()) {
1155                 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1156                         if (reg_check_fmt(reg) == VPU_DEC_FMT_H264) {
1157                                 if (reg_probe_width(reg) > 3200) {
1158                                         /*raise frequency for 4k avc.*/
1159                                         reg->freq = VPU_FREQ_600M;
1160                                 }
1161                         } else {
1162                                 if (reg_check_interlace(reg))
1163                                         reg->freq = VPU_FREQ_400M;
1164                         }
1165                 }
1166                 if (data->hw_id == HEVC_ID) {
1167                         if (reg_probe_hevc_y_stride(reg) > 60000)
1168                                 reg->freq = VPU_FREQ_400M;
1169                 }
1170                 if (reg->type == VPU_PP)
1171                         reg->freq = VPU_FREQ_400M;
1172         }
1173 }
1174
1175 static struct vpu_reg *reg_init(struct vpu_subdev_data *data,
1176                                 struct vpu_session *session,
1177                                 void __user *src, u32 size)
1178 {
1179         struct vpu_service_info *pservice = data->pservice;
1180         int extra_size = 0;
1181         struct extra_info_for_iommu extra_info;
1182         struct vpu_reg *reg = kzalloc(sizeof(*reg) + data->reg_size,
1183                                       GFP_KERNEL);
1184
1185         vpu_debug_enter();
1186
1187         if (!reg) {
1188                 vpu_err("error: kzalloc failed in reg_init\n");
1189                 return NULL;
1190         }
1191
1192         if (size > data->reg_size) {
1193                 pr_err("vpu reg size %u is larger than hw reg size %u\n",
1194                        size, data->reg_size);
1195                 extra_size = min_t(u32, size - data->reg_size, sizeof(extra_info));
1196                 size = data->reg_size;
1197         }
1198         reg->session = session;
1199         reg->data = data;
1200         reg->type = session->type;
1201         reg->size = size;
1202         reg->freq = VPU_FREQ_DEFAULT;
1203         reg->task = &data->task_info[session->type];
1204         reg->trans = data->trans_info;
1205         reg->reg = (u32 *)&reg[1];
1206         INIT_LIST_HEAD(&reg->session_link);
1207         INIT_LIST_HEAD(&reg->status_link);
1208
1209         INIT_LIST_HEAD(&reg->mem_region_list);
1210
1211         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
1212                 vpu_err("error: copy_from_user failed in reg_init\n");
1213                 kfree(reg);
1214                 return NULL;
1215         }
1216
1217         if (copy_from_user(&extra_info, (u8 *)src + size, extra_size)) {
1218                 vpu_err("error: copy_from_user failed in reg_init\n");
1219                 kfree(reg);
1220                 return NULL;
1221         }
1222
1223         if (0 > vcodec_reg_address_translate(data, reg, &extra_info)) {
1224                 int i = 0;
1225
1226                 vpu_err("error: translate reg address failed, dumping regs\n");
1227                 for (i = 0; i < size >> 2; i++)
1228                         pr_err("reg[%02d]: %08x\n", i, *((u32 *)src + i));
1229
1230                 kfree(reg);
1231                 return NULL;
1232         }
1233
1234         mutex_lock(&pservice->lock);
1235         list_add_tail(&reg->status_link, &pservice->waiting);
1236         list_add_tail(&reg->session_link, &session->waiting);
1237         mutex_unlock(&pservice->lock);
1238
1239         if (pservice->auto_freq)
1240                 get_reg_freq(data, reg);
1241
1242         vpu_debug_leave();
1243         return reg;
1244 }
1245
1246 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg)
1247 {
1248         struct vpu_service_info *pservice = data->pservice;
1249         struct vcodec_mem_region *mem_region = NULL, *n;
1250
1251         list_del_init(&reg->session_link);
1252         list_del_init(&reg->status_link);
1253         if (reg == pservice->reg_codec)
1254                 pservice->reg_codec = NULL;
1255         if (reg == pservice->reg_pproc)
1256                 pservice->reg_pproc = NULL;
1257
1258         /* release the memory regions attached to this register table. */
1259         list_for_each_entry_safe(mem_region, n,
1260                         &reg->mem_region_list, reg_lnk) {
1261                 ion_free(pservice->ion_client, mem_region->hdl);
1262                 list_del_init(&mem_region->reg_lnk);
1263                 kfree(mem_region);
1264         }
1265
1266         kfree(reg);
1267 }
1268
1269 static void reg_from_wait_to_run(struct vpu_service_info *pservice,
1270                                  struct vpu_reg *reg)
1271 {
1272         vpu_debug_enter();
1273         list_del_init(&reg->status_link);
1274         list_add_tail(&reg->status_link, &pservice->running);
1275
1276         list_del_init(&reg->session_link);
1277         list_add_tail(&reg->session_link, &reg->session->running);
1278         vpu_debug_leave();
1279 }
1280
1281 static void reg_copy_from_hw(struct vpu_reg *reg, u32 *src, u32 count)
1282 {
1283         int i;
1284         u32 *dst = reg->reg;
1285
1286         vpu_debug_enter();
1287         for (i = 0; i < count; i++, src++)
1288                 *dst++ = readl_relaxed(src);
1289
1290         dst = (u32 *)&reg->reg[0];
1291         for (i = 0; i < count; i++)
1292                 vpu_debug(DEBUG_GET_REG, "get reg[%02d] %08x\n", i, dst[i]);
1293
1294         vpu_debug_leave();
1295 }
1296
1297 static void reg_from_run_to_done(struct vpu_subdev_data *data,
1298                                  struct vpu_reg *reg)
1299 {
1300         struct vpu_service_info *pservice = data->pservice;
1301         struct vpu_hw_info *hw_info = data->hw_info;
1302         struct vpu_task_info *task = reg->task;
1303
1304         vpu_debug_enter();
1305
1306         list_del_init(&reg->status_link);
1307         list_add_tail(&reg->status_link, &pservice->done);
1308
1309         list_del_init(&reg->session_link);
1310         list_add_tail(&reg->session_link, &reg->session->done);
1311
1312         switch (reg->type) {
1313         case VPU_ENC: {
1314                 pservice->reg_codec = NULL;
1315                 reg_copy_from_hw(reg, data->enc_dev.regs, hw_info->enc_reg_num);
1316                 reg->reg[task->reg_irq] = pservice->irq_status;
1317         } break;
1318         case VPU_DEC: {
1319                 pservice->reg_codec = NULL;
1320                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1321
1322                 /* revert hack for decoded length */
1323                 if (task->reg_len > 0) {
1324                         int reg_len = task->reg_len;
1325                         u32 dec_get = reg->reg[reg_len];
1326                         s32 dec_length = dec_get - reg->dec_base;
1327
1328                         vpu_debug(DEBUG_REGISTER,
1329                                   "dec_get %08x dec_length %d\n",
1330                                   dec_get, dec_length);
1331                         reg->reg[reg_len] = dec_length << 10;
1332                 }
1333
1334                 reg->reg[task->reg_irq] = pservice->irq_status;
1335         } break;
1336         case VPU_PP: {
1337                 pservice->reg_pproc = NULL;
1338                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1339                 writel_relaxed(0, data->dec_dev.regs + task->reg_irq);
1340         } break;
1341         case VPU_DEC_PP: {
1342                 u32 pipe_mode;
1343                 u32 *regs = data->dec_dev.regs;
1344
1345                 pservice->reg_codec = NULL;
1346                 pservice->reg_pproc = NULL;
1347
1348                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1349
1350                 /* NOTE: remove pp pipeline mode flag first */
1351                 pipe_mode = readl_relaxed(regs + task->reg_pipe);
1352                 pipe_mode &= ~task->pipe_mask;
1353                 writel_relaxed(pipe_mode, regs + task->reg_pipe);
1354
1355                 /* revert hack for decoded length */
1356                 if (task->reg_len > 0) {
1357                         int reg_len = task->reg_len;
1358                         u32 dec_get = reg->reg[reg_len];
1359                         s32 dec_length = dec_get - reg->dec_base;
1360
1361                         vpu_debug(DEBUG_REGISTER,
1362                                   "dec_get %08x dec_length %d\n",
1363                                   dec_get, dec_length);
1364                         reg->reg[reg_len] = dec_length << 10;
1365                 }
1366
1367                 reg->reg[task->reg_irq] = pservice->irq_status;
1368         } break;
1369         default: {
1370                 vpu_err("error: copy reg from hw with unknown type %d\n",
1371                         reg->type);
1372         } break;
1373         }
1374         vcodec_exit_mode(data);
1375
1376         atomic_sub(1, &reg->session->task_running);
1377         atomic_sub(1, &pservice->total_running);
1378         wake_up(&reg->session->wait);
1379
1380         vpu_debug_leave();
1381 }
1382
1383 static void vpu_service_set_freq(struct vpu_service_info *pservice,
1384                                  struct vpu_reg *reg)
1385 {
1386         enum VPU_FREQ curr = atomic_read(&pservice->freq_status);
1387
1388         if (curr == reg->freq)
1389                 return;
1390
1391         atomic_set(&pservice->freq_status, reg->freq);
1392         switch (reg->freq) {
1393         case VPU_FREQ_200M: {
1394                 clk_set_rate(pservice->aclk_vcodec, 200*MHZ);
1395         } break;
1396         case VPU_FREQ_266M: {
1397                 clk_set_rate(pservice->aclk_vcodec, 266*MHZ);
1398         } break;
1399         case VPU_FREQ_300M: {
1400                 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1401         } break;
1402         case VPU_FREQ_400M: {
1403                 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1404         } break;
1405         case VPU_FREQ_500M: {
1406                 clk_set_rate(pservice->aclk_vcodec, 500*MHZ);
1407         } break;
1408         case VPU_FREQ_600M: {
1409                 clk_set_rate(pservice->aclk_vcodec, 600*MHZ);
1410         } break;
1411         default: {
1412                 unsigned long rate = 300*MHZ;
1413
1414                 if (soc_is_rk2928g())
1415                         rate = 400*MHZ;
1416
1417                 clk_set_rate(pservice->aclk_vcodec, rate);
1418         } break;
1419         }
1420 }
1421
1422 static void reg_copy_to_hw(struct vpu_subdev_data *data, struct vpu_reg *reg)
1423 {
1424         struct vpu_service_info *pservice = data->pservice;
1425         struct vpu_task_info *task = reg->task;
1426         struct vpu_hw_info *hw_info = data->hw_info;
1427         int i;
1428         u32 *src = (u32 *)&reg->reg[0];
1429         u32 enable_mask = task->enable_mask;
1430         u32 gating_mask = task->gating_mask;
1431         u32 reg_en = task->reg_en;
1432
1433         vpu_debug_enter();
1434
1435         atomic_add(1, &pservice->total_running);
1436         atomic_add(1, &reg->session->task_running);
1437
1438         if (pservice->auto_freq)
1439                 vpu_service_set_freq(pservice, reg);
1440
1441         vcodec_enter_mode(data);
1442
1443         switch (reg->type) {
1444         case VPU_ENC: {
1445                 u32 *dst = data->enc_dev.regs;
1446                 u32 base = 0;
1447                 u32 end  = hw_info->enc_reg_num;
1448                 /* u32 reg_gating = task->reg_gating; */
1449
1450                 pservice->reg_codec = reg;
1451
1452                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1453                           base, end, reg_en, enable_mask, gating_mask);
1454
1455                 VEPU_CLEAN_CACHE(dst);
1456
1457                 if (debug & DEBUG_SET_REG)
1458                         for (i = base; i < end; i++)
1459                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1460                                           i, src[i]);
1461
1462                 /*
1463                  * NOTE: the encoder needs to set up its mode first
1464                  */
1465                 writel_relaxed(src[reg_en] & enable_mask, dst + reg_en);
1466
1467                 /* NOTE: encoder gating is not on enable register */
1468                 /* src[reg_gating] |= gating_mask; */
1469
1470                 for (i = base; i < end; i++) {
1471                         if (i != reg_en)
1472                                 writel_relaxed(src[i], dst + i);
1473                 }
1474
1475                 writel(src[reg_en], dst + reg_en);
1476                 dsb(sy);
1477
1478                 time_record(reg->task, 0);
1479         } break;
1480         case VPU_DEC: {
1481                 u32 *dst = data->dec_dev.regs;
1482                 u32 len = hw_info->dec_reg_num;
1483                 u32 base = hw_info->base_dec;
1484                 u32 end  = hw_info->end_dec;
1485
1486                 pservice->reg_codec = reg;
1487
1488                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1489                           base, end, reg_en, enable_mask, gating_mask);
1490
1491                 VDPU_CLEAN_CACHE(dst);
1492
1493                 /* on rkvdec set cache size to 64byte */
1494                 if (pservice->dev_id == VCODEC_DEVICE_ID_RKVDEC) {
1495                         u32 *cache_base = dst + 0x100;
1496                         u32 val = (debug & DEBUG_CACHE_32B) ? (0x3) : (0x13);
1497                         writel_relaxed(val, cache_base + 0x07);
1498                         writel_relaxed(val, cache_base + 0x17);
1499                 }
1500
1501                 if (debug & DEBUG_SET_REG)
1502                         for (i = 0; i < len; i++)
1503                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1504                                           i, src[i]);
1505
1506                 /*
1507                  * NOTE: The end register is invalid. Do NOT write to it.
1508                  *       Also, the base register must be written.
1509                  */
1510                 for (i = base; i < end; i++) {
1511                         if (i != reg_en)
1512                                 writel_relaxed(src[i], dst + i);
1513                 }
1514
1515                 writel(src[reg_en] | gating_mask, dst + reg_en);
1516                 dsb(sy);
1517
1518                 time_record(reg->task, 0);
1519         } break;
1520         case VPU_PP: {
1521                 u32 *dst = data->dec_dev.regs;
1522                 u32 base = hw_info->base_pp;
1523                 u32 end  = hw_info->end_pp;
1524
1525                 pservice->reg_pproc = reg;
1526
1527                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1528                           base, end, reg_en, enable_mask, gating_mask);
1529
1530                 if (debug & DEBUG_SET_REG)
1531                         for (i = base; i < end; i++)
1532                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1533                                           i, src[i]);
1534
1535                 for (i = base; i < end; i++) {
1536                         if (i != reg_en)
1537                                 writel_relaxed(src[i], dst + i);
1538                 }
1539
1540                 writel(src[reg_en] | gating_mask, dst + reg_en);
1541                 dsb(sy);
1542
1543                 time_record(reg->task, 0);
1544         } break;
1545         case VPU_DEC_PP: {
1546                 u32 *dst = data->dec_dev.regs;
1547                 u32 base = hw_info->base_dec_pp;
1548                 u32 end  = hw_info->end_dec_pp;
1549
1550                 pservice->reg_codec = reg;
1551                 pservice->reg_pproc = reg;
1552
1553                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1554                           base, end, reg_en, enable_mask, gating_mask);
1555
1556                 /* VDPU_SOFT_RESET(dst); */
1557                 VDPU_CLEAN_CACHE(dst);
1558
1559                 if (debug & DEBUG_SET_REG)
1560                         for (i = base; i < end; i++)
1561                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1562                                           i, src[i]);
1563
1564                 for (i = base; i < end; i++) {
1565                         if (i != reg_en)
1566                                 writel_relaxed(src[i], dst + i);
1567                 }
1568
1569                 /* NOTE: dec output must be disabled */
1570
1571                 writel(src[reg_en] | gating_mask, dst + reg_en);
1572                 dsb(sy);
1573
1574                 time_record(reg->task, 0);
1575         } break;
1576         default: {
1577                 vpu_err("error: unsupported session type %d\n", reg->type);
1578                 atomic_sub(1, &pservice->total_running);
1579                 atomic_sub(1, &reg->session->task_running);
1580         } break;
1581         }
1582
1583         vpu_debug_leave();
1584 }
1585
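/*
 * Take the first task on the waiting list and start it if the hardware
 * state allows: decoder, post-processor and combined tasks each have
 * their own occupancy rules, and a pending reset request is honoured
 * before any new registers are written.
 */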
1586 static void try_set_reg(struct vpu_subdev_data *data)
1587 {
1588         struct vpu_service_info *pservice = data->pservice;
1589
1590         vpu_debug_enter();
1591         if (!list_empty(&pservice->waiting)) {
1592                 struct vpu_reg *reg_codec = pservice->reg_codec;
1593                 struct vpu_reg *reg_pproc = pservice->reg_pproc;
1594                 int can_set = 0;
1595                 bool change_able = (reg_codec == NULL) && (reg_pproc == NULL);
1596                 int reset_request = atomic_read(&pservice->reset_request);
1597                 struct vpu_reg *reg = list_entry(pservice->waiting.next,
1598                                 struct vpu_reg, status_link);
1599
1600                 vpu_service_power_on(pservice);
1601
1602                 if (change_able || !reset_request) {
1603                         switch (reg->type) {
1604                         case VPU_ENC: {
1605                                 if (change_able)
1606                                         can_set = 1;
1607                         } break;
1608                         case VPU_DEC: {
1609                                 if (reg_codec == NULL)
1610                                         can_set = 1;
1611                                 if (pservice->auto_freq && (reg_pproc != NULL))
1612                                         can_set = 0;
1613                         } break;
1614                         case VPU_PP: {
1615                                 if (reg_codec == NULL) {
1616                                         if (reg_pproc == NULL)
1617                                                 can_set = 1;
1618                                 } else {
1619                                         if ((reg_codec->type == VPU_DEC) &&
1620                                             (reg_pproc == NULL))
1621                                                 can_set = 1;
1622
1623                                         /*
1624                                          * NOTE:
1625                                          * cannot change frequency
1626                                          * while vpu is working
1627                                          */
1628                                         if (pservice->auto_freq)
1629                                                 can_set = 0;
1630                                 }
1631                         } break;
1632                         case VPU_DEC_PP: {
1633                                 if (change_able)
1634                                         can_set = 1;
1635                         } break;
1636                         default: {
1637                                 pr_err("undefined reg type %d\n", reg->type);
1638                         } break;
1639                         }
1640                 }
1641
1642                 /* then check reset request */
1643                 if (reset_request && !change_able)
1644                         reset_request = 0;
1645
1646                 /* do reset before setting registers */
1647                 if (reset_request)
1648                         vpu_reset(data);
1649
1650                 if (can_set) {
1651                         reg_from_wait_to_run(pservice, reg);
1652                         reg_copy_to_hw(reg->data, reg);
1653                 }
1654         }
1655         vpu_debug_leave();
1656 }
1657
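/*
 * Copy a finished task's registers back to user space, starting from
 * the base offset that matches the task type, then release the vpu_reg.
 */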
1658 static int return_reg(struct vpu_subdev_data *data,
1659                       struct vpu_reg *reg, u32 __user *dst)
1660 {
1661         struct vpu_hw_info *hw_info = data->hw_info;
1662         size_t size = reg->size;
1663         u32 base;
1664
1665         vpu_debug_enter();
1666         switch (reg->type) {
1667         case VPU_ENC: {
1668                 base = 0;
1669         } break;
1670         case VPU_DEC: {
1671                 base = hw_info->base_dec_pp;
1672         } break;
1673         case VPU_PP: {
1674                 base = hw_info->base_pp;
1675         } break;
1676         case VPU_DEC_PP: {
1677                 base = hw_info->base_dec_pp;
1678         } break;
1679         default: {
1680                 vpu_err("error: copy reg to user with unknown type %d\n",
1681                         reg->type);
1682                 return -EFAULT;
1683         } break;
1684         }
1685
1686         if (copy_to_user(dst, &reg->reg[base], size)) {
1687                 vpu_err("error: return_reg copy_to_user failed\n");
1688                 return -EFAULT;
1689         }
1690
1691         reg_deinit(data, reg);
1692         vpu_debug_leave();
1693         return 0;
1694 }
1695
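/*
 * ioctl entry point. The expected user-space flow is roughly the
 * following (sketch only; the real client lives in the Rockchip
 * user-space libraries and opens the device node created in
 * vcodec_subdev_probe()):
 *
 *   ioctl(fd, VPU_IOC_SET_CLIENT_TYPE, VPU_DEC);
 *   ioctl(fd, VPU_IOC_SET_REG, &req);   // queue one register set
 *   ioctl(fd, VPU_IOC_GET_REG, &req);   // block until the task is done
 *
 * where req is a struct vpu_request carrying the user pointer and size.
 */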
1696 static long vpu_service_ioctl(struct file *filp, unsigned int cmd,
1697                               unsigned long arg)
1698 {
1699         struct vpu_subdev_data *data =
1700                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1701                              struct vpu_subdev_data, cdev);
1702         struct vpu_service_info *pservice = data->pservice;
1703         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1704
1705         vpu_debug_enter();
1706         if (NULL == session)
1707                 return -EINVAL;
1708
1709         switch (cmd) {
1710         case VPU_IOC_SET_CLIENT_TYPE: {
1711                 session->type = (enum VPU_CLIENT_TYPE)arg;
1712                 vpu_debug(DEBUG_IOCTL, "pid %d set client type %d\n",
1713                           session->pid, session->type);
1714         } break;
1715         case VPU_IOC_GET_HW_FUSE_STATUS: {
1716                 struct vpu_request req;
1717
1718                 vpu_debug(DEBUG_IOCTL, "pid %d get hw status %d\n",
1719                           session->pid, session->type);
1720                 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
1721                         vpu_err("error: get hw status copy_from_user failed\n");
1722                         return -EFAULT;
1723                 } else {
1724                         void *config = (session->type != VPU_ENC) ?
1725                                        ((void *)&pservice->dec_config) :
1726                                        ((void *)&pservice->enc_config);
1727                         size_t size = (session->type != VPU_ENC) ?
1728                                       (sizeof(struct vpu_dec_config)) :
1729                                       (sizeof(struct vpu_enc_config));
1730                         if (copy_to_user((void __user *)req.req,
1731                                          config, size)) {
1732                                 vpu_err("error: get hw status copy_to_user failed type %d\n",
1733                                         session->type);
1734                                 return -EFAULT;
1735                         }
1736                 }
1737         } break;
1738         case VPU_IOC_SET_REG: {
1739                 struct vpu_request req;
1740                 struct vpu_reg *reg;
1741
1742                 vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
1743                           session->pid, session->type);
1744                 if (copy_from_user(&req, (void __user *)arg,
1745                                    sizeof(struct vpu_request))) {
1746                         vpu_err("error: set reg copy_from_user failed\n");
1747                         return -EFAULT;
1748                 }
1749                 reg = reg_init(data, session, (void __user *)req.req, req.size);
1750                 if (NULL == reg) {
1751                         return -EFAULT;
1752                 } else {
1753                         mutex_lock(&pservice->lock);
1754                         try_set_reg(data);
1755                         mutex_unlock(&pservice->lock);
1756                 }
1757         } break;
1758         case VPU_IOC_GET_REG: {
1759                 struct vpu_request req;
1760                 struct vpu_reg *reg;
1761                 int ret;
1762
1763                 vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
1764                           session->pid, session->type);
1765                 if (copy_from_user(&req, (void __user *)arg,
1766                                    sizeof(struct vpu_request))) {
1767                         vpu_err("error: get reg copy_from_user failed\n");
1768                         return -EFAULT;
1769                 }
1770
1771                 ret = wait_event_timeout(session->wait,
1772                                          !list_empty(&session->done),
1773                                          VPU_TIMEOUT_DELAY);
1774
1775                 if (!list_empty(&session->done)) {
1776                         if (ret < 0)
1777                                 vpu_err("warning: pid %d wait task error ret %d\n",
1778                                         session->pid, ret);
1779                         ret = 0;
1780                 } else {
1781                         if (unlikely(ret < 0)) {
1782                                 vpu_err("error: pid %d wait task ret %d\n",
1783                                         session->pid, ret);
1784                         } else if (ret == 0) {
1785                                 vpu_err("error: pid %d wait %d task done timeout\n",
1786                                         session->pid,
1787                                         atomic_read(&session->task_running));
1788                                 ret = -ETIMEDOUT;
1789                         }
1790                 }
1791
1792                 if (ret < 0) {
1793                         int task_running = atomic_read(&session->task_running);
1794
1795                         mutex_lock(&pservice->lock);
1796                         vpu_service_dump(pservice);
1797                         if (task_running) {
1798                                 atomic_set(&session->task_running, 0);
1799                                 atomic_sub(task_running,
1800                                            &pservice->total_running);
1801                                 pr_err("%d tasks running but not returned, resetting hardware...",
1802                                        task_running);
1803                                 vpu_reset(data);
1804                                 pr_err("done\n");
1805                         }
1806                         vpu_service_session_clear(data, session);
1807                         mutex_unlock(&pservice->lock);
1808                         return ret;
1809                 }
1810
1811                 mutex_lock(&pservice->lock);
1812                 reg = list_entry(session->done.next,
1813                                  struct vpu_reg, session_link);
1814                 return_reg(data, reg, (u32 __user *)req.req);
1815                 mutex_unlock(&pservice->lock);
1816         } break;
1817         case VPU_IOC_PROBE_IOMMU_STATUS: {
1818                 int iommu_enable = 1;
1819
1820                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_PROBE_IOMMU_STATUS\n");
1821
1822                 if (copy_to_user((void __user *)arg,
1823                                  &iommu_enable, sizeof(int))) {
1824                         vpu_err("error: iommu status copy_to_user failed\n");
1825                         return -EFAULT;
1826                 }
1827         } break;
1828         default: {
1829                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1830         } break;
1831         }
1832         vpu_debug_leave();
1833         return 0;
1834 }
1835
1836 #ifdef CONFIG_COMPAT
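/*
 * 32-bit compat ioctl path: same logic as vpu_service_ioctl(), but user
 * pointers arrive as compat_uptr_t and requests use
 * struct compat_vpu_request.
 */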
1837 static long compat_vpu_service_ioctl(struct file *filp, unsigned int cmd,
1838                                      unsigned long arg)
1839 {
1840         struct vpu_subdev_data *data =
1841                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1842                              struct vpu_subdev_data, cdev);
1843         struct vpu_service_info *pservice = data->pservice;
1844         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1845
1846         vpu_debug_enter();
1847         vpu_debug(3, "cmd %x, COMPAT_VPU_IOC_SET_CLIENT_TYPE %x\n", cmd,
1848                   (u32)COMPAT_VPU_IOC_SET_CLIENT_TYPE);
1849         if (NULL == session)
1850                 return -EINVAL;
1851
1852         switch (cmd) {
1853         case COMPAT_VPU_IOC_SET_CLIENT_TYPE: {
1854                 session->type = (enum VPU_CLIENT_TYPE)arg;
1855                 vpu_debug(DEBUG_IOCTL, "compat set client type %d\n",
1856                           session->type);
1857         } break;
1858         case COMPAT_VPU_IOC_GET_HW_FUSE_STATUS: {
1859                 struct compat_vpu_request req;
1860
1861                 vpu_debug(DEBUG_IOCTL, "compat get hw status %d\n",
1862                           session->type);
1863                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1864                                    sizeof(struct compat_vpu_request))) {
1865                         vpu_err("error: compat get hw status copy_from_user failed\n");
1866                         return -EFAULT;
1867                 } else {
1868                         void *config = (session->type != VPU_ENC) ?
1869                                        ((void *)&pservice->dec_config) :
1870                                        ((void *)&pservice->enc_config);
1871                         size_t size = (session->type != VPU_ENC) ?
1872                                       (sizeof(struct vpu_dec_config)) :
1873                                       (sizeof(struct vpu_enc_config));
1874
1875                         if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1876                                          config, size)) {
1877                                 vpu_err("error: compat get hw status copy_to_user failed type %d\n",
1878                                         session->type);
1879                                 return -EFAULT;
1880                         }
1881                 }
1882         } break;
1883         case COMPAT_VPU_IOC_SET_REG: {
1884                 struct compat_vpu_request req;
1885                 struct vpu_reg *reg;
1886
1887                 vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
1888                           session->type);
1889                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1890                                    sizeof(struct compat_vpu_request))) {
1891                         vpu_err("compat set_reg copy_from_user failed\n");
1892                         return -EFAULT;
1893                 }
1894                 reg = reg_init(data, session,
1895                                compat_ptr((compat_uptr_t)req.req), req.size);
1896                 if (NULL == reg) {
1897                         return -EFAULT;
1898                 } else {
1899                         mutex_lock(&pservice->lock);
1900                         try_set_reg(data);
1901                         mutex_unlock(&pservice->lock);
1902                 }
1903         } break;
1904         case COMPAT_VPU_IOC_GET_REG: {
1905                 struct compat_vpu_request req;
1906                 struct vpu_reg *reg;
1907                 int ret;
1908
1909                 vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
1910                           session->type);
1911                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1912                                    sizeof(struct compat_vpu_request))) {
1913                         vpu_err("compat get reg copy_from_user failed\n");
1914                         return -EFAULT;
1915                 }
1916
1917                 ret = wait_event_timeout(session->wait,
1918                                          !list_empty(&session->done),
1919                                          VPU_TIMEOUT_DELAY);
1920
1921                 if (!list_empty(&session->done)) {
1922                         if (ret < 0)
1923                                 vpu_err("warning: pid %d wait task error ret %d\n",
1924                                         session->pid, ret);
1925                         ret = 0;
1926                 } else {
1927                         if (unlikely(ret < 0)) {
1928                                 vpu_err("error: pid %d wait task ret %d\n",
1929                                         session->pid, ret);
1930                         } else if (ret == 0) {
1931                                 vpu_err("error: pid %d wait %d task done timeout\n",
1932                                         session->pid,
1933                                         atomic_read(&session->task_running));
1934                                 ret = -ETIMEDOUT;
1935                         }
1936                 }
1937
1938                 if (ret < 0) {
1939                         int task_running = atomic_read(&session->task_running);
1940
1941                         mutex_lock(&pservice->lock);
1942                         vpu_service_dump(pservice);
1943                         if (task_running) {
1944                                 atomic_set(&session->task_running, 0);
1945                                 atomic_sub(task_running,
1946                                            &pservice->total_running);
1947                                 pr_err("%d tasks running but not returned, resetting hardware...",
1948                                        task_running);
1949                                 vpu_reset(data);
1950                                 pr_err("done\n");
1951                         }
1952                         vpu_service_session_clear(data, session);
1953                         mutex_unlock(&pservice->lock);
1954                         return ret;
1955                 }
1956
1957                 mutex_lock(&pservice->lock);
1958                 reg = list_entry(session->done.next,
1959                                  struct vpu_reg, session_link);
1960                 return_reg(data, reg, compat_ptr((compat_uptr_t)req.req));
1961                 mutex_unlock(&pservice->lock);
1962         } break;
1963         case COMPAT_VPU_IOC_PROBE_IOMMU_STATUS: {
1964                 int iommu_enable = 1;
1965
1966                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_PROBE_IOMMU_STATUS\n");
1967
1968                 if (copy_to_user(compat_ptr((compat_uptr_t)arg),
1969                                  &iommu_enable, sizeof(int))) {
1970                         vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
1971                         return -EFAULT;
1972                 }
1973         } break;
1974         default: {
1975                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1976         } break;
1977         }
1978         vpu_debug_leave();
1979         return 0;
1980 }
1981 #endif
1982
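/*
 * Read the hardware ID register and match it against the static
 * vcodec_info_set table to pick the register layout, task info and
 * translation tables for this block.
 */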
1983 static int vpu_service_check_hw(struct vpu_subdev_data *data)
1984 {
1985         int ret = -EINVAL, i = 0;
1986         u32 hw_id = readl_relaxed(data->regs);
1987
1988         hw_id = (hw_id >> 16) & 0xFFFF;
1989         pr_info("checking hw id %x\n", hw_id);
1990         data->hw_info = NULL;
1991         for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
1992                 struct vcodec_info *info = &vcodec_info_set[i];
1993
1994                 if (hw_id == info->hw_id) {
1995                         data->hw_id = info->hw_id;
1996                         data->hw_info = info->hw_info;
1997                         data->task_info = info->task_info;
1998                         data->trans_info = info->trans_info;
1999                         ret = 0;
2000                         break;
2001                 }
2002         }
2003         return ret;
2004 }
2005
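/*
 * Per-open session bookkeeping: every file handle gets its own
 * waiting/running/done lists, wait queue and task counter.
 */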
2006 static int vpu_service_open(struct inode *inode, struct file *filp)
2007 {
2008         struct vpu_subdev_data *data = container_of(
2009                         inode->i_cdev, struct vpu_subdev_data, cdev);
2010         struct vpu_service_info *pservice = data->pservice;
2011         struct vpu_session *session = kmalloc(sizeof(*session), GFP_KERNEL);
2012
2013         vpu_debug_enter();
2014
2015         if (NULL == session) {
2016                 vpu_err("error: unable to allocate memory for vpu_session\n");
2017                 return -ENOMEM;
2018         }
2019
2020         session->type   = VPU_TYPE_BUTT;
2021         session->pid    = current->pid;
2022         INIT_LIST_HEAD(&session->waiting);
2023         INIT_LIST_HEAD(&session->running);
2024         INIT_LIST_HEAD(&session->done);
2025         INIT_LIST_HEAD(&session->list_session);
2026         init_waitqueue_head(&session->wait);
2027         atomic_set(&session->task_running, 0);
2028         mutex_lock(&pservice->lock);
2029         list_add_tail(&session->list_session, &pservice->session);
2030         filp->private_data = (void *)session;
2031         mutex_unlock(&pservice->lock);
2032
2033         pr_debug("dev opened\n");
2034         vpu_debug_leave();
2035         return nonseekable_open(inode, filp);
2036 }
2037
2038 static int vpu_service_release(struct inode *inode, struct file *filp)
2039 {
2040         struct vpu_subdev_data *data = container_of(
2041                         inode->i_cdev, struct vpu_subdev_data, cdev);
2042         struct vpu_service_info *pservice = data->pservice;
2043         int task_running;
2044         struct vpu_session *session = (struct vpu_session *)filp->private_data;
2045
2046         vpu_debug_enter();
2047         if (NULL == session)
2048                 return -EINVAL;
2049
2050         task_running = atomic_read(&session->task_running);
2051         if (task_running) {
2052                 pr_err("error: session %d still has %d task running when closing\n",
2053                        session->pid, task_running);
2054                 msleep(50);
2055         }
2056         wake_up(&session->wait);
2057
2058         mutex_lock(&pservice->lock);
2059         /* remove this filp from the asynchronously notified filps */
2060         list_del_init(&session->list_session);
2061         vpu_service_session_clear(data, session);
2062         kfree(session);
2063         filp->private_data = NULL;
2064         mutex_unlock(&pservice->lock);
2065
2066         pr_debug("dev closed\n");
2067         vpu_debug_leave();
2068         return 0;
2069 }
2070
2071 static const struct file_operations vpu_service_fops = {
2072         .unlocked_ioctl = vpu_service_ioctl,
2073         .open           = vpu_service_open,
2074         .release        = vpu_service_release,
2075 #ifdef CONFIG_COMPAT
2076         .compat_ioctl   = compat_vpu_service_ioctl,
2077 #endif
2078 };
2079
2080 static irqreturn_t vdpu_irq(int irq, void *dev_id);
2081 static irqreturn_t vdpu_isr(int irq, void *dev_id);
2082 static irqreturn_t vepu_irq(int irq, void *dev_id);
2083 static irqreturn_t vepu_isr(int irq, void *dev_id);
2084 static void get_hw_info(struct vpu_subdev_data *data);
2085
2086 static struct device *rockchip_get_sysmmu_dev(const char *compt)
2087 {
2088         struct device_node *dn = NULL;
2089         struct platform_device *pd = NULL;
2090         struct device *ret = NULL;
2091
2092         dn = of_find_compatible_node(NULL, NULL, compt);
2093         if (!dn) {
2094                 pr_err("can't find device node %s\n", compt);
2095                 return NULL;
2096         }
2097
2098         pd = of_find_device_by_node(dn);
2099         if (!pd) {
2100                 pr_err("can't find platform device in device node %s\n", compt);
2101                 return  NULL;
2102         }
2103         ret = &pd->dev;
2104
2105         return ret;
2106 }
2107
2108 #ifdef CONFIG_IOMMU_API
2109 static inline void platform_set_sysmmu(struct device *iommu,
2110                                        struct device *dev)
2111 {
2112         dev->archdata.iommu = iommu;
2113 }
2114 #else
2115 static inline void platform_set_sysmmu(struct device *iommu,
2116                                        struct device *dev)
2117 {
2118 }
2119 #endif
2120
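/*
 * IOMMU fault handler: dump the memory regions and registers of the
 * task that owned the hardware when the fault happened, then reset it.
 */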
2121 int vcodec_sysmmu_fault_hdl(struct device *dev,
2122                             enum rk_iommu_inttype itype,
2123                             unsigned long pgtable_base,
2124                             unsigned long fault_addr, unsigned int status)
2125 {
2126         struct platform_device *pdev;
2127         struct vpu_service_info *pservice;
2128         struct vpu_subdev_data *data;
2129
2130         vpu_debug_enter();
2131
2132         if (dev == NULL) {
2133                 pr_err("invalid NULL dev\n");
2134                 return 0;
2135         }
2136
2137         pdev = container_of(dev, struct platform_device, dev);
2138         if (pdev == NULL) {
2139                 pr_err("invalid NULL platform_device\n");
2140                 return 0;
2141         }
2142
2143         data = platform_get_drvdata(pdev);
2144         if (data == NULL) {
2145                 pr_err("invalid NULL vpu_subdev_data\n");
2146                 return 0;
2147         }
2148
2149         pservice = data->pservice;
2150         if (pservice == NULL) {
2151                 pr_err("invalid NULL vpu_service_info\n");
2152                 return 0;
2153         }
2154
2155         if (pservice->reg_codec) {
2156                 struct vpu_reg *reg = pservice->reg_codec;
2157                 struct vcodec_mem_region *mem, *n;
2158                 int i = 0;
2159
2160                 pr_err("vcodec, fault addr 0x%08lx\n", fault_addr);
2161                 if (!list_empty(&reg->mem_region_list)) {
2162                         list_for_each_entry_safe(mem, n, &reg->mem_region_list,
2163                                                  reg_lnk) {
2164                                 pr_err("vcodec, reg[%02u] mem region [%02d] 0x%lx %lx\n",
2165                                        mem->reg_idx, i, mem->iova, mem->len);
2166                                 i++;
2167                         }
2168                 } else {
2169                         pr_err("no memory region mapped\n");
2170                 }
2171
2172                 if (reg->data) {
2173                         struct vpu_subdev_data *data = reg->data;
2174                         u32 *base = (u32 *)data->dec_dev.regs;
2175                         u32 len = data->hw_info->dec_reg_num;
2176
2177                         pr_err("current error register set:\n");
2178
2179                         for (i = 0; i < len; i++)
2180                                 pr_err("reg[%02d] %08x\n",
2181                                        i, readl_relaxed(base + i));
2182                 }
2183
2184                 pr_alert("vcodec, page fault occurred, resetting hw\n");
2185
2186                 /* reg->reg[101] = 1; */
2187                 vpu_reset(data);
2188         }
2189
2190         return 0;
2191 }
2192
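/*
 * Probe one codec sub-device: map its registers, identify the hardware,
 * hook up the encoder/decoder IRQs and the optional IOMMU, then create
 * the character device node and debugfs entries.
 */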
2193 static int vcodec_subdev_probe(struct platform_device *pdev,
2194                                struct vpu_service_info *pservice)
2195 {
2196         int ret = 0;
2197         struct resource *res = NULL;
2198         u32 ioaddr = 0;
2199         u8 *regs = NULL;
2200         struct vpu_hw_info *hw_info = NULL;
2201         struct device *dev = &pdev->dev;
2202         char *name = (char *)dev_name(dev);
2203         struct device_node *np = pdev->dev.of_node;
2204         struct vpu_subdev_data *data =
2205                 devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
2206         u32 iommu_en = 0;
2207         char mmu_dev_dts_name[40];
2208 
        if (!data)
                return -ENOMEM;

2209         of_property_read_u32(np, "iommu_enabled", &iommu_en);
2210
2211         pr_info("probe device %s\n", dev_name(dev));
2212
2213         data->pservice = pservice;
2214         data->dev = dev;
2215
2216         of_property_read_string(np, "name", (const char **)&name);
2217         of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
2218
2219         if (pservice->reg_base == 0) {
2220                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2221                 data->regs = devm_ioremap_resource(dev, res);
2222                 if (IS_ERR(data->regs)) {
2223                         ret = PTR_ERR(data->regs);
2224                         goto err;
2225                 }
2226                 ioaddr = res->start;
2227         } else {
2228                 data->regs = pservice->reg_base;
2229                 ioaddr = pservice->ioaddr;
2230         }
2231
2232         clear_bit(MMU_ACTIVATED, &data->state);
2233         vcodec_enter_mode(data);
2234
2235         vpu_service_power_on(pservice);
2236         ret = vpu_service_check_hw(data);
2237         if (ret < 0) {
2238                 vpu_err("error: hw info check failed\n");
2239                 goto err;
2240         }
2241
2242         hw_info = data->hw_info;
2243         regs = (u8 *)data->regs;
2244
2245         if (hw_info->dec_reg_num) {
2246                 data->dec_dev.iosize = hw_info->dec_io_size;
2247                 data->dec_dev.regs = (u32 *)(regs + hw_info->dec_offset);
2248         }
2249
2250         if (hw_info->enc_reg_num) {
2251                 data->enc_dev.iosize = hw_info->enc_io_size;
2252                 data->enc_dev.regs = (u32 *)(regs + hw_info->enc_offset);
2253         }
2254
2255         data->reg_size = max(hw_info->dec_io_size, hw_info->enc_io_size);
2256
2257         data->irq_enc = platform_get_irq_byname(pdev, "irq_enc");
2258         if (data->irq_enc > 0) {
2259                 ret = devm_request_threaded_irq(dev, data->irq_enc,
2260                                                 vepu_irq, vepu_isr,
2261                                                 IRQF_SHARED, dev_name(dev),
2262                                                 (void *)data);
2263                 if (ret) {
2264                         dev_err(dev, "error: can't request vepu irq %d\n",
2265                                 data->irq_enc);
2266                         goto err;
2267                 }
2268         }
2269         data->irq_dec = platform_get_irq_byname(pdev, "irq_dec");
2270         if (data->irq_dec > 0) {
2271                 ret = devm_request_threaded_irq(dev, data->irq_dec,
2272                                                 vdpu_irq, vdpu_isr,
2273                                                 IRQF_SHARED, dev_name(dev),
2274                                                 (void *)data);
2275                 if (ret) {
2276                         dev_err(dev, "error: can't request vdpu irq %d\n",
2277                                 data->irq_dec);
2278                         goto err;
2279                 }
2280         }
2281         atomic_set(&data->dec_dev.irq_count_codec, 0);
2282         atomic_set(&data->dec_dev.irq_count_pp, 0);
2283         atomic_set(&data->enc_dev.irq_count_codec, 0);
2284         atomic_set(&data->enc_dev.irq_count_pp, 0);
2285
2286         if (iommu_en) {
2287                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
2288                         sprintf(mmu_dev_dts_name,
2289                                 HEVC_IOMMU_COMPATIBLE_NAME);
2290                 else if (data->mode == VCODEC_RUNNING_MODE_VPU)
2291                         sprintf(mmu_dev_dts_name,
2292                                 VPU_IOMMU_COMPATIBLE_NAME);
2293                 else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2294                         sprintf(mmu_dev_dts_name, VDEC_IOMMU_COMPATIBLE_NAME);
2295                 else
2296                         sprintf(mmu_dev_dts_name,
2297                                 HEVC_IOMMU_COMPATIBLE_NAME);
2298
2299                 data->mmu_dev =
2300                         rockchip_get_sysmmu_dev(mmu_dev_dts_name);
2301
2302                 if (data->mmu_dev)
2303                         platform_set_sysmmu(data->mmu_dev, dev);
2304
2305                 rockchip_iovmm_set_fault_handler(dev, vcodec_sysmmu_fault_hdl);
2306         }
2307
2308         get_hw_info(data);
2309         pservice->auto_freq = true;
2310
2311         vcodec_exit_mode(data);
2312         /* create device node */
2313         ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
2314         if (ret) {
2315                 dev_err(dev, "alloc dev_t failed\n");
2316                 goto err;
2317         }
2318
2319         cdev_init(&data->cdev, &vpu_service_fops);
2320
2321         data->cdev.owner = THIS_MODULE;
2322         data->cdev.ops = &vpu_service_fops;
2323
2324         ret = cdev_add(&data->cdev, data->dev_t, 1);
2325
2326         if (ret) {
2327                 dev_err(dev, "add dev_t failed\n");
2328                 goto err;
2329         }
2330
2331         data->cls = class_create(THIS_MODULE, name);
2332
2333         if (IS_ERR(data->cls)) {
2334                 ret = PTR_ERR(data->cls);
2335                 dev_err(dev, "class_create err:%d\n", ret);
2336                 goto err;
2337         }
2338
2339         data->child_dev = device_create(data->cls, dev,
2340                 data->dev_t, NULL, name);
2341
2342         platform_set_drvdata(pdev, data);
2343
2344         INIT_LIST_HEAD(&data->lnk_service);
2345         list_add_tail(&data->lnk_service, &pservice->subdev_list);
2346
2347 #ifdef CONFIG_DEBUG_FS
2348         data->debugfs_dir = vcodec_debugfs_create_device_dir(name, parent);
2349         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2350                 data->debugfs_file_regs =
2351                         debugfs_create_file("regs", 0664, data->debugfs_dir,
2352                                         data, &debug_vcodec_fops);
2353         else
2354                 vpu_err("create debugfs dir %s failed\n", name);
2355 #endif
2356         return 0;
2357 err:
2358         if (data->child_dev) {
2359                 device_destroy(data->cls, data->dev_t);
2360                 cdev_del(&data->cdev);
2361                 unregister_chrdev_region(data->dev_t, 1);
2362         }
2363
2364         if (data->cls)
2365                 class_destroy(data->cls);
2366         return -1;
2367 }
2368
2369 static void vcodec_subdev_remove(struct vpu_subdev_data *data)
2370 {
2371         struct vpu_service_info *pservice = data->pservice;
2372
2373         mutex_lock(&pservice->lock);
2374         cancel_delayed_work_sync(&pservice->power_off_work);
2375         vpu_service_power_off(pservice);
2376         mutex_unlock(&pservice->lock);
2377
2378         device_destroy(data->cls, data->dev_t);
2379         class_destroy(data->cls);
2380         cdev_del(&data->cdev);
2381         unregister_chrdev_region(data->dev_t, 1);
2382
2383 #ifdef CONFIG_DEBUG_FS
2384         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2385                 debugfs_remove_recursive(data->debugfs_dir);
2386 #endif
2387 }
2388
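/*
 * Parse the device tree properties shared by all sub-devices: the combo
 * mode control bits, the GRF handle and the reset controls.
 */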
2389 static void vcodec_read_property(struct device_node *np,
2390                                  struct vpu_service_info *pservice)
2391 {
2392         pservice->mode_bit = 0;
2393         pservice->mode_ctrl = 0;
2394         pservice->subcnt = 0;
2395         pservice->grf_base = NULL;
2396
2397         of_property_read_u32(np, "subcnt", &pservice->subcnt);
2398
2399         if (pservice->subcnt > 1) {
2400                 of_property_read_u32(np, "mode_bit", &pservice->mode_bit);
2401                 of_property_read_u32(np, "mode_ctrl", &pservice->mode_ctrl);
2402         }
2403 #ifdef CONFIG_MFD_SYSCON
2404         pservice->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
2405         if (IS_ERR_OR_NULL(pservice->grf)) {
2406                 pservice->grf = NULL;
2407 #ifdef CONFIG_ARM
2408                 pservice->grf_base = RK_GRF_VIRT;
2409 #else
2410                 vpu_err("can't find vpu grf property\n");
2411                 return;
2412 #endif
2413         }
2414 #else
2415 #ifdef CONFIG_ARM
2416         pservice->grf_base = RK_GRF_VIRT;
2417 #else
2418         vpu_err("can't find vpu grf property\n");
2419         return;
2420 #endif
2421 #endif
2422
2423 #ifdef CONFIG_RESET_CONTROLLER
2424         pservice->rst_a = devm_reset_control_get(pservice->dev, "video_a");
2425         pservice->rst_h = devm_reset_control_get(pservice->dev, "video_h");
2426         pservice->rst_v = devm_reset_control_get(pservice->dev, "video");
2427
2428         if (IS_ERR_OR_NULL(pservice->rst_a)) {
2429                 pr_warn("No aclk reset resource defined\n");
2430                 pservice->rst_a = NULL;
2431         }
2432
2433         if (IS_ERR_OR_NULL(pservice->rst_h)) {
2434                 pr_warn("No hclk reset resource defined\n");
2435                 pservice->rst_h = NULL;
2436         }
2437
2438         if (IS_ERR_OR_NULL(pservice->rst_v)) {
2439                 pr_warn("No core reset resource defined\n");
2440                 pservice->rst_v = NULL;
2441         }
2442 #endif
2443
2444         of_property_read_string(np, "name", (const char **)&pservice->name);
2445 }
2446
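/*
 * Initialize the service bookkeeping: task lists, lock, counters, the
 * delayed power-off work and the ion client.
 */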
2447 static void vcodec_init_drvdata(struct vpu_service_info *pservice)
2448 {
2449         pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2450         pservice->curr_mode = -1;
2451
2452         wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
2453         INIT_LIST_HEAD(&pservice->waiting);
2454         INIT_LIST_HEAD(&pservice->running);
2455         mutex_init(&pservice->lock);
2456
2457         INIT_LIST_HEAD(&pservice->done);
2458         INIT_LIST_HEAD(&pservice->session);
2459         INIT_LIST_HEAD(&pservice->subdev_list);
2460
2461         pservice->reg_pproc     = NULL;
2462         atomic_set(&pservice->total_running, 0);
2463         atomic_set(&pservice->enabled,       0);
2464         atomic_set(&pservice->power_on_cnt,  0);
2465         atomic_set(&pservice->power_off_cnt, 0);
2466         atomic_set(&pservice->reset_request, 0);
2467
2468         INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
2469         pservice->last.tv64 = 0;
2470
2471         pservice->ion_client = rockchip_ion_client_create("vpu");
2472         if (IS_ERR(pservice->ion_client)) {
2473                 vpu_err("failed to create ion client for vcodec ret %ld\n",
2474                         PTR_ERR(pservice->ion_client));
2475         } else {
2476                 vpu_debug(DEBUG_IOMMU, "vcodec ion client create success!\n");
2477         }
2478 }
2479
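/*
 * Platform probe: read the properties, set up the service, then probe
 * either the listed sub-devices (combo mode) or this device itself.
 */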
2480 static int vcodec_probe(struct platform_device *pdev)
2481 {
2482         int i;
2483         int ret = 0;
2484         struct resource *res = NULL;
2485         struct device *dev = &pdev->dev;
2486         struct device_node *np = pdev->dev.of_node;
2487         struct vpu_service_info *pservice =
2488                 devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
2489 
        if (!pservice)
                return -ENOMEM;

2490         pservice->dev = dev;
2491
2492         vcodec_read_property(np, pservice);
2493         vcodec_init_drvdata(pservice);
2494
2495         if (strncmp(pservice->name, "hevc_service", 12) == 0)
2496                 pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
2497         else if (strncmp(pservice->name, "vpu_service", 11) == 0)
2498                 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2499         else if (strncmp(pservice->name, "rkvdec", 6) == 0)
2500                 pservice->dev_id = VCODEC_DEVICE_ID_RKVDEC;
2501         else
2502                 pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
2503
2504         ret = vpu_get_clk(pservice);
2505         if (ret < 0)
                goto err;
2506
2507         if (of_property_read_bool(np, "reg")) {
2508                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2509
2510                 pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
2511                 if (IS_ERR(pservice->reg_base)) {
2512                         vpu_err("ioremap registers base failed\n");
2513                         ret = PTR_ERR(pservice->reg_base);
2514                         goto err;
2515                 }
2516                 pservice->ioaddr = res->start;
2517         } else {
2518                 pservice->reg_base = 0;
2519         }
2520
2521         pm_runtime_enable(dev);
2522
2523         if (of_property_read_bool(np, "subcnt")) {
2524                 for (i = 0; i < pservice->subcnt; i++) {
2525                         struct device_node *sub_np;
2526                         struct platform_device *sub_pdev;
2527
2528                         sub_np = of_parse_phandle(np, "rockchip,sub", i);
2529                         sub_pdev = of_find_device_by_node(sub_np);
2530
2531                         vcodec_subdev_probe(sub_pdev, pservice);
2532                 }
2533         } else {
2534                 vcodec_subdev_probe(pdev, pservice);
2535         }
2536
2537         vpu_service_power_off(pservice);
2538
2539         pr_info("init success\n");
2540
2541         return 0;
2542
2543 err:
2544         pr_info("init failed\n");
2545         vpu_service_power_off(pservice);
2546         vpu_put_clk(pservice);
2547         wake_lock_destroy(&pservice->wake_lock);
2548
2549         return ret;
2550 }
2551
2552 static int vcodec_remove(struct platform_device *pdev)
2553 {
2554         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2555
2556         vcodec_subdev_remove(data);
2557
2558         pm_runtime_disable(data->pservice->dev);
2559
2560         return 0;
2561 }
2562
2563 #if defined(CONFIG_OF)
2564 static const struct of_device_id vcodec_service_dt_ids[] = {
2565         {.compatible = "rockchip,vpu_service",},
2566         {.compatible = "rockchip,hevc_service",},
2567         {.compatible = "rockchip,vpu_combo",},
2568         {.compatible = "rockchip,rkvdec",},
2569         {},
2570 };
2571 #endif
2572
2573 static struct platform_driver vcodec_driver = {
2574         .probe = vcodec_probe,
2575         .remove = vcodec_remove,
2576         .driver = {
2577                 .name = "vcodec",
2578                 .owner = THIS_MODULE,
2579 #if defined(CONFIG_OF)
2580                 .of_match_table = of_match_ptr(vcodec_service_dt_ids),
2581 #endif
2582         },
2583 };
2584
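/*
 * Fill in the decoder/encoder capability structures reported to user
 * space and decide whether automatic frequency scaling is used.
 */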
2585 static void get_hw_info(struct vpu_subdev_data *data)
2586 {
2587         struct vpu_service_info *pservice = data->pservice;
2588         struct vpu_dec_config *dec = &pservice->dec_config;
2589         struct vpu_enc_config *enc = &pservice->enc_config;
2590
2591         if (cpu_is_rk2928() || cpu_is_rk3036() ||
2592             cpu_is_rk30xx() || cpu_is_rk312x() ||
2593             cpu_is_rk3188())
2594                 dec->max_dec_pic_width = 1920;
2595         else
2596                 dec->max_dec_pic_width = 4096;
2597
2598         if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2599                 dec->h264_support = 3;
2600                 dec->jpeg_support = 1;
2601                 dec->mpeg4_support = 2;
2602                 dec->vc1_support = 3;
2603                 dec->mpeg2_support = 1;
2604                 dec->pp_support = 1;
2605                 dec->sorenson_support = 1;
2606                 dec->ref_buf_support = 3;
2607                 dec->vp6_support = 1;
2608                 dec->vp7_support = 1;
2609                 dec->vp8_support = 1;
2610                 dec->avs_support = 1;
2611                 dec->jpeg_ext_support = 0;
2612                 dec->custom_mpeg4_support = 1;
2613                 dec->reserve = 0;
2614                 dec->mvc_support = 1;
2615
2616                 if (!cpu_is_rk3036()) {
2617                         u32 config_reg = readl_relaxed(data->enc_dev.regs + 63);
2618
2619                         enc->max_encoded_width = config_reg & ((1 << 11) - 1);
2620                         enc->h264_enabled = 1;
2621                         enc->mpeg4_enabled = (config_reg >> 26) & 1;
2622                         enc->jpeg_enabled = 1;
2623                         enc->vs_enabled = (config_reg >> 24) & 1;
2624                         enc->rgb_enabled = (config_reg >> 28) & 1;
2625                         enc->reg_size = data->reg_size;
2626                         enc->reserv[0] = 0;
2627                         enc->reserv[1] = 0;
2628                 }
2629
2630                 pservice->auto_freq = true;
2631                 vpu_debug(DEBUG_EXTRA_INFO, "vpu_service set to auto frequency mode\n");
2632                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2633
2634                 pservice->bug_dec_addr = cpu_is_rk30xx();
2635         } else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC) {
2636                 pservice->auto_freq = true;
2637                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2638         } else {
2639                 /* disable frequency switching in hevc. */
2640                 pservice->auto_freq = false;
2641         }
2642 }
2643
2644 static bool check_irq_err(struct vpu_task_info *task, u32 irq_status)
2645 {
2646         vpu_debug(DEBUG_IRQ_CHECK, "task %s status %08x mask %08x\n",
2647                   task->name, irq_status, task->error_mask);
2648
2649         return (task->error_mask & irq_status) ? true : false;
2650 }
2651
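/*
 * Hard IRQ half for the decoder/post-processor: latch and clear the
 * interrupt status, request a reset when error bits are set, then wake
 * the threaded handler vdpu_isr().
 */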
2652 static irqreturn_t vdpu_irq(int irq, void *dev_id)
2653 {
2654         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2655         struct vpu_service_info *pservice = data->pservice;
2656         struct vpu_task_info *task = NULL;
2657         struct vpu_device *dev = &data->dec_dev;
2658         u32 hw_id = data->hw_info->hw_id;
2659         u32 raw_status;
2660         u32 dec_status;
2661
2662         task = &data->task_info[TASK_DEC];
2663
2664         raw_status = readl_relaxed(dev->regs + task->reg_irq);
2665         dec_status = raw_status;
2666
2667         vpu_debug(DEBUG_TASK_INFO, "vdpu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2668                   task->reg_irq, dec_status,
2669                   task->irq_mask, task->ready_mask, task->error_mask);
2670
2671         if (dec_status & task->irq_mask) {
2672                 time_record(task, 1);
2673                 vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq dec status %08x\n",
2674                           dec_status);
2675                 if ((dec_status & 0x40001) == 0x40001) {
2676                         do {
2677                                 dec_status =
2678                                         readl_relaxed(dev->regs +
2679                                                 task->reg_irq);
2680                         } while ((dec_status & 0x40001) == 0x40001);
2681                 }
2682
2683                 if (check_irq_err(task, dec_status))
2684                         atomic_add(1, &pservice->reset_request);
2685
2686                 writel_relaxed(0, dev->regs + task->reg_irq);
2687
2688                 /*
2689                  * NOTE: rkvdec needs a reset after each task to avoid a
2690                  *       timeout error when switching from H.264 to H.265
2691                  */
2692                 if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2693                         writel(0x100000, dev->regs + task->reg_irq);
2694
2695                 /* set clock gating to save power */
2696                 writel(task->gating_mask, dev->regs + task->reg_irq);
2697
2698                 atomic_add(1, &dev->irq_count_codec);
2699                 time_diff(task);
2700         }
2701
2702         task = &data->task_info[TASK_PP];
2703         if (hw_id != HEVC_ID && hw_id != RKV_DEC_ID) {
2704                 u32 pp_status = readl_relaxed(dev->regs + task->irq_mask);
2705
2706                 if (pp_status & task->irq_mask) {
2707                         time_record(task, 1);
2708                         vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq pp status %08x\n",
2709                                   pp_status);
2710
2711                         if (check_irq_err(task, dec_status))
2712                                 atomic_add(1, &pservice->reset_request);
2713
2714                         /* clear pp IRQ */
2715                         writel_relaxed(pp_status & (~task->reg_irq),
2716                                        dev->regs + task->irq_mask);
2717                         atomic_add(1, &dev->irq_count_pp);
2718                         time_diff(task);
2719                 }
2720         }
2721
2722         pservice->irq_status = raw_status;
2723
2724         if (atomic_read(&dev->irq_count_pp) ||
2725             atomic_read(&dev->irq_count_codec))
2726                 return IRQ_WAKE_THREAD;
2727         else
2728                 return IRQ_NONE;
2729 }
2730
2731 static irqreturn_t vdpu_isr(int irq, void *dev_id)
2732 {
2733         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2734         struct vpu_service_info *pservice = data->pservice;
2735         struct vpu_device *dev = &data->dec_dev;
2736
2737         mutex_lock(&pservice->lock);
2738         if (atomic_read(&dev->irq_count_codec)) {
2739                 atomic_sub(1, &dev->irq_count_codec);
2740                 if (pservice->reg_codec == NULL) {
2741                         vpu_err("error: dec isr with no task waiting\n");
2742                 } else {
2743                         reg_from_run_to_done(data, pservice->reg_codec);
2744                         /* soft reset to avoid an unrecoverable vpu timeout */
2745                         VDPU_SOFT_RESET(data->regs);
2746                 }
2747         }
2748
2749         if (atomic_read(&dev->irq_count_pp)) {
2750                 atomic_sub(1, &dev->irq_count_pp);
2751                 if (pservice->reg_pproc == NULL)
2752                         vpu_err("error: pp isr with no task waiting\n");
2753                 else
2754                         reg_from_run_to_done(data, pservice->reg_pproc);
2755         }
2756         try_set_reg(data);
2757         mutex_unlock(&pservice->lock);
2758         return IRQ_HANDLED;
2759 }
2760
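/*
 * Hard IRQ half for the encoder, mirrored by vepu_isr() as the threaded
 * half that completes the task and schedules the next one.
 */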
2761 static irqreturn_t vepu_irq(int irq, void *dev_id)
2762 {
2763         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2764         struct vpu_service_info *pservice = data->pservice;
2765         struct vpu_task_info *task = &data->task_info[TASK_ENC];
2766         struct vpu_device *dev = &data->enc_dev;
2767         u32 irq_status;
2768
2769         irq_status = readl_relaxed(dev->regs + task->reg_irq);
2770
2771         vpu_debug(DEBUG_TASK_INFO, "vepu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2772                   task->reg_irq, irq_status,
2773                   task->irq_mask, task->ready_mask, task->error_mask);
2774
2775         vpu_debug(DEBUG_IRQ_STATUS, "vepu_irq enc status %08x\n", irq_status);
2776
2777         if (likely(irq_status & task->irq_mask)) {
2778                 time_record(task, 1);
2779
2780                 if (check_irq_err(task, irq_status))
2781                         atomic_add(1, &pservice->reset_request);
2782
2783                 /* clear enc IRQ */
2784                 writel_relaxed(irq_status & (~task->irq_mask),
2785                                dev->regs + task->reg_irq);
2786
2787                 atomic_add(1, &dev->irq_count_codec);
2788                 time_diff(task);
2789         }
2790
2791         pservice->irq_status = irq_status;
2792
2793         if (atomic_read(&dev->irq_count_codec))
2794                 return IRQ_WAKE_THREAD;
2795         else
2796                 return IRQ_NONE;
2797 }
2798
2799 static irqreturn_t vepu_isr(int irq, void *dev_id)
2800 {
2801         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2802         struct vpu_service_info *pservice = data->pservice;
2803         struct vpu_device *dev = &data->enc_dev;
2804
2805         mutex_lock(&pservice->lock);
2806         if (atomic_read(&dev->irq_count_codec)) {
2807                 atomic_sub(1, &dev->irq_count_codec);
2808                 if (!pservice->reg_codec)
2809                         vpu_err("error: enc isr with no task waiting\n");
2810                 else
2811                         reg_from_run_to_done(data, pservice->reg_codec);
2812         }
2813         try_set_reg(data);
2814         mutex_unlock(&pservice->lock);
2815         return IRQ_HANDLED;
2816 }
2817
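/*
 * Module init: register the platform driver and, when CONFIG_DEBUG_FS is
 * enabled, create the top-level "vcodec" debugfs directory used by the
 * per-device entries below.
 */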
2818 static int __init vcodec_service_init(void)
2819 {
2820         int ret = platform_driver_register(&vcodec_driver);
2821
2822         if (ret) {
2823                 vpu_err("Platform driver register failed (%d).\n", ret);
2824                 return ret;
2825         }
2826
2827 #ifdef CONFIG_DEBUG_FS
2828         vcodec_debugfs_init();
2829 #endif
2830
2831         return ret;
2832 }
2833
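/* Module exit: tear down debugfs (if enabled) and unregister the driver. */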
2834 static void __exit vcodec_service_exit(void)
2835 {
2836 #ifdef CONFIG_DEBUG_FS
2837         vcodec_debugfs_exit();
2838 #endif
2839
2840         platform_driver_unregister(&vcodec_driver);
2841 }
2842
2843 module_init(vcodec_service_init);
2844 module_exit(vcodec_service_exit);
2845 MODULE_LICENSE("GPL");
2846
2847 #ifdef CONFIG_DEBUG_FS
2848 #include <linux/seq_file.h>
2849
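/*
 * Optional debugfs support: vcodec_debugfs_init()/exit() manage the root
 * "vcodec" directory, vcodec_debugfs_create_device_dir() gives each
 * sub-device its own directory, and debug_vcodec_show() dumps the hardware
 * registers and per-session task queues through a seq_file.
 */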
2850 static int vcodec_debugfs_init(void)
2851 {
2852         parent = debugfs_create_dir("vcodec", NULL);
2853         if (!parent)
2854                 return -ENOMEM;
2855
2856         return 0;
2857 }
2858
2859 static void vcodec_debugfs_exit(void)
2860 {
2861         debugfs_remove(parent);
2862 }
2863
2864 static struct dentry *vcodec_debugfs_create_device_dir(
2865                 char *dirname, struct dentry *parent)
2866 {
2867         return debugfs_create_dir(dirname, parent);
2868 }
2869
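/*
 * seq_file show callback: with the service powered on and the lock held,
 * dump the raw encoder/decoder register windows, then list the waiting,
 * running and done register sets of every open session along with the
 * power on/off counters.
 */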
2870 static int debug_vcodec_show(struct seq_file *s, void *unused)
2871 {
2872         struct vpu_subdev_data *data = s->private;
2873         struct vpu_service_info *pservice = data->pservice;
2874         unsigned int i, n;
2875         struct vpu_reg *reg, *reg_tmp;
2876         struct vpu_session *session, *session_tmp;
2877
2878         mutex_lock(&pservice->lock);
2879         vpu_service_power_on(pservice);
2880         if (data->hw_info->hw_id != HEVC_ID) {
2881                 seq_puts(s, "\nENC Registers:\n");
2882                 n = data->enc_dev.iosize >> 2;
2883
2884                 for (i = 0; i < n; i++)
2885                         seq_printf(s, "\tswreg%d = %08X\n", i,
2886                                    readl_relaxed(data->enc_dev.regs + i));
2887         }
2888
2889         seq_puts(s, "\nDEC Registers:\n");
2890
2891         n = data->dec_dev.iosize >> 2;
2892         for (i = 0; i < n; i++)
2893                 seq_printf(s, "\tswreg%d = %08X\n", i,
2894                            readl_relaxed(data->dec_dev.regs + i));
2895
2896         seq_puts(s, "\nvpu service status:\n");
2897
2898         list_for_each_entry_safe(session, session_tmp,
2899                                  &pservice->session, list_session) {
2900                 seq_printf(s, "session pid %d type %d:\n",
2901                            session->pid, session->type);
2902
2903                 list_for_each_entry_safe(reg, reg_tmp,
2904                                          &session->waiting, session_link) {
2905                         seq_printf(s, "waiting register set %p\n", reg);
2906                 }
2907                 list_for_each_entry_safe(reg, reg_tmp,
2908                                          &session->running, session_link) {
2909                         seq_printf(s, "running register set %p\n", reg);
2910                 }
2911                 list_for_each_entry_safe(reg, reg_tmp,
2912                                          &session->done, session_link) {
2913                         seq_printf(s, "done    register set %p\n", reg);
2914                 }
2915         }
2916
2917         seq_printf(s, "\npower counter: on %d off %d\n",
2918                    atomic_read(&pservice->power_on_cnt),
2919                    atomic_read(&pservice->power_off_cnt));
2920
2921         mutex_unlock(&pservice->lock);
2922         vpu_service_power_off(pservice);
2923
2924         return 0;
2925 }
2926
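/*
 * open() hook for the debugfs node. The file_operations that binds it to a
 * per-device entry is expected to be defined elsewhere in this file; from
 * userspace the dump can then be read with something like
 * "cat /sys/kernel/debug/vcodec/<device>/<node>" (exact names depend on how
 * the entry is created).
 */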
2927 static int debug_vcodec_open(struct inode *inode, struct file *file)
2928 {
2929         return single_open(file, debug_vcodec_show, inode->i_private);
2930 }
2931
2932 #endif
2933