rockchip/vcodec: add shutdown ops
[firefly-linux-kernel-4.4.55.git] drivers/video/rockchip/vcodec/vcodec_service.c
1 /**
2  * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
3  * author: chenhengming chm@rock-chips.com
4  *         Alpha Lin, alpha.lin@rock-chips.com
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19 #include <linux/clk.h>
20 #include <linux/compat.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/module.h>
25 #include <linux/fs.h>
26 #include <linux/mm.h>
27 #include <linux/platform_device.h>
28 #include <linux/reset.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
31 #include <linux/wakelock.h>
32 #include <linux/cdev.h>
33 #include <linux/of.h>
34 #include <linux/of_platform.h>
35 #include <linux/of_irq.h>
36 #include <linux/regmap.h>
37 #include <linux/mfd/syscon.h>
38 #include <linux/uaccess.h>
39 #include <linux/debugfs.h>
40 #include <linux/pm_runtime.h>
41
42 #include <linux/rockchip/cpu.h>
43 #include <linux/rockchip/cru.h>
44 #include <linux/rockchip/pmu.h>
45 #include <linux/rockchip/grf.h>
46
47 #if defined(CONFIG_ION_ROCKCHIP)
48 #include <linux/rockchip_ion.h>
49 #endif
50
51 #include <linux/rockchip-iovmm.h>
52 #include <linux/dma-buf.h>
53
54 #include "vcodec_hw_info.h"
55 #include "vcodec_hw_vpu.h"
56 #include "vcodec_hw_rkv.h"
57 #include "vcodec_hw_vpu2.h"
58
59 #include "vcodec_service.h"
60
61 /*
62  * debug flag usage:
63  * +------+-------------------+
64  * | 8bit |      24bit        |
65  * +------+-------------------+
66  *  bits  0~23 are for different information types
67  * bits 24~31 are for the information print format
68  */
69
70 #define DEBUG_POWER                             0x00000001
71 #define DEBUG_CLOCK                             0x00000002
72 #define DEBUG_IRQ_STATUS                        0x00000004
73 #define DEBUG_IOMMU                             0x00000008
74 #define DEBUG_IOCTL                             0x00000010
75 #define DEBUG_FUNCTION                          0x00000020
76 #define DEBUG_REGISTER                          0x00000040
77 #define DEBUG_EXTRA_INFO                        0x00000080
78 #define DEBUG_TIMING                            0x00000100
79 #define DEBUG_TASK_INFO                         0x00000200
80
81 #define DEBUG_SET_REG                           0x00001000
82 #define DEBUG_GET_REG                           0x00002000
83 #define DEBUG_PPS_FILL                          0x00004000
84 #define DEBUG_IRQ_CHECK                         0x00008000
85 #define DEBUG_CACHE_32B                         0x00010000
86
87 #define PRINT_FUNCTION                          0x80000000
88 #define PRINT_LINE                              0x40000000
89
90 static int debug;
91 module_param(debug, int, S_IRUGO | S_IWUSR);
92 MODULE_PARM_DESC(debug, "bit switch for vcodec_service debug information");
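
/*
 * Example (illustrative only): to trace iommu mapping and task timing with the
 * function/line prefix, combine the bit flags defined above, e.g.
 *
 *   debug = PRINT_FUNCTION | DEBUG_TIMING | DEBUG_IOMMU = 0x80000108
 *
 * and write that value to the "debug" parameter exposed by module_param()
 * above (typically /sys/module/<module name>/parameters/debug).
 */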
93
94 #define VCODEC_CLOCK_ENABLE     1
95
96 /*
97  * hardware information organization
98  *
99  * In order to support multiple hardware blocks with different versions, the
100  * hardware information is organized as follows:
101  *
102  * 1. First, index the hardware by register size / position.
103  *    This information is fixed for each hardware block and does not relate to
104  *    the runtime work flow; it only relates to resource allocation.
105  *    Descriptor: struct vpu_hw_info
106  *
107  * 2. Then, index the hardware by runtime configuration.
108  *    This information is related to runtime setting behavior, including the
109  *    enable register, irq register and other key control flags.
110  *    Descriptor: struct vpu_task_info
111  *
112  * 3. Finally, in the iommu case the fd translation is required.
113  *    Descriptor: struct vpu_trans_info
114  */
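
/*
 * Illustrative note (an assumption drawn from the table below, not a statement
 * of the probe code): a matching hw_id selects one vcodec_info entry, and that
 * entry supplies hw_info (register layout), task_info (runtime control) and
 * trans_info (fd translation tables) for the sub-device.
 */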
115
116 enum VPU_FREQ {
117         VPU_FREQ_200M,
118         VPU_FREQ_266M,
119         VPU_FREQ_300M,
120         VPU_FREQ_400M,
121         VPU_FREQ_500M,
122         VPU_FREQ_600M,
123         VPU_FREQ_DEFAULT,
124         VPU_FREQ_BUT,
125 };
126
127 struct extra_info_elem {
128         u32 index;
129         u32 offset;
130 };
131
132 #define EXTRA_INFO_MAGIC        0x4C4A46
133
134 struct extra_info_for_iommu {
135         u32 magic;
136         u32 cnt;
137         struct extra_info_elem elem[20];
138 };
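
/*
 * Illustrative layout note: userspace is expected to append this block right
 * after the register array it submits; when magic equals EXTRA_INFO_MAGIC,
 * each elem[i].offset is added to the already translated value in
 * reg[elem[i].index] (see vcodec_bufid_to_iova() below).
 */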
139
140 #define MHZ                                     (1000*1000)
141 #define SIZE_REG(reg)                           ((reg)*4)
142
143 static struct vcodec_info vcodec_info_set[] = {
144         [0] = {
145                 .hw_id          = VPU_ID_8270,
146                 .hw_info        = &hw_vpu_8270,
147                 .task_info      = task_vpu,
148                 .trans_info     = trans_vpu,
149         },
150         [1] = {
151                 .hw_id          = VPU_ID_4831,
152                 .hw_info        = &hw_vpu_4831,
153                 .task_info      = task_vpu,
154                 .trans_info     = trans_vpu,
155         },
156         [2] = {
157                 .hw_id          = VPU_DEC_ID_9190,
158                 .hw_info        = &hw_vpu_9190,
159                 .task_info      = task_vpu,
160                 .trans_info     = trans_vpu,
161         },
162         [3] = {
163                 .hw_id          = HEVC_ID,
164                 .hw_info        = &hw_rkhevc,
165                 .task_info      = task_rkv,
166                 .trans_info     = trans_rkv,
167         },
168         [4] = {
169                 .hw_id          = RKV_DEC_ID,
170                 .hw_info        = &hw_rkvdec,
171                 .task_info      = task_rkv,
172                 .trans_info     = trans_rkv,
173         },
174         [5] = {
175                 .hw_id          = VPU2_ID,
176                 .hw_info        = &hw_vpu2,
177                 .task_info      = task_vpu2,
178                 .trans_info     = trans_vpu2,
179         },
180 };
181
182 #define DEBUG
183 #ifdef DEBUG
184 #define vpu_debug_func(type, fmt, args...)                      \
185         do {                                                    \
186                 if (unlikely(debug & type)) {                   \
187                         pr_info("%s:%d: " fmt,                  \
188                                  __func__, __LINE__, ##args);   \
189                 }                                               \
190         } while (0)
191 #define vpu_debug(type, fmt, args...)                           \
192         do {                                                    \
193                 if (unlikely(debug & type)) {                   \
194                         pr_info(fmt, ##args);                   \
195                 }                                               \
196         } while (0)
197 #else
198 #define vpu_debug_func(level, fmt, args...)
199 #define vpu_debug(level, fmt, args...)
200 #endif
201
202 #define vpu_debug_enter() vpu_debug_func(DEBUG_FUNCTION, "enter\n")
203 #define vpu_debug_leave() vpu_debug_func(DEBUG_FUNCTION, "leave\n")
204
205 #define vpu_err(fmt, args...)                           \
206                 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
207
208 enum VPU_DEC_FMT {
209         VPU_DEC_FMT_H264,
210         VPU_DEC_FMT_MPEG4,
211         VPU_DEC_FMT_H263,
212         VPU_DEC_FMT_JPEG,
213         VPU_DEC_FMT_VC1,
214         VPU_DEC_FMT_MPEG2,
215         VPU_DEC_FMT_MPEG1,
216         VPU_DEC_FMT_VP6,
217         VPU_DEC_FMT_RESERV0,
218         VPU_DEC_FMT_VP7,
219         VPU_DEC_FMT_VP8,
220         VPU_DEC_FMT_AVS,
221         VPU_DEC_FMT_RES
222 };
223
224 /**
225  * struct for a process session which connects to the vpu
226  *
227  * @author ChenHengming (2011-5-3)
228  */
229 struct vpu_session {
230         enum VPU_CLIENT_TYPE type;
231         /* a linked list of data so we can access them for debugging */
232         struct list_head list_session;
233         /* a linked list of register data waiting for process */
234         struct list_head waiting;
235         /* a linked list of register data in processing */
236         struct list_head running;
237         /* a linked list of register data processed */
238         struct list_head done;
239         wait_queue_head_t wait;
240         pid_t pid;
241         atomic_t task_running;
242 };
243
244 /**
245  * struct for process register set
246  * struct for a process register set
247  * @author ChenHengming (2011-5-4)
248  */
249 struct vpu_reg {
250         enum VPU_CLIENT_TYPE type;
251         enum VPU_FREQ freq;
252         struct vpu_session *session;
253         struct vpu_subdev_data *data;
254         struct vpu_task_info *task;
255         const struct vpu_trans_info *trans;
256
257         /* link to vpu service session */
258         struct list_head session_link;
259         /* link to register set list */
260         struct list_head status_link;
261
262         unsigned long size;
263         struct list_head mem_region_list;
264         u32 dec_base;
265         u32 *reg;
266 };
267
268 struct vpu_device {
269         atomic_t irq_count_codec;
270         atomic_t irq_count_pp;
271         unsigned int iosize;
272         u32 *regs;
273 };
274
275 enum vcodec_device_id {
276         VCODEC_DEVICE_ID_VPU,
277         VCODEC_DEVICE_ID_HEVC,
278         VCODEC_DEVICE_ID_COMBO,
279         VCODEC_DEVICE_ID_RKVDEC,
280         VCODEC_DEVICE_ID_BUTT
281 };
282
283 enum VCODEC_RUNNING_MODE {
284         VCODEC_RUNNING_MODE_NONE = -1,
285         VCODEC_RUNNING_MODE_VPU,
286         VCODEC_RUNNING_MODE_HEVC,
287         VCODEC_RUNNING_MODE_RKVDEC
288 };
289
290 struct vcodec_mem_region {
291         struct list_head srv_lnk;
292         struct list_head reg_lnk;
293         struct list_head session_lnk;
294         unsigned long iova;     /* virtual address for iommu */
295         unsigned long len;
296         u32 reg_idx;
297         struct ion_handle *hdl;
298 };
299
300 enum vpu_ctx_state {
301         MMU_ACTIVATED   = BIT(0)
302 };
303
304 struct vpu_subdev_data {
305         struct cdev cdev;
306         dev_t dev_t;
307         struct class *cls;
308         struct device *child_dev;
309
310         int irq_enc;
311         int irq_dec;
312         struct vpu_service_info *pservice;
313
314         u32 *regs;
315         enum VCODEC_RUNNING_MODE mode;
316         struct list_head lnk_service;
317
318         struct device *dev;
319
320         struct vpu_device enc_dev;
321         struct vpu_device dec_dev;
322
323         enum VPU_HW_ID hw_id;
324         struct vpu_hw_info *hw_info;
325         struct vpu_task_info *task_info;
326         const struct vpu_trans_info *trans_info;
327
328         u32 reg_size;
329         unsigned long state;
330
331 #ifdef CONFIG_DEBUG_FS
332         struct dentry *debugfs_dir;
333         struct dentry *debugfs_file_regs;
334 #endif
335
336         struct device *mmu_dev;
337 };
338
339 struct vpu_service_info {
340         struct wake_lock wake_lock;
341         struct delayed_work power_off_work;
342         ktime_t last; /* record previous power-on time */
343         /* vpu service structure global lock */
344         struct mutex lock;
345         /* link to link_reg in struct vpu_reg */
346         struct list_head waiting;
347         /* link to link_reg in struct vpu_reg */
348         struct list_head running;
349         /* link to link_reg in struct vpu_reg */
350         struct list_head done;
351         /* link to list_session in struct vpu_session */
352         struct list_head session;
353         atomic_t total_running;
354         atomic_t enabled;
355         atomic_t power_on_cnt;
356         atomic_t power_off_cnt;
357         atomic_t service_on;
358         struct mutex shutdown_lock;
359         struct vpu_reg *reg_codec;
360         struct vpu_reg *reg_pproc;
361         struct vpu_reg *reg_resev;
362         struct vpu_dec_config dec_config;
363         struct vpu_enc_config enc_config;
364
365         bool auto_freq;
366         bool bug_dec_addr;
367         atomic_t freq_status;
368
369         struct clk *aclk_vcodec;
370         struct clk *hclk_vcodec;
371         struct clk *clk_core;
372         struct clk *clk_cabac;
373         struct clk *pd_video;
374
375 #ifdef CONFIG_RESET_CONTROLLER
376         struct reset_control *rst_a;
377         struct reset_control *rst_h;
378         struct reset_control *rst_v;
379 #endif
380         struct device *dev;
381
382         u32 irq_status;
383         atomic_t reset_request;
384         struct ion_client *ion_client;
385         struct list_head mem_region_list;
386
387         enum vcodec_device_id dev_id;
388
389         enum VCODEC_RUNNING_MODE curr_mode;
390         u32 prev_mode;
391
392         struct delayed_work simulate_work;
393
394         u32 mode_bit;
395         u32 mode_ctrl;
396         u32 *reg_base;
397         u32 ioaddr;
398         struct regmap *grf;
399         u32 *grf_base;
400
401         char *name;
402
403         u32 subcnt;
404         struct list_head subdev_list;
405 };
406
407 struct vpu_request {
408         u32 *req;
409         u32 size;
410 };
411
412 #ifdef CONFIG_COMPAT
413 struct compat_vpu_request {
414         compat_uptr_t req;
415         u32 size;
416 };
417 #endif
418
419 /* debugfs root directory for all devices (vpu, hevc). */
420 static struct dentry *parent;
421
422 #ifdef CONFIG_DEBUG_FS
423 static int vcodec_debugfs_init(void);
424 static void vcodec_debugfs_exit(void);
425 static struct dentry *vcodec_debugfs_create_device_dir(
426                 char *dirname, struct dentry *parent);
427 static int debug_vcodec_open(struct inode *inode, struct file *file);
428
429 static const struct file_operations debug_vcodec_fops = {
430         .open = debug_vcodec_open,
431         .read = seq_read,
432         .llseek = seq_lseek,
433         .release = single_release,
434 };
435 #endif
436
437 #define VDPU_SOFT_RESET_REG     101
438 #define VDPU_CLEAN_CACHE_REG    516
439 #define VEPU_CLEAN_CACHE_REG    772
440 #define HEVC_CLEAN_CACHE_REG    260
441
442 #define VPU_REG_ENABLE(base, reg)       writel_relaxed(1, base + reg)
443
444 #define VDPU_SOFT_RESET(base)   VPU_REG_ENABLE(base, VDPU_SOFT_RESET_REG)
445 #define VDPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VDPU_CLEAN_CACHE_REG)
446 #define VEPU_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, VEPU_CLEAN_CACHE_REG)
447 #define HEVC_CLEAN_CACHE(base)  VPU_REG_ENABLE(base, HEVC_CLEAN_CACHE_REG)
448
449 #define VPU_POWER_OFF_DELAY             (4 * HZ) /* 4s */
450 #define VPU_TIMEOUT_DELAY               (2 * HZ) /* 2s */
451
452 static void time_record(struct vpu_task_info *task, int is_end)
453 {
454         if (unlikely(debug & DEBUG_TIMING) && task)
455                 do_gettimeofday((is_end) ? (&task->end) : (&task->start));
456 }
457
458 static void time_diff(struct vpu_task_info *task)
459 {
460         vpu_debug(DEBUG_TIMING, "%s task: %ld ms\n", task->name,
461                   (task->end.tv_sec  - task->start.tv_sec)  * 1000 +
462                   (task->end.tv_usec - task->start.tv_usec) / 1000);
463 }
464
465 static void vcodec_enter_mode(struct vpu_subdev_data *data)
466 {
467         int bits;
468         u32 raw = 0;
469         struct vpu_service_info *pservice = data->pservice;
470         struct vpu_subdev_data *subdata, *n;
471
472         if (pservice->subcnt < 2) {
473                 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
474                         set_bit(MMU_ACTIVATED, &data->state);
475                         if (atomic_read(&pservice->enabled))
476                                 rockchip_iovmm_activate(data->dev);
477                         else
478                                 BUG_ON(!atomic_read(&pservice->enabled));
479                 }
480                 return;
481         }
482
483         if (pservice->curr_mode == data->mode)
484                 return;
485
486         vpu_debug(DEBUG_IOMMU, "vcodec enter mode %d\n", data->mode);
487         list_for_each_entry_safe(subdata, n,
488                                  &pservice->subdev_list, lnk_service) {
489                 if (data != subdata && subdata->mmu_dev &&
490                     test_bit(MMU_ACTIVATED, &subdata->state)) {
491                         clear_bit(MMU_ACTIVATED, &subdata->state);
492                         rockchip_iovmm_deactivate(subdata->dev);
493                 }
494         }
495         bits = 1 << pservice->mode_bit;
496 #ifdef CONFIG_MFD_SYSCON
497         if (pservice->grf) {
498                 regmap_read(pservice->grf, pservice->mode_ctrl, &raw);
499
500                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
501                         regmap_write(pservice->grf, pservice->mode_ctrl,
502                                      raw | bits | (bits << 16));
503                 else
504                         regmap_write(pservice->grf, pservice->mode_ctrl,
505                                      (raw & (~bits)) | (bits << 16));
506         } else if (pservice->grf_base) {
507                 u32 *grf_base = pservice->grf_base;
508
509                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
510                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
511                         writel_relaxed(raw | bits | (bits << 16),
512                                        grf_base + pservice->mode_ctrl / 4);
513                 else
514                         writel_relaxed((raw & (~bits)) | (bits << 16),
515                                        grf_base + pservice->mode_ctrl / 4);
516         } else {
517                 vpu_err("no grf resource define, switch decoder failed\n");
518                 return;
519         }
520 #else
521         if (pservice->grf_base) {
522                 u32 *grf_base = pservice->grf_base;
523
524                 raw = readl_relaxed(grf_base + pservice->mode_ctrl / 4);
525                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
526                         writel_relaxed(raw | bits | (bits << 16),
527                                        grf_base + pservice->mode_ctrl / 4);
528                 else
529                         writel_relaxed((raw & (~bits)) | (bits << 16),
530                                        grf_base + pservice->mode_ctrl / 4);
531         } else {
532                 vpu_err("no grf resource define, switch decoder failed\n");
533                 return;
534         }
535 #endif
536         if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
537                 set_bit(MMU_ACTIVATED, &data->state);
538                 if (atomic_read(&pservice->enabled))
539                         rockchip_iovmm_activate(data->dev);
540                 else
541                         BUG_ON(!atomic_read(&pservice->enabled));
542         }
543
544         pservice->prev_mode = pservice->curr_mode;
545         pservice->curr_mode = data->mode;
546 }
547
548 static void vcodec_exit_mode(struct vpu_subdev_data *data)
549 {
550         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
551                 clear_bit(MMU_ACTIVATED, &data->state);
552                 rockchip_iovmm_deactivate(data->dev);
553                 data->pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
554         }
555 }
556
557 static int vpu_get_clk(struct vpu_service_info *pservice)
558 {
559 #if VCODEC_CLOCK_ENABLE
560         struct device *dev = pservice->dev;
561
562         switch (pservice->dev_id) {
563         case VCODEC_DEVICE_ID_HEVC:
564                 pservice->pd_video = devm_clk_get(dev, "pd_hevc");
565                 if (IS_ERR(pservice->pd_video)) {
566                         dev_err(dev, "failed on clk_get pd_hevc\n");
567                         return -1;
568                 }
569         case VCODEC_DEVICE_ID_COMBO:
570         case VCODEC_DEVICE_ID_RKVDEC:
571                 pservice->clk_cabac = devm_clk_get(dev, "clk_cabac");
572                 if (IS_ERR(pservice->clk_cabac)) {
573                         dev_err(dev, "failed on clk_get clk_cabac\n");
574                         pservice->clk_cabac = NULL;
575                 }
576                 pservice->clk_core = devm_clk_get(dev, "clk_core");
577                 if (IS_ERR(pservice->clk_core)) {
578                         dev_err(dev, "failed on clk_get clk_core\n");
579                         return -1;
580                 }
581         case VCODEC_DEVICE_ID_VPU:
582                 pservice->aclk_vcodec = devm_clk_get(dev, "aclk_vcodec");
583                 if (IS_ERR(pservice->aclk_vcodec)) {
584                         dev_err(dev, "failed on clk_get aclk_vcodec\n");
585                         return -1;
586                 }
587
588                 pservice->hclk_vcodec = devm_clk_get(dev, "hclk_vcodec");
589                 if (IS_ERR(pservice->hclk_vcodec)) {
590                         dev_err(dev, "failed on clk_get hclk_vcodec\n");
591                         return -1;
592                 }
593                 if (pservice->pd_video == NULL) {
594                         pservice->pd_video = devm_clk_get(dev, "pd_video");
595                         if (IS_ERR(pservice->pd_video)) {
596                                 pservice->pd_video = NULL;
597                                 dev_info(dev, "do not have pd_video\n");
598                         }
599                 }
600                 break;
601         default:
602                 break;
603         }
604
605         return 0;
606 #else
607         return 0;
608 #endif
609 }
610
611 static void vpu_put_clk(struct vpu_service_info *pservice)
612 {
613 #if VCODEC_CLOCK_ENABLE
614         if (pservice->pd_video)
615                 devm_clk_put(pservice->dev, pservice->pd_video);
616         if (pservice->aclk_vcodec)
617                 devm_clk_put(pservice->dev, pservice->aclk_vcodec);
618         if (pservice->hclk_vcodec)
619                 devm_clk_put(pservice->dev, pservice->hclk_vcodec);
620         if (pservice->clk_core)
621                 devm_clk_put(pservice->dev, pservice->clk_core);
622         if (pservice->clk_cabac)
623                 devm_clk_put(pservice->dev, pservice->clk_cabac);
624 #endif
625 }
626
627 static void vpu_reset(struct vpu_subdev_data *data)
628 {
629         struct vpu_service_info *pservice = data->pservice;
630         enum pmu_idle_req type = IDLE_REQ_VIDEO;
631
632         if (pservice->dev_id == VCODEC_DEVICE_ID_HEVC)
633                 type = IDLE_REQ_HEVC;
634
635         pr_info("%s: resetting...", dev_name(pservice->dev));
636
637 #if defined(CONFIG_ARCH_RK29)
638         clk_disable(aclk_ddr_vepu);
639         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
640         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
641         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
642         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
643         mdelay(10);
644         cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
645         cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
646         cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
647         cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
648         clk_enable(aclk_ddr_vepu);
649 #elif defined(CONFIG_ARCH_RK30)
650         pmu_set_idle_request(IDLE_REQ_VIDEO, true);
651         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
652         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
653         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
654         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
655         mdelay(1);
656         cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
657         cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
658         cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
659         cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
660         pmu_set_idle_request(IDLE_REQ_VIDEO, false);
661 #else
662 #endif
663         WARN_ON(pservice->reg_codec != NULL);
664         WARN_ON(pservice->reg_pproc != NULL);
665         WARN_ON(pservice->reg_resev != NULL);
666         pservice->reg_codec = NULL;
667         pservice->reg_pproc = NULL;
668         pservice->reg_resev = NULL;
669
670         pr_info("for 3288/3368...");
671 #ifdef CONFIG_RESET_CONTROLLER
672         if (pservice->rst_a && pservice->rst_h) {
673                 pr_info("reset in\n");
674                 if (pservice->rst_v)
675                         reset_control_assert(pservice->rst_v);
676                 reset_control_assert(pservice->rst_a);
677                 reset_control_assert(pservice->rst_h);
678                 udelay(5);
679                 reset_control_deassert(pservice->rst_h);
680                 reset_control_deassert(pservice->rst_a);
681                 if (pservice->rst_v)
682                         reset_control_deassert(pservice->rst_v);
683         }
684 #endif
685
686         if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
687                 clear_bit(MMU_ACTIVATED, &data->state);
688                 if (atomic_read(&pservice->enabled))
689                         rockchip_iovmm_deactivate(data->dev);
690                 else
691                         BUG_ON(!atomic_read(&pservice->enabled));
692         }
693
694         atomic_set(&pservice->reset_request, 0);
695         pr_info("done\n");
696 }
697
698 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg);
699 static void vpu_service_session_clear(struct vpu_subdev_data *data,
700                                       struct vpu_session *session)
701 {
702         struct vpu_reg *reg, *n;
703
704         list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
705                 reg_deinit(data, reg);
706         }
707         list_for_each_entry_safe(reg, n, &session->running, session_link) {
708                 reg_deinit(data, reg);
709         }
710         list_for_each_entry_safe(reg, n, &session->done, session_link) {
711                 reg_deinit(data, reg);
712         }
713 }
714
715 static void vpu_service_clear(struct vpu_subdev_data *data)
716 {
717         struct vpu_reg *reg, *n;
718         struct vpu_session *session, *s;
719         struct vpu_service_info *pservice = data->pservice;
720
721         list_for_each_entry_safe(reg, n, &pservice->waiting, status_link) {
722                 reg_deinit(data, reg);
723         }
724
725         /* wake up session wait event to prevent the timeout hw reset
726          * during reboot procedure.
727          */
728         list_for_each_entry_safe(session, s,
729                                  &pservice->session, list_session)
730                 wake_up(&session->wait);
731 }
732
733 static void vpu_service_dump(struct vpu_service_info *pservice)
734 {
735 }
736
737
738 static void vpu_service_power_off(struct vpu_service_info *pservice)
739 {
740         int total_running;
741         struct vpu_subdev_data *data = NULL, *n;
742         int ret = atomic_add_unless(&pservice->enabled, -1, 0);
743
744         if (!ret)
745                 return;
746
747         total_running = atomic_read(&pservice->total_running);
748         if (total_running) {
749                 pr_alert("alert: power off when %d task running!!\n",
750                          total_running);
751                 mdelay(50);
752                 pr_alert("alert: delay 50 ms for running task\n");
753                 vpu_service_dump(pservice);
754         }
755
756         pr_info("%s: power off...", dev_name(pservice->dev));
757
758         udelay(5);
759
760         list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
761                 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
762                         clear_bit(MMU_ACTIVATED, &data->state);
763                         rockchip_iovmm_deactivate(data->dev);
764                 }
765         }
766         pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
767
768 #if VCODEC_CLOCK_ENABLE
769                 if (pservice->pd_video)
770                         clk_disable_unprepare(pservice->pd_video);
771                 if (pservice->hclk_vcodec)
772                         clk_disable_unprepare(pservice->hclk_vcodec);
773                 if (pservice->aclk_vcodec)
774                         clk_disable_unprepare(pservice->aclk_vcodec);
775                 if (pservice->clk_core)
776                         clk_disable_unprepare(pservice->clk_core);
777                 if (pservice->clk_cabac)
778                         clk_disable_unprepare(pservice->clk_cabac);
779 #endif
780         pm_runtime_put(pservice->dev);
781
782         atomic_add(1, &pservice->power_off_cnt);
783         wake_unlock(&pservice->wake_lock);
784         pr_info("done\n");
785 }
786
787 static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
788 {
789         queue_delayed_work(system_wq, &pservice->power_off_work,
790                            VPU_POWER_OFF_DELAY);
791 }
792
793 static void vpu_power_off_work(struct work_struct *work_s)
794 {
795         struct delayed_work *dlwork = container_of(work_s,
796                         struct delayed_work, work);
797         struct vpu_service_info *pservice = container_of(dlwork,
798                         struct vpu_service_info, power_off_work);
799
800         if (mutex_trylock(&pservice->lock)) {
801                 vpu_service_power_off(pservice);
802                 mutex_unlock(&pservice->lock);
803         } else {
804                 /* Come back later if the device is busy... */
805                 vpu_queue_power_off_work(pservice);
806         }
807 }
808
809 static void vpu_service_power_on(struct vpu_service_info *pservice)
810 {
811         int ret;
812         ktime_t now = ktime_get();
813
814         if (ktime_to_ns(ktime_sub(now, pservice->last)) > NSEC_PER_SEC) {
815                 cancel_delayed_work_sync(&pservice->power_off_work);
816                 vpu_queue_power_off_work(pservice);
817                 pservice->last = now;
818         }
819         ret = atomic_add_unless(&pservice->enabled, 1, 1);
820         if (!ret)
821                 return;
822
823         pr_info("%s: power on\n", dev_name(pservice->dev));
824
825 #define BIT_VCODEC_CLK_SEL      (1<<10)
826         if (cpu_is_rk312x())
827                 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1)
828                         | BIT_VCODEC_CLK_SEL | (BIT_VCODEC_CLK_SEL << 16),
829                         RK_GRF_VIRT + RK312X_GRF_SOC_CON1);
830
831 #if VCODEC_CLOCK_ENABLE
832         if (pservice->aclk_vcodec)
833                 clk_prepare_enable(pservice->aclk_vcodec);
834         if (pservice->hclk_vcodec)
835                 clk_prepare_enable(pservice->hclk_vcodec);
836         if (pservice->clk_core)
837                 clk_prepare_enable(pservice->clk_core);
838         if (pservice->clk_cabac)
839                 clk_prepare_enable(pservice->clk_cabac);
840         if (pservice->pd_video)
841                 clk_prepare_enable(pservice->pd_video);
842 #endif
843         pm_runtime_get_sync(pservice->dev);
844
845         udelay(5);
846         atomic_add(1, &pservice->power_on_cnt);
847         wake_lock(&pservice->wake_lock);
848 }
849
850 static inline bool reg_check_interlace(struct vpu_reg *reg)
851 {
852         u32 type = (reg->reg[3] & (1 << 23));
853
854         return (type > 0);
855 }
856
857 static inline enum VPU_DEC_FMT reg_check_fmt(struct vpu_reg *reg)
858 {
859         enum VPU_DEC_FMT type = (enum VPU_DEC_FMT)((reg->reg[3] >> 28) & 0xf);
860
861         return type;
862 }
863
864 static inline int reg_probe_width(struct vpu_reg *reg)
865 {
866         int width_in_mb = reg->reg[4] >> 23;
867
868         return width_in_mb * 16;
869 }
870
871 static inline int reg_probe_hevc_y_stride(struct vpu_reg *reg)
872 {
873         int y_virstride = reg->reg[8];
874
875         return y_virstride;
876 }
877
878 static int vcodec_fd_to_iova(struct vpu_subdev_data *data,
879                              struct vpu_reg *reg, int fd)
880 {
881         struct vpu_service_info *pservice = data->pservice;
882         struct ion_handle *hdl;
883         int ret = 0;
884         struct vcodec_mem_region *mem_region;
885
886         hdl = ion_import_dma_buf(pservice->ion_client, fd);
887         if (IS_ERR(hdl)) {
888                 vpu_err("import dma-buf from fd %d failed\n", fd);
889                 return PTR_ERR(hdl);
890         }
891         mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
892
893         if (mem_region == NULL) {
894                 vpu_err("allocate memory for iommu memory region failed\n");
895                 ion_free(pservice->ion_client, hdl);
896                 return -1;
897         }
898
899         mem_region->hdl = hdl;
900         if (data->mmu_dev)
901                 ret = ion_map_iommu(data->dev, pservice->ion_client,
902                                     mem_region->hdl, &mem_region->iova,
903                                     &mem_region->len);
904         else
905                 ret = ion_phys(pservice->ion_client,
906                                mem_region->hdl,
907                                (ion_phys_addr_t *)&mem_region->iova,
908                                (size_t *)&mem_region->len);
909
910         if (ret < 0) {
911                 vpu_err("fd %d ion map iommu failed\n", fd);
912                 kfree(mem_region);
913                 ion_free(pservice->ion_client, hdl);
914                 return ret;
915         }
916         INIT_LIST_HEAD(&mem_region->reg_lnk);
917         list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
918         return mem_region->iova;
919 }
920
921 /*
922  * NOTE: rkvdec/rkhevc put the scaling list address in the pps buffer; the
923  * hardware will read it by pps id from the video stream data.
924  *
925  * So we need to translate that address in the iommu case. The address data
926  * also uses the 10bit fd + 22bit offset mode.
927  * Because the userspace decoder does not give the pps id in the register file
928  * sets, the kernel driver needs to translate each scaling list address in the
929  * pps buffer, which means 256 pps entries for H.264 and 64 for H.265.
930  *
931  * In order to optimize performance, the kernel driver asks the userspace
932  * decoder to set all scaling list addresses in the pps buffer to the same one,
933  * the one used by the current decoding task. Then the kernel driver only needs
934  * to translate the first address and copy it to the whole pps buffer.
935  */
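
/*
 * Worked example (illustrative values, not taken from a real stream): an
 * address word of 0x1405 in the pps buffer decodes as fd = 0x1405 & 0x3ff = 5
 * and offset = 0x1405 >> 10 = 5, i.e. the scaling list starts 5 bytes into the
 * dma-buf referenced by fd 5. fill_scaling_list_addr_in_pps() below resolves
 * the fd to an iova once and writes iova + offset back into every pps entry.
 */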
936 static void fill_scaling_list_addr_in_pps(
937                 struct vpu_subdev_data *data,
938                 struct vpu_reg *reg,
939                 char *pps,
940                 int pps_info_count,
941                 int pps_info_size,
942                 int scaling_list_addr_offset)
943 {
944         int base = scaling_list_addr_offset;
945         int scaling_fd = 0;
946         u32 scaling_offset;
947
948         scaling_offset  = (u32)pps[base + 0];
949         scaling_offset += (u32)pps[base + 1] << 8;
950         scaling_offset += (u32)pps[base + 2] << 16;
951         scaling_offset += (u32)pps[base + 3] << 24;
952
953         scaling_fd = scaling_offset & 0x3ff;
954         scaling_offset = scaling_offset >> 10;
955
956         if (scaling_fd > 0) {
957                 int i = 0;
958                 u32 tmp = vcodec_fd_to_iova(data, reg, scaling_fd);
959                 tmp += scaling_offset;
960
961                 for (i = 0; i < pps_info_count; i++, base += pps_info_size) {
962                         pps[base + 0] = (tmp >>  0) & 0xff;
963                         pps[base + 1] = (tmp >>  8) & 0xff;
964                         pps[base + 2] = (tmp >> 16) & 0xff;
965                         pps[base + 3] = (tmp >> 24) & 0xff;
966                 }
967         }
968 }
969
970 static int vcodec_bufid_to_iova(struct vpu_subdev_data *data, const u8 *tbl,
971                                 int size, struct vpu_reg *reg,
972                                 struct extra_info_for_iommu *ext_inf)
973 {
974         struct vpu_service_info *pservice = data->pservice;
975         struct vpu_task_info *task = reg->task;
976         enum FORMAT_TYPE type;
977         struct ion_handle *hdl;
978         int ret = 0;
979         struct vcodec_mem_region *mem_region;
980         int i;
981         int offset = 0;
982
983         if (tbl == NULL || size <= 0) {
 984                 dev_err(pservice->dev, "invalid input arguments\n");
985                 return -1;
986         }
987
988         if (task->get_fmt)
989                 type = task->get_fmt(reg->reg);
990         else {
991                 pr_err("invalid task with NULL get_fmt\n");
992                 return -1;
993         }
994
995         for (i = 0; i < size; i++) {
996                 int usr_fd = reg->reg[tbl[i]] & 0x3FF;
997
 998                 /* skip if userspace did not set the fd at this register */
999                 if (usr_fd == 0)
1000                         continue;
1001
1002                 /*
1003                  * special offset scale case
1004                  *
 1005                  * This translation is for fd + offset translation.
 1006                  * One register has 32 bits. We need to transfer both the buffer
 1007                  * file handle and the start address offset, so we pack the file
 1008                  * handle and the offset together using the format below:
 1009                  *
 1010                  *  bits  0~9  buffer file handle, range 0 ~ 1023
 1011                  *  bits 10~31 offset, range 0 ~ 4M
 1012                  *
 1013                  * But in the 4K case the offset can be larger than 4M.
 1014                  * So for the H.264 4K vpu/vpu2 decoder we scale the offset by 16,
 1015                  * but MPEG4 uses the same register for colmv and does not need
 1016                  * the scaling.
 1017                  *
 1018                  * RKVdec does not have this issue.
1019                  */
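                /*
                 * Worked example (illustrative value): a packed register value
                 * of 0x2803 gives fd = 0x2803 & 0x3ff = 3 and an offset field
                 * of 0x2803 >> 10 = 10; in the scaled H.264/VP9 dir-mv case
                 * below the byte offset becomes 10 << 4 = 160, otherwise it
                 * stays 10.
                 */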
1020                 if ((type == FMT_H264D || type == FMT_VP9D) &&
1021                     task->reg_dir_mv > 0 && task->reg_dir_mv == tbl[i])
1022                         offset = reg->reg[tbl[i]] >> 10 << 4;
1023                 else
1024                         offset = reg->reg[tbl[i]] >> 10;
1025
1026                 vpu_debug(DEBUG_IOMMU, "pos %3d fd %3d offset %10d\n",
1027                           tbl[i], usr_fd, offset);
1028
1029                 hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
1030                 if (IS_ERR(hdl)) {
1031                         dev_err(pservice->dev,
1032                                 "import dma-buf from fd %d failed, reg[%d]\n",
1033                                 usr_fd, tbl[i]);
1034                         return PTR_ERR(hdl);
1035                 }
1036
1037                 if (task->reg_pps > 0 && task->reg_pps == tbl[i]) {
1038                         int pps_info_offset;
1039                         int pps_info_count;
1040                         int pps_info_size;
1041                         int scaling_list_addr_offset;
1042
1043                         switch (type) {
1044                         case FMT_H264D: {
1045                                 pps_info_offset = offset;
1046                                 pps_info_count = 256;
1047                                 pps_info_size = 32;
1048                                 scaling_list_addr_offset = 23;
1049                         } break;
1050                         case FMT_H265D: {
1051                                 pps_info_offset = 0;
1052                                 pps_info_count = 64;
1053                                 pps_info_size = 80;
1054                                 scaling_list_addr_offset = 74;
1055                         } break;
1056                         default: {
1057                                 pps_info_offset = 0;
1058                                 pps_info_count = 0;
1059                                 pps_info_size = 0;
1060                                 scaling_list_addr_offset = 0;
1061                         } break;
1062                         }
1063
1064                         vpu_debug(DEBUG_PPS_FILL,
1065                                   "scaling list filling parameter:\n");
1066                         vpu_debug(DEBUG_PPS_FILL,
1067                                   "pps_info_offset %d\n", pps_info_offset);
1068                         vpu_debug(DEBUG_PPS_FILL,
1069                                   "pps_info_count  %d\n", pps_info_count);
1070                         vpu_debug(DEBUG_PPS_FILL,
1071                                   "pps_info_size   %d\n", pps_info_size);
1072                         vpu_debug(DEBUG_PPS_FILL,
1073                                   "scaling_list_addr_offset %d\n",
1074                                   scaling_list_addr_offset);
1075
1076                         if (pps_info_count) {
1077                                 char *pps = (char *)ion_map_kernel(
1078                                                 pservice->ion_client, hdl);
1079                                 vpu_debug(DEBUG_PPS_FILL,
1080                                           "scaling list setting pps %p\n", pps);
1081                                 pps += pps_info_offset;
1082
1083                                 fill_scaling_list_addr_in_pps(
1084                                                 data, reg, pps,
1085                                                 pps_info_count,
1086                                                 pps_info_size,
1087                                                 scaling_list_addr_offset);
1088                         }
1089                 }
1090
1091                 mem_region = kzalloc(sizeof(*mem_region), GFP_KERNEL);
1092
1093                 if (!mem_region) {
1094                         ion_free(pservice->ion_client, hdl);
1095                         return -1;
1096                 }
1097
1098                 mem_region->hdl = hdl;
1099                 mem_region->reg_idx = tbl[i];
1100
1101                 if (data->mmu_dev)
1102                         ret = ion_map_iommu(data->dev,
1103                                             pservice->ion_client,
1104                                             mem_region->hdl,
1105                                             &mem_region->iova,
1106                                             &mem_region->len);
1107                 else
1108                         ret = ion_phys(pservice->ion_client,
1109                                        mem_region->hdl,
1110                                        (ion_phys_addr_t *)&mem_region->iova,
1111                                        (size_t *)&mem_region->len);
1112
1113                 if (ret < 0) {
1114                         dev_err(pservice->dev, "reg %d fd %d ion map iommu failed\n",
1115                                 tbl[i], usr_fd);
1116                         kfree(mem_region);
1117                         ion_free(pservice->ion_client, hdl);
1118                         return ret;
1119                 }
1120
1121                 /*
1122                  * special for vpu dec num 12: record decoded length
1123                  * hacking for decoded length
1124                  * NOTE: not a perfect fix, the fd is not recorded
1125                  */
1126                 if (task->reg_len > 0 && task->reg_len == tbl[i]) {
1127                         reg->dec_base = mem_region->iova + offset;
1128                         vpu_debug(DEBUG_REGISTER, "dec_set %08x\n",
1129                                   reg->dec_base);
1130                 }
1131
1132                 reg->reg[tbl[i]] = mem_region->iova + offset;
1133                 INIT_LIST_HEAD(&mem_region->reg_lnk);
1134                 list_add_tail(&mem_region->reg_lnk, &reg->mem_region_list);
1135         }
1136
1137         if (ext_inf != NULL && ext_inf->magic == EXTRA_INFO_MAGIC) {
1138                 for (i = 0; i < ext_inf->cnt; i++) {
1139                         vpu_debug(DEBUG_IOMMU, "reg[%d] + offset %d\n",
1140                                   ext_inf->elem[i].index,
1141                                   ext_inf->elem[i].offset);
1142                         reg->reg[ext_inf->elem[i].index] +=
1143                                 ext_inf->elem[i].offset;
1144                 }
1145         }
1146
1147         return 0;
1148 }
1149
1150 static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
1151                                         struct vpu_reg *reg,
1152                                         struct extra_info_for_iommu *ext_inf)
1153 {
1154         enum FORMAT_TYPE type = reg->task->get_fmt(reg->reg);
1155
1156         if (type < FMT_TYPE_BUTT) {
1157                 const struct vpu_trans_info *info = &reg->trans[type];
1158                 const u8 *tbl = info->table;
1159                 int size = info->count;
1160
1161                 return vcodec_bufid_to_iova(data, tbl, size, reg, ext_inf);
1162         }
1163         pr_err("found invalid format type!\n");
1164         return -1;
1165 }
1166
1167 static void get_reg_freq(struct vpu_subdev_data *data, struct vpu_reg *reg)
1168 {
1169
1170         if (!soc_is_rk2928g()) {
1171                 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1172                         if (reg_check_fmt(reg) == VPU_DEC_FMT_H264) {
1173                                 if (reg_probe_width(reg) > 3200) {
1174                                         /*raise frequency for 4k avc.*/
1175                                         reg->freq = VPU_FREQ_600M;
1176                                 }
1177                         } else {
1178                                 if (reg_check_interlace(reg))
1179                                         reg->freq = VPU_FREQ_400M;
1180                         }
1181                 }
1182                 if (data->hw_id == HEVC_ID) {
1183                         if (reg_probe_hevc_y_stride(reg) > 60000)
1184                                 reg->freq = VPU_FREQ_400M;
1185                 }
1186                 if (reg->type == VPU_PP)
1187                         reg->freq = VPU_FREQ_400M;
1188         }
1189 }
1190
1191 static struct vpu_reg *reg_init(struct vpu_subdev_data *data,
1192                                 struct vpu_session *session,
1193                                 void __user *src, u32 size)
1194 {
1195         struct vpu_service_info *pservice = data->pservice;
1196         int extra_size = 0;
1197         struct extra_info_for_iommu extra_info;
1198         struct vpu_reg *reg = kzalloc(sizeof(*reg) + data->reg_size,
1199                                       GFP_KERNEL);
1200
1201         vpu_debug_enter();
1202
1203         if (NULL == reg) {
 1204                 vpu_err("error: kzalloc failed in reg_init\n");
1205                 return NULL;
1206         }
1207
1208         if (size > data->reg_size) {
1209                 pr_err("vpu reg size %u is larger than hw reg size %u\n",
1210                        size, data->reg_size);
1211                 extra_size = size - data->reg_size;
1212                 size = data->reg_size;
1213         }
1214         reg->session = session;
1215         reg->data = data;
1216         reg->type = session->type;
1217         reg->size = size;
1218         reg->freq = VPU_FREQ_DEFAULT;
1219         reg->task = &data->task_info[session->type];
1220         reg->trans = data->trans_info;
1221         reg->reg = (u32 *)&reg[1];
1222         INIT_LIST_HEAD(&reg->session_link);
1223         INIT_LIST_HEAD(&reg->status_link);
1224
1225         INIT_LIST_HEAD(&reg->mem_region_list);
1226
1227         if (copy_from_user(&reg->reg[0], (void __user *)src, size)) {
1228                 vpu_err("error: copy_from_user failed in reg_init\n");
1229                 kfree(reg);
1230                 return NULL;
1231         }
1232
1233         if (copy_from_user(&extra_info, (u8 *)src + size, extra_size)) {
1234                 vpu_err("error: copy_from_user failed in reg_init\n");
1235                 kfree(reg);
1236                 return NULL;
1237         }
1238
1239         if (0 > vcodec_reg_address_translate(data, reg, &extra_info)) {
1240                 int i = 0;
1241
1242                 vpu_err("error: translate reg address failed, dumping regs\n");
1243                 for (i = 0; i < size >> 2; i++)
1244                         pr_err("reg[%02d]: %08x\n", i, *((u32 *)src + i));
1245
1246                 kfree(reg);
1247                 return NULL;
1248         }
1249
1250         mutex_lock(&pservice->lock);
1251         list_add_tail(&reg->status_link, &pservice->waiting);
1252         list_add_tail(&reg->session_link, &session->waiting);
1253         mutex_unlock(&pservice->lock);
1254
1255         if (pservice->auto_freq)
1256                 get_reg_freq(data, reg);
1257
1258         vpu_debug_leave();
1259         return reg;
1260 }
1261
1262 static void reg_deinit(struct vpu_subdev_data *data, struct vpu_reg *reg)
1263 {
1264         struct vpu_service_info *pservice = data->pservice;
1265         struct vcodec_mem_region *mem_region = NULL, *n;
1266
1267         list_del_init(&reg->session_link);
1268         list_del_init(&reg->status_link);
1269         if (reg == pservice->reg_codec)
1270                 pservice->reg_codec = NULL;
1271         if (reg == pservice->reg_pproc)
1272                 pservice->reg_pproc = NULL;
1273
1274         /* release memory region attach to this registers table. */
1275         list_for_each_entry_safe(mem_region, n,
1276                         &reg->mem_region_list, reg_lnk) {
1277                 ion_free(pservice->ion_client, mem_region->hdl);
1278                 list_del_init(&mem_region->reg_lnk);
1279                 kfree(mem_region);
1280         }
1281
1282         kfree(reg);
1283 }
1284
1285 static void reg_from_wait_to_run(struct vpu_service_info *pservice,
1286                                  struct vpu_reg *reg)
1287 {
1288         vpu_debug_enter();
1289         list_del_init(&reg->status_link);
1290         list_add_tail(&reg->status_link, &pservice->running);
1291
1292         list_del_init(&reg->session_link);
1293         list_add_tail(&reg->session_link, &reg->session->running);
1294         vpu_debug_leave();
1295 }
1296
1297 static void reg_copy_from_hw(struct vpu_reg *reg, u32 *src, u32 count)
1298 {
1299         int i;
1300         u32 *dst = reg->reg;
1301
1302         vpu_debug_enter();
1303         for (i = 0; i < count; i++, src++)
1304                 *dst++ = readl_relaxed(src);
1305
1306         dst = (u32 *)&reg->reg[0];
1307         for (i = 0; i < count; i++)
1308                 vpu_debug(DEBUG_GET_REG, "get reg[%02d] %08x\n", i, dst[i]);
1309
1310         vpu_debug_leave();
1311 }
1312
1313 static void reg_from_run_to_done(struct vpu_subdev_data *data,
1314                                  struct vpu_reg *reg)
1315 {
1316         struct vpu_service_info *pservice = data->pservice;
1317         struct vpu_hw_info *hw_info = data->hw_info;
1318         struct vpu_task_info *task = reg->task;
1319
1320         vpu_debug_enter();
1321
1322         list_del_init(&reg->status_link);
1323         list_add_tail(&reg->status_link, &pservice->done);
1324
1325         list_del_init(&reg->session_link);
1326         list_add_tail(&reg->session_link, &reg->session->done);
1327
1328         switch (reg->type) {
1329         case VPU_ENC: {
1330                 pservice->reg_codec = NULL;
1331                 reg_copy_from_hw(reg, data->enc_dev.regs, hw_info->enc_reg_num);
1332                 reg->reg[task->reg_irq] = pservice->irq_status;
1333         } break;
1334         case VPU_DEC: {
1335                 pservice->reg_codec = NULL;
1336                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1337
1338                 /* revert hack for decoded length */
1339                 if (task->reg_len > 0) {
1340                         int reg_len = task->reg_len;
1341                         u32 dec_get = reg->reg[reg_len];
1342                         s32 dec_length = dec_get - reg->dec_base;
1343
1344                         vpu_debug(DEBUG_REGISTER,
1345                                   "dec_get %08x dec_length %d\n",
1346                                   dec_get, dec_length);
1347                         reg->reg[reg_len] = dec_length << 10;
1348                 }
1349
1350                 reg->reg[task->reg_irq] = pservice->irq_status;
1351         } break;
1352         case VPU_PP: {
1353                 pservice->reg_pproc = NULL;
1354                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1355                 writel_relaxed(0, data->dec_dev.regs + task->reg_irq);
1356         } break;
1357         case VPU_DEC_PP: {
1358                 u32 pipe_mode;
1359                 u32 *regs = data->dec_dev.regs;
1360
1361                 pservice->reg_codec = NULL;
1362                 pservice->reg_pproc = NULL;
1363
1364                 reg_copy_from_hw(reg, data->dec_dev.regs, hw_info->dec_reg_num);
1365
1366                 /* NOTE: remove pp pipeline mode flag first */
1367                 pipe_mode = readl_relaxed(regs + task->reg_pipe);
1368                 pipe_mode &= ~task->pipe_mask;
1369                 writel_relaxed(pipe_mode, regs + task->reg_pipe);
1370
1371                 /* revert hack for decoded length */
1372                 if (task->reg_len > 0) {
1373                         int reg_len = task->reg_len;
1374                         u32 dec_get = reg->reg[reg_len];
1375                         s32 dec_length = dec_get - reg->dec_base;
1376
1377                         vpu_debug(DEBUG_REGISTER,
1378                                   "dec_get %08x dec_length %d\n",
1379                                   dec_get, dec_length);
1380                         reg->reg[reg_len] = dec_length << 10;
1381                 }
1382
1383                 reg->reg[task->reg_irq] = pservice->irq_status;
1384         } break;
1385         default: {
1386                 vpu_err("error: copy reg from hw with unknown type %d\n",
1387                         reg->type);
1388         } break;
1389         }
1390         vcodec_exit_mode(data);
1391
1392         atomic_sub(1, &reg->session->task_running);
1393         atomic_sub(1, &pservice->total_running);
1394         wake_up(&reg->session->wait);
1395
1396         vpu_debug_leave();
1397 }
1398
1399 static void vpu_service_set_freq(struct vpu_service_info *pservice,
1400                                  struct vpu_reg *reg)
1401 {
1402         enum VPU_FREQ curr = atomic_read(&pservice->freq_status);
1403
1404         if (curr == reg->freq)
1405                 return;
1406
1407         atomic_set(&pservice->freq_status, reg->freq);
1408         switch (reg->freq) {
1409         case VPU_FREQ_200M: {
1410                 clk_set_rate(pservice->aclk_vcodec, 200*MHZ);
1411         } break;
1412         case VPU_FREQ_266M: {
1413                 clk_set_rate(pservice->aclk_vcodec, 266*MHZ);
1414         } break;
1415         case VPU_FREQ_300M: {
1416                 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1417         } break;
1418         case VPU_FREQ_400M: {
1419                 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1420         } break;
1421         case VPU_FREQ_500M: {
1422                 clk_set_rate(pservice->aclk_vcodec, 500*MHZ);
1423         } break;
1424         case VPU_FREQ_600M: {
1425                 clk_set_rate(pservice->aclk_vcodec, 600*MHZ);
1426         } break;
1427         default: {
1428                 unsigned long rate = 300*MHZ;
1429
1430                 if (soc_is_rk2928g())
1431                         rate = 400*MHZ;
1432
1433                 clk_set_rate(pservice->aclk_vcodec, rate);
1434         } break;
1435         }
1436 }
1437
1438 static void reg_copy_to_hw(struct vpu_subdev_data *data, struct vpu_reg *reg)
1439 {
1440         struct vpu_service_info *pservice = data->pservice;
1441         struct vpu_task_info *task = reg->task;
1442         struct vpu_hw_info *hw_info = data->hw_info;
1443         int i;
1444         u32 *src = (u32 *)&reg->reg[0];
1445         u32 enable_mask = task->enable_mask;
1446         u32 gating_mask = task->gating_mask;
1447         u32 reg_en = task->reg_en;
1448
1449         vpu_debug_enter();
1450
1451         atomic_add(1, &pservice->total_running);
1452         atomic_add(1, &reg->session->task_running);
1453
1454         if (pservice->auto_freq)
1455                 vpu_service_set_freq(pservice, reg);
1456
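              /* switch the shared vcodec hardware into this subdevice's running mode before register access */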
1457         vcodec_enter_mode(data);
1458
1459         switch (reg->type) {
1460         case VPU_ENC: {
1461                 u32 *dst = data->enc_dev.regs;
1462                 u32 base = 0;
1463                 u32 end  = hw_info->enc_reg_num;
1464                 /* u32 reg_gating = task->reg_gating; */
1465
1466                 pservice->reg_codec = reg;
1467
1468                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1469                           base, end, reg_en, enable_mask, gating_mask);
1470
1471                 VEPU_CLEAN_CACHE(dst);
1472
1473                 if (debug & DEBUG_SET_REG)
1474                         for (i = base; i < end; i++)
1475                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1476                                           i, src[i]);
1477
1478                 /*
1479                  * NOTE: encoder needs to set up its mode first
1480                  */
1481                 writel_relaxed(src[reg_en] & enable_mask, dst + reg_en);
1482
1483                 /* NOTE: encoder gating is not on enable register */
1484                 /* src[reg_gating] |= gating_mask; */
1485
1486                 for (i = base; i < end; i++) {
1487                         if (i != reg_en)
1488                                 writel_relaxed(src[i], dst + i);
1489                 }
1490
1491                 writel(src[reg_en], dst + reg_en);
1492                 dsb(sy);
1493
1494                 time_record(reg->task, 0);
1495         } break;
1496         case VPU_DEC: {
1497                 u32 *dst = data->dec_dev.regs;
1498                 u32 len = hw_info->dec_reg_num;
1499                 u32 base = hw_info->base_dec;
1500                 u32 end  = hw_info->end_dec;
1501
1502                 pservice->reg_codec = reg;
1503
1504                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1505                           base, end, reg_en, enable_mask, gating_mask);
1506
1507                 VDPU_CLEAN_CACHE(dst);
1508
1509                 /* on rkvdec set cache size to 64byte */
1510                 if (pservice->dev_id == VCODEC_DEVICE_ID_RKVDEC) {
1511                         u32 *cache_base = dst + 0x100;
1512                         u32 val = (debug & DEBUG_CACHE_32B) ? (0x3) : (0x13);
1513                         writel_relaxed(val, cache_base + 0x07);
1514                         writel_relaxed(val, cache_base + 0x17);
1515                 }
1516
1517                 if (debug & DEBUG_SET_REG)
1518                         for (i = 0; i < len; i++)
1519                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1520                                           i, src[i]);
1521
1522                 /*
1523                  * NOTE: The end register is invalid. Do NOT write to it
1524                  *       Also the base register must be written
1525                  */
1526                 for (i = base; i < end; i++) {
1527                         if (i != reg_en)
1528                                 writel_relaxed(src[i], dst + i);
1529                 }
1530
1531                 writel(src[reg_en] | gating_mask, dst + reg_en);
1532                 dsb(sy);
1533
1534                 time_record(reg->task, 0);
1535         } break;
1536         case VPU_PP: {
1537                 u32 *dst = data->dec_dev.regs;
1538                 u32 base = hw_info->base_pp;
1539                 u32 end  = hw_info->end_pp;
1540
1541                 pservice->reg_pproc = reg;
1542
1543                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1544                           base, end, reg_en, enable_mask, gating_mask);
1545
1546                 if (debug & DEBUG_SET_REG)
1547                         for (i = base; i < end; i++)
1548                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1549                                           i, src[i]);
1550
1551                 for (i = base; i < end; i++) {
1552                         if (i != reg_en)
1553                                 writel_relaxed(src[i], dst + i);
1554                 }
1555
1556                 writel(src[reg_en] | gating_mask, dst + reg_en);
1557                 dsb(sy);
1558
1559                 time_record(reg->task, 0);
1560         } break;
1561         case VPU_DEC_PP: {
1562                 u32 *dst = data->dec_dev.regs;
1563                 u32 base = hw_info->base_dec_pp;
1564                 u32 end  = hw_info->end_dec_pp;
1565
1566                 pservice->reg_codec = reg;
1567                 pservice->reg_pproc = reg;
1568
1569                 vpu_debug(DEBUG_TASK_INFO, "reg: base %3d end %d en %2d mask: en %x gate %x\n",
1570                           base, end, reg_en, enable_mask, gating_mask);
1571
1572                 /* VDPU_SOFT_RESET(dst); */
1573                 VDPU_CLEAN_CACHE(dst);
1574
1575                 if (debug & DEBUG_SET_REG)
1576                         for (i = base; i < end; i++)
1577                                 vpu_debug(DEBUG_SET_REG, "set reg[%02d] %08x\n",
1578                                           i, src[i]);
1579
1580                 for (i = base; i < end; i++) {
1581                         if (i != reg_en)
1582                                 writel_relaxed(src[i], dst + i);
1583                 }
1584
1585                 /* NOTE: dec output must be disabled */
1586
1587                 writel(src[reg_en] | gating_mask, dst + reg_en);
1588                 dsb(sy);
1589
1590                 time_record(reg->task, 0);
1591         } break;
1592         default: {
1593                 vpu_err("error: unsupported session type %d\n", reg->type);
1594                 atomic_sub(1, &pservice->total_running);
1595                 atomic_sub(1, &reg->session->task_running);
1596         } break;
1597         }
1598
1599         vpu_debug_leave();
1600 }
1601
1602 static void try_set_reg(struct vpu_subdev_data *data)
1603 {
1604         struct vpu_service_info *pservice = data->pservice;
1605
1606         vpu_debug_enter();
1607
1608         mutex_lock(&pservice->shutdown_lock);
1609         if (atomic_read(&pservice->service_on) == 0) {
1610                 mutex_unlock(&pservice->shutdown_lock);
1611                 return;
1612         }
1613         if (!list_empty(&pservice->waiting)) {
1614                 struct vpu_reg *reg_codec = pservice->reg_codec;
1615                 struct vpu_reg *reg_pproc = pservice->reg_pproc;
1616                 int can_set = 0;
1617                 bool change_able = (reg_codec == NULL) && (reg_pproc == NULL);
1618                 int reset_request = atomic_read(&pservice->reset_request);
1619                 struct vpu_reg *reg = list_entry(pservice->waiting.next,
1620                                 struct vpu_reg, status_link);
1621
1622                 vpu_service_power_on(pservice);
1623
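                      /* start a new task only when the hardware is idle or no reset is pending */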
1624                 if (change_able || !reset_request) {
1625                         switch (reg->type) {
1626                         case VPU_ENC: {
1627                                 if (change_able)
1628                                         can_set = 1;
1629                         } break;
1630                         case VPU_DEC: {
1631                                 if (reg_codec == NULL)
1632                                         can_set = 1;
1633                                 if (pservice->auto_freq && (reg_pproc != NULL))
1634                                         can_set = 0;
1635                         } break;
1636                         case VPU_PP: {
1637                                 if (reg_codec == NULL) {
1638                                         if (reg_pproc == NULL)
1639                                                 can_set = 1;
1640                                 } else {
1641                                         if ((reg_codec->type == VPU_DEC) &&
1642                                             (reg_pproc == NULL))
1643                                                 can_set = 1;
1644
1645                                         /*
1646                                          * NOTE:
1647                                          * can not change frequency
1648                                          * when vpu is working
1649                                          */
1650                                         if (pservice->auto_freq)
1651                                                 can_set = 0;
1652                                 }
1653                         } break;
1654                         case VPU_DEC_PP: {
1655                                 if (change_able)
1656                                         can_set = 1;
1657                         } break;
1658                         default: {
1659                                 pr_err("undefined reg type %d\n", reg->type);
1660                         } break;
1661                         }
1662                 }
1663
1664                 /* then check reset request */
1665                 if (reset_request && !change_able)
1666                         reset_request = 0;
1667
1668                 /* do reset before setting registers */
1669                 if (reset_request)
1670                         vpu_reset(data);
1671
1672                 if (can_set) {
1673                         reg_from_wait_to_run(pservice, reg);
1674                         reg_copy_to_hw(reg->data, reg);
1675                 }
1676         }
1677
1678         mutex_unlock(&pservice->shutdown_lock);
1679         vpu_debug_leave();
1680 }
1681
1682 static int return_reg(struct vpu_subdev_data *data,
1683                       struct vpu_reg *reg, u32 __user *dst)
1684 {
1685         struct vpu_hw_info *hw_info = data->hw_info;
1686         size_t size = reg->size;
1687         u32 base;
1688
1689         vpu_debug_enter();
1690         switch (reg->type) {
1691         case VPU_ENC: {
1692                 base = 0;
1693         } break;
1694         case VPU_DEC: {
1695                 base = hw_info->base_dec_pp;
1696         } break;
1697         case VPU_PP: {
1698                 base = hw_info->base_pp;
1699         } break;
1700         case VPU_DEC_PP: {
1701                 base = hw_info->base_dec_pp;
1702         } break;
1703         default: {
1704                 vpu_err("error: copy reg to user with unknown type %d\n",
1705                         reg->type);
1706                 return -EFAULT;
1707         } break;
1708         }
1709
1710         if (copy_to_user(dst, &reg->reg[base], size)) {
1711                 vpu_err("error: return_reg copy_to_user failed\n");
1712                 return -EFAULT;
1713         }
1714
1715         reg_deinit(data, reg);
1716         vpu_debug_leave();
1717         return 0;
1718 }
1719
1720 static long vpu_service_ioctl(struct file *filp, unsigned int cmd,
1721                               unsigned long arg)
1722 {
1723         struct vpu_subdev_data *data =
1724                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1725                              struct vpu_subdev_data, cdev);
1726         struct vpu_service_info *pservice = data->pservice;
1727         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1728
1729         vpu_debug_enter();
1730         if (NULL == session)
1731                 return -EINVAL;
1732
1733         switch (cmd) {
1734         case VPU_IOC_SET_CLIENT_TYPE: {
1735                 session->type = (enum VPU_CLIENT_TYPE)arg;
1736                 vpu_debug(DEBUG_IOCTL, "pid %d set client type %d\n",
1737                           session->pid, session->type);
1738         } break;
1739         case VPU_IOC_GET_HW_FUSE_STATUS: {
1740                 struct vpu_request req;
1741
1742                 vpu_debug(DEBUG_IOCTL, "pid %d get hw status %d\n",
1743                           session->pid, session->type);
1744                 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
1745                         vpu_err("error: get hw status copy_from_user failed\n");
1746                         return -EFAULT;
1747                 } else {
1748                         void *config = (session->type != VPU_ENC) ?
1749                                        ((void *)&pservice->dec_config) :
1750                                        ((void *)&pservice->enc_config);
1751                         size_t size = (session->type != VPU_ENC) ?
1752                                       (sizeof(struct vpu_dec_config)) :
1753                                       (sizeof(struct vpu_enc_config));
1754                         if (copy_to_user((void __user *)req.req,
1755                                          config, size)) {
1756                                 vpu_err("error: get hw status copy_to_user failed type %d\n",
1757                                         session->type);
1758                                 return -EFAULT;
1759                         }
1760                 }
1761         } break;
1762         case VPU_IOC_SET_REG: {
1763                 struct vpu_request req;
1764                 struct vpu_reg *reg;
1765
1766                 vpu_debug(DEBUG_IOCTL, "pid %d set reg type %d\n",
1767                           session->pid, session->type);
1768                 if (copy_from_user(&req, (void __user *)arg,
1769                                    sizeof(struct vpu_request))) {
1770                         vpu_err("error: set reg copy_from_user failed\n");
1771                         return -EFAULT;
1772                 }
1773
1774                 reg = reg_init(data, session, (void __user *)req.req, req.size);
1775                 if (NULL == reg) {
1776                         return -EFAULT;
1777                 } else {
1778                         mutex_lock(&pservice->lock);
1779                         try_set_reg(data);
1780                         mutex_unlock(&pservice->lock);
1781                 }
1782         } break;
1783         case VPU_IOC_GET_REG: {
1784                 struct vpu_request req;
1785                 struct vpu_reg *reg;
1786                 int ret;
1787
1788                 vpu_debug(DEBUG_IOCTL, "pid %d get reg type %d\n",
1789                           session->pid, session->type);
1790                 if (copy_from_user(&req, (void __user *)arg,
1791                                    sizeof(struct vpu_request))) {
1792                         vpu_err("error: get reg copy_from_user failed\n");
1793                         return -EFAULT;
1794                 }
1795
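                      /* block until this session has a finished task or the wait times out */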
1796                 ret = wait_event_timeout(session->wait,
1797                                          !list_empty(&session->done),
1798                                          VPU_TIMEOUT_DELAY);
1799
1800                 if (!list_empty(&session->done)) {
1801                         if (ret < 0)
1802                                 vpu_err("warning: pid %d wait task error ret %d\n",
1803                                         session->pid, ret);
1804                         ret = 0;
1805                 } else {
1806                         if (unlikely(ret < 0)) {
1807                                 vpu_err("error: pid %d wait task ret %d\n",
1808                                         session->pid, ret);
1809                         } else if (ret == 0) {
1810                                 vpu_err("error: pid %d wait %d task done timeout\n",
1811                                         session->pid,
1812                                         atomic_read(&session->task_running));
1813                                 ret = -ETIMEDOUT;
1814                         }
1815                 }
1816
1817                 if (ret < 0) {
1818                         int task_running = atomic_read(&session->task_running);
1819
1820                         mutex_lock(&pservice->lock);
1821                         vpu_service_dump(pservice);
1822                         if (task_running) {
1823                                 atomic_set(&session->task_running, 0);
1824                                 atomic_sub(task_running,
1825                                            &pservice->total_running);
1826                                 pr_err("%d task(s) still running but not returned, resetting hardware...",
1827                                        task_running);
1828                                 vpu_reset(data);
1829                                 pr_err("done\n");
1830                         }
1831                         vpu_service_session_clear(data, session);
1832                         mutex_unlock(&pservice->lock);
1833                         return ret;
1834                 }
1835
1836                 mutex_lock(&pservice->lock);
1837                 reg = list_entry(session->done.next,
1838                                  struct vpu_reg, session_link);
1839                 return_reg(data, reg, (u32 __user *)req.req);
1840                 mutex_unlock(&pservice->lock);
1841         } break;
1842         case VPU_IOC_PROBE_IOMMU_STATUS: {
1843                 int iommu_enable = 1;
1844
1845                 vpu_debug(DEBUG_IOCTL, "VPU_IOC_PROBE_IOMMU_STATUS\n");
1846
1847                 if (copy_to_user((void __user *)arg,
1848                                  &iommu_enable, sizeof(int))) {
1849                         vpu_err("error: iommu status copy_to_user failed\n");
1850                         return -EFAULT;
1851                 }
1852         } break;
1853         default: {
1854                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
1855         } break;
1856         }
1857         vpu_debug_leave();
1858         return 0;
1859 }
1860
1861 #ifdef CONFIG_COMPAT
1862 static long compat_vpu_service_ioctl(struct file *filp, unsigned int cmd,
1863                                      unsigned long arg)
1864 {
1865         struct vpu_subdev_data *data =
1866                 container_of(filp->f_path.dentry->d_inode->i_cdev,
1867                              struct vpu_subdev_data, cdev);
1868         struct vpu_service_info *pservice = data->pservice;
1869         struct vpu_session *session = (struct vpu_session *)filp->private_data;
1870
1871         vpu_debug_enter();
1872         vpu_debug(3, "cmd %x, COMPAT_VPU_IOC_SET_CLIENT_TYPE %x\n", cmd,
1873                   (u32)COMPAT_VPU_IOC_SET_CLIENT_TYPE);
1874         if (NULL == session)
1875                 return -EINVAL;
1876
1877         switch (cmd) {
1878         case COMPAT_VPU_IOC_SET_CLIENT_TYPE: {
1879                 session->type = (enum VPU_CLIENT_TYPE)arg;
1880                 vpu_debug(DEBUG_IOCTL, "compat set client type %d\n",
1881                           session->type);
1882         } break;
1883         case COMPAT_VPU_IOC_GET_HW_FUSE_STATUS: {
1884                 struct compat_vpu_request req;
1885
1886                 vpu_debug(DEBUG_IOCTL, "compat get hw status %d\n",
1887                           session->type);
1888                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1889                                    sizeof(struct compat_vpu_request))) {
1890                         vpu_err("error: compat get hw status copy_from_user failed\n");
1891                         return -EFAULT;
1892                 } else {
1893                         void *config = (session->type != VPU_ENC) ?
1894                                        ((void *)&pservice->dec_config) :
1895                                        ((void *)&pservice->enc_config);
1896                         size_t size = (session->type != VPU_ENC) ?
1897                                       (sizeof(struct vpu_dec_config)) :
1898                                       (sizeof(struct vpu_enc_config));
1899
1900                         if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1901                                          config, size)) {
1902                                 vpu_err("error: compat get hw status copy_to_user failed type %d\n",
1903                                         session->type);
1904                                 return -EFAULT;
1905                         }
1906                 }
1907         } break;
1908         case COMPAT_VPU_IOC_SET_REG: {
1909                 struct compat_vpu_request req;
1910                 struct vpu_reg *reg;
1911
1912                 vpu_debug(DEBUG_IOCTL, "compat set reg type %d\n",
1913                           session->type);
1914                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1915                                    sizeof(struct compat_vpu_request))) {
1916                         vpu_err("compat set_reg copy_from_user failed\n");
1917                         return -EFAULT;
1918                 }
1919                 reg = reg_init(data, session,
1920                                compat_ptr((compat_uptr_t)req.req), req.size);
1921                 if (NULL == reg) {
1922                         return -EFAULT;
1923                 } else {
1924                         mutex_lock(&pservice->lock);
1925                         try_set_reg(data);
1926                         mutex_unlock(&pservice->lock);
1927                 }
1928         } break;
1929         case COMPAT_VPU_IOC_GET_REG: {
1930                 struct compat_vpu_request req;
1931                 struct vpu_reg *reg;
1932                 int ret;
1933
1934                 vpu_debug(DEBUG_IOCTL, "compat get reg type %d\n",
1935                           session->type);
1936                 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1937                                    sizeof(struct compat_vpu_request))) {
1938                         vpu_err("compat get reg copy_from_user failed\n");
1939                         return -EFAULT;
1940                 }
1941
1942                 ret = wait_event_timeout(session->wait,
1943                                          !list_empty(&session->done),
1944                                          VPU_TIMEOUT_DELAY);
1945
1946                 if (!list_empty(&session->done)) {
1947                         if (ret < 0)
1948                                 vpu_err("warning: pid %d wait task error ret %d\n",
1949                                         session->pid, ret);
1950                         ret = 0;
1951                 } else {
1952                         if (unlikely(ret < 0)) {
1953                                 vpu_err("error: pid %d wait task ret %d\n",
1954                                         session->pid, ret);
1955                         } else if (ret == 0) {
1956                                 vpu_err("error: pid %d wait %d task done timeout\n",
1957                                         session->pid,
1958                                         atomic_read(&session->task_running));
1959                                 ret = -ETIMEDOUT;
1960                         }
1961                 }
1962
1963                 if (ret < 0) {
1964                         int task_running = atomic_read(&session->task_running);
1965
1966                         mutex_lock(&pservice->lock);
1967                         vpu_service_dump(pservice);
1968                         if (task_running) {
1969                                 atomic_set(&session->task_running, 0);
1970                                 atomic_sub(task_running,
1971                                            &pservice->total_running);
1972                                 pr_err("%d task(s) still running but not returned, resetting hardware...",
1973                                        task_running);
1974                                 vpu_reset(data);
1975                                 pr_err("done\n");
1976                         }
1977                         vpu_service_session_clear(data, session);
1978                         mutex_unlock(&pservice->lock);
1979                         return ret;
1980                 }
1981
1982                 mutex_lock(&pservice->lock);
1983                 reg = list_entry(session->done.next,
1984                                  struct vpu_reg, session_link);
1985                 return_reg(data, reg, compat_ptr((compat_uptr_t)req.req));
1986                 mutex_unlock(&pservice->lock);
1987         } break;
1988         case COMPAT_VPU_IOC_PROBE_IOMMU_STATUS: {
1989                 int iommu_enable = 1;
1990
1991                 vpu_debug(DEBUG_IOCTL, "COMPAT_VPU_IOC_PROBE_IOMMU_STATUS\n");
1992
1993                 if (copy_to_user(compat_ptr((compat_uptr_t)arg),
1994                                  &iommu_enable, sizeof(int))) {
1995                         vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
1996                         return -EFAULT;
1997                 }
1998         } break;
1999         default: {
2000                 vpu_err("error: unknown vpu service ioctl cmd %x\n", cmd);
2001         } break;
2002         }
2003         vpu_debug_leave();
2004         return 0;
2005 }
2006 #endif
2007
2008 static int vpu_service_check_hw(struct vpu_subdev_data *data)
2009 {
2010         int ret = -EINVAL, i = 0;
2011         u32 hw_id = readl_relaxed(data->regs);
2012
2013         hw_id = (hw_id >> 16) & 0xFFFF;
2014         pr_info("checking hw id %x\n", hw_id);
2015         data->hw_info = NULL;
2016         for (i = 0; i < ARRAY_SIZE(vcodec_info_set); i++) {
2017                 struct vcodec_info *info = &vcodec_info_set[i];
2018
2019                 if (hw_id == info->hw_id) {
2020                         data->hw_id = info->hw_id;
2021                         data->hw_info = info->hw_info;
2022                         data->task_info = info->task_info;
2023                         data->trans_info = info->trans_info;
2024                         ret = 0;
2025                         break;
2026                 }
2027         }
2028         return ret;
2029 }
2030
2031 static int vpu_service_open(struct inode *inode, struct file *filp)
2032 {
2033         struct vpu_subdev_data *data = container_of(
2034                         inode->i_cdev, struct vpu_subdev_data, cdev);
2035         struct vpu_service_info *pservice = data->pservice;
2036         struct vpu_session *session = kmalloc(sizeof(*session), GFP_KERNEL);
2037
2038         vpu_debug_enter();
2039
2040         if (NULL == session) {
2041                 vpu_err("error: unable to allocate memory for vpu_session\n");
2042                 return -ENOMEM;
2043         }
2044
2045         session->type   = VPU_TYPE_BUTT;
2046         session->pid    = current->pid;
2047         INIT_LIST_HEAD(&session->waiting);
2048         INIT_LIST_HEAD(&session->running);
2049         INIT_LIST_HEAD(&session->done);
2050         INIT_LIST_HEAD(&session->list_session);
2051         init_waitqueue_head(&session->wait);
2052         atomic_set(&session->task_running, 0);
2053         mutex_lock(&pservice->lock);
2054         list_add_tail(&session->list_session, &pservice->session);
2055         filp->private_data = (void *)session;
2056         mutex_unlock(&pservice->lock);
2057
2058         pr_debug("dev opened\n");
2059         vpu_debug_leave();
2060         return nonseekable_open(inode, filp);
2061 }
2062
2063 static int vpu_service_release(struct inode *inode, struct file *filp)
2064 {
2065         struct vpu_subdev_data *data = container_of(
2066                         inode->i_cdev, struct vpu_subdev_data, cdev);
2067         struct vpu_service_info *pservice = data->pservice;
2068         int task_running;
2069         struct vpu_session *session = (struct vpu_session *)filp->private_data;
2070
2071         vpu_debug_enter();
2072         if (NULL == session)
2073                 return -EINVAL;
2074
2075         task_running = atomic_read(&session->task_running);
2076         if (task_running) {
2077                 pr_err("error: session %d still has %d task running when closing\n",
2078                        session->pid, task_running);
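                      /* give the still-running task a short grace period before clearing the session */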
2079                 msleep(50);
2080         }
2081         wake_up(&session->wait);
2082
2083         mutex_lock(&pservice->lock);
2084         /* remove this filp from the asynchronously notified filps */
2085         list_del_init(&session->list_session);
2086         vpu_service_session_clear(data, session);
2087         kfree(session);
2088         filp->private_data = NULL;
2089         mutex_unlock(&pservice->lock);
2090
2091         pr_debug("dev closed\n");
2092         vpu_debug_leave();
2093         return 0;
2094 }
2095
2096 static const struct file_operations vpu_service_fops = {
2097         .unlocked_ioctl = vpu_service_ioctl,
2098         .open           = vpu_service_open,
2099         .release        = vpu_service_release,
2100 #ifdef CONFIG_COMPAT
2101         .compat_ioctl   = compat_vpu_service_ioctl,
2102 #endif
2103 };
2104
2105 static irqreturn_t vdpu_irq(int irq, void *dev_id);
2106 static irqreturn_t vdpu_isr(int irq, void *dev_id);
2107 static irqreturn_t vepu_irq(int irq, void *dev_id);
2108 static irqreturn_t vepu_isr(int irq, void *dev_id);
2109 static void get_hw_info(struct vpu_subdev_data *data);
2110
2111 static struct device *rockchip_get_sysmmu_dev(const char *compt)
2112 {
2113         struct device_node *dn = NULL;
2114         struct platform_device *pd = NULL;
2115         struct device *ret = NULL;
2116
2117         dn = of_find_compatible_node(NULL, NULL, compt);
2118         if (!dn) {
2119                 pr_err("can't find device node %s\n", compt);
2120                 return NULL;
2121         }
2122
2123         pd = of_find_device_by_node(dn);
2124         if (!pd) {
2125                 pr_err("can't find platform device in device node %s\n", compt);
2126                 return  NULL;
2127         }
2128         ret = &pd->dev;
2129
2130         return ret;
2131 }
2132
2133 #ifdef CONFIG_IOMMU_API
2134 static inline void platform_set_sysmmu(struct device *iommu,
2135                                        struct device *dev)
2136 {
2137         dev->archdata.iommu = iommu;
2138 }
2139 #else
2140 static inline void platform_set_sysmmu(struct device *iommu,
2141                                        struct device *dev)
2142 {
2143 }
2144 #endif
2145
2146 int vcodec_sysmmu_fault_hdl(struct device *dev,
2147                             enum rk_iommu_inttype itype,
2148                             unsigned long pgtable_base,
2149                             unsigned long fault_addr, unsigned int status)
2150 {
2151         struct platform_device *pdev;
2152         struct vpu_service_info *pservice;
2153         struct vpu_subdev_data *data;
2154
2155         vpu_debug_enter();
2156
2157         if (dev == NULL) {
2158                 pr_err("invalid NULL dev\n");
2159                 return 0;
2160         }
2161
2162         pdev = container_of(dev, struct platform_device, dev);
2163         if (pdev == NULL) {
2164                 pr_err("invalid NULL platform_device\n");
2165                 return 0;
2166         }
2167
2168         data = platform_get_drvdata(pdev);
2169         if (data == NULL) {
2170                 pr_err("invalid NULL vpu_subdev_data\n");
2171                 return 0;
2172         }
2173
2174         pservice = data->pservice;
2175         if (pservice == NULL) {
2176                 pr_err("invalid NULL vpu_service_info\n");
2177                 return 0;
2178         }
2179
2180         if (pservice->reg_codec) {
2181                 struct vpu_reg *reg = pservice->reg_codec;
2182                 struct vcodec_mem_region *mem, *n;
2183                 int i = 0;
2184
2185                 pr_err("vcodec, fault addr 0x%08lx\n", fault_addr);
2186                 if (!list_empty(&reg->mem_region_list)) {
2187                         list_for_each_entry_safe(mem, n, &reg->mem_region_list,
2188                                                  reg_lnk) {
2189                                 pr_err("vcodec, reg[%02u] mem region [%02d] 0x%lx %lx\n",
2190                                        mem->reg_idx, i, mem->iova, mem->len);
2191                                 i++;
2192                         }
2193                 } else {
2194                         pr_err("no memory region mapped\n");
2195                 }
2196
2197                 if (reg->data) {
2198                         struct vpu_subdev_data *data = reg->data;
2199                         u32 *base = (u32 *)data->dec_dev.regs;
2200                         u32 len = data->hw_info->dec_reg_num;
2201
2202                         pr_err("current error register set:\n");
2203
2204                         for (i = 0; i < len; i++)
2205                                 pr_err("reg[%02d] %08x\n",
2206                                        i, readl_relaxed(base + i));
2207                 }
2208
2209                 pr_alert("vcodec, page fault occur, reset hw\n");
2210
2211                 /* reg->reg[101] = 1; */
2212                 vpu_reset(data);
2213         }
2214
2215         return 0;
2216 }
2217
2218 static int vcodec_subdev_probe(struct platform_device *pdev,
2219                                struct vpu_service_info *pservice)
2220 {
2221         int ret = 0;
2222         struct resource *res = NULL;
2223         u32 ioaddr = 0;
2224         u8 *regs = NULL;
2225         struct vpu_hw_info *hw_info = NULL;
2226         struct device *dev = &pdev->dev;
2227         char *name = (char *)dev_name(dev);
2228         struct device_node *np = pdev->dev.of_node;
2229         struct vpu_subdev_data *data =
2230                 devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
2231         u32 iommu_en = 0;
2232         char mmu_dev_dts_name[40];
2233
              if (!data)
                      return -ENOMEM;

2234         of_property_read_u32(np, "iommu_enabled", &iommu_en);
2235
2236         pr_info("probe device %s\n", dev_name(dev));
2237
2238         data->pservice = pservice;
2239         data->dev = dev;
2240
2241         of_property_read_string(np, "name", (const char **)&name);
2242         of_property_read_u32(np, "dev_mode", (u32 *)&data->mode);
2243
2244         if (pservice->reg_base == 0) {
2245                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2246                 data->regs = devm_ioremap_resource(dev, res);
2247                 if (IS_ERR(data->regs)) {
2248                         ret = PTR_ERR(data->regs);
2249                         goto err;
2250                 }
2251                 ioaddr = res->start;
2252         } else {
2253                 data->regs = pservice->reg_base;
2254                 ioaddr = pservice->ioaddr;
2255         }
2256
2257         clear_bit(MMU_ACTIVATED, &data->state);
2258         vcodec_enter_mode(data);
2259
2260         vpu_service_power_on(pservice);
2261         ret = vpu_service_check_hw(data);
2262         if (ret < 0) {
2263                 vpu_err("error: hw info check failed\n");
2264                 goto err;
2265         }
2266
2267         hw_info = data->hw_info;
2268         regs = (u8 *)data->regs;
2269
2270         if (hw_info->dec_reg_num) {
2271                 data->dec_dev.iosize = hw_info->dec_io_size;
2272                 data->dec_dev.regs = (u32 *)(regs + hw_info->dec_offset);
2273         }
2274
2275         if (hw_info->enc_reg_num) {
2276                 data->enc_dev.iosize = hw_info->enc_io_size;
2277                 data->enc_dev.regs = (u32 *)(regs + hw_info->enc_offset);
2278         }
2279
2280         data->reg_size = max(hw_info->dec_io_size, hw_info->enc_io_size);
2281
2282         data->irq_enc = platform_get_irq_byname(pdev, "irq_enc");
2283         if (data->irq_enc > 0) {
2284                 ret = devm_request_threaded_irq(dev, data->irq_enc,
2285                                                 vepu_irq, vepu_isr,
2286                                                 IRQF_SHARED, dev_name(dev),
2287                                                 (void *)data);
2288                 if (ret) {
2289                         dev_err(dev, "error: can't request vepu irq %d\n",
2290                                 data->irq_enc);
2291                         goto err;
2292                 }
2293         }
2294         data->irq_dec = platform_get_irq_byname(pdev, "irq_dec");
2295         if (data->irq_dec > 0) {
2296                 ret = devm_request_threaded_irq(dev, data->irq_dec,
2297                                                 vdpu_irq, vdpu_isr,
2298                                                 IRQF_SHARED, dev_name(dev),
2299                                                 (void *)data);
2300                 if (ret) {
2301                         dev_err(dev, "error: can't request vdpu irq %d\n",
2302                                 data->irq_dec);
2303                         goto err;
2304                 }
2305         }
2306         atomic_set(&data->dec_dev.irq_count_codec, 0);
2307         atomic_set(&data->dec_dev.irq_count_pp, 0);
2308         atomic_set(&data->enc_dev.irq_count_codec, 0);
2309         atomic_set(&data->enc_dev.irq_count_pp, 0);
2310
2311         if (iommu_en) {
2312                 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
2313                         sprintf(mmu_dev_dts_name,
2314                                 HEVC_IOMMU_COMPATIBLE_NAME);
2315                 else if (data->mode == VCODEC_RUNNING_MODE_VPU)
2316                         sprintf(mmu_dev_dts_name,
2317                                 VPU_IOMMU_COMPATIBLE_NAME);
2318                 else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2319                         sprintf(mmu_dev_dts_name, VDEC_IOMMU_COMPATIBLE_NAME);
2320                 else
2321                         sprintf(mmu_dev_dts_name,
2322                                 HEVC_IOMMU_COMPATIBLE_NAME);
2323
2324                 data->mmu_dev =
2325                         rockchip_get_sysmmu_dev(mmu_dev_dts_name);
2326
2327                 if (data->mmu_dev)
2328                         platform_set_sysmmu(data->mmu_dev, dev);
2329
2330                 rockchip_iovmm_set_fault_handler(dev, vcodec_sysmmu_fault_hdl);
2331         }
2332
2333         get_hw_info(data);
2334         pservice->auto_freq = true;
2335
2336         vcodec_exit_mode(data);
2337         /* create device node */
2338         ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
2339         if (ret) {
2340                 dev_err(dev, "alloc dev_t failed\n");
2341                 goto err;
2342         }
2343
2344         cdev_init(&data->cdev, &vpu_service_fops);
2345
2346         data->cdev.owner = THIS_MODULE;
2347         data->cdev.ops = &vpu_service_fops;
2348
2349         ret = cdev_add(&data->cdev, data->dev_t, 1);
2350
2351         if (ret) {
2352                 dev_err(dev, "add dev_t failed\n");
2353                 goto err;
2354         }
2355
2356         data->cls = class_create(THIS_MODULE, name);
2357
2358         if (IS_ERR(data->cls)) {
2359                 ret = PTR_ERR(data->cls);
2360                 dev_err(dev, "class_create err:%d\n", ret);
2361                 goto err;
2362         }
2363
2364         data->child_dev = device_create(data->cls, dev,
2365                 data->dev_t, NULL, name);
2366
2367         platform_set_drvdata(pdev, data);
2368
2369         INIT_LIST_HEAD(&data->lnk_service);
2370         list_add_tail(&data->lnk_service, &pservice->subdev_list);
2371
2372 #ifdef CONFIG_DEBUG_FS
2373         data->debugfs_dir = vcodec_debugfs_create_device_dir(name, parent);
2374         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2375                 data->debugfs_file_regs =
2376                         debugfs_create_file("regs", 0664, data->debugfs_dir,
2377                                         data, &debug_vcodec_fops);
2378         else
2379                 vpu_err("create debugfs dir %s failed\n", name);
2380 #endif
2381         return 0;
2382 err:
2383         if (data->child_dev) {
2384                 device_destroy(data->cls, data->dev_t);
2385                 cdev_del(&data->cdev);
2386                 unregister_chrdev_region(data->dev_t, 1);
2387         }
2388
2389         if (data->cls)
2390                 class_destroy(data->cls);
2391         return -1;
2392 }
2393
2394 static void vcodec_subdev_remove(struct vpu_subdev_data *data)
2395 {
2396         struct vpu_service_info *pservice = data->pservice;
2397
2398         mutex_lock(&pservice->lock);
2399         cancel_delayed_work_sync(&pservice->power_off_work);
2400         vpu_service_power_off(pservice);
2401         mutex_unlock(&pservice->lock);
2402
2403         device_destroy(data->cls, data->dev_t);
2404         class_destroy(data->cls);
2405         cdev_del(&data->cdev);
2406         unregister_chrdev_region(data->dev_t, 1);
2407
2408 #ifdef CONFIG_DEBUG_FS
2409         if (!IS_ERR_OR_NULL(data->debugfs_dir))
2410                 debugfs_remove_recursive(data->debugfs_dir);
2411 #endif
2412 }
2413
2414 static void vcodec_read_property(struct device_node *np,
2415                                  struct vpu_service_info *pservice)
2416 {
2417         pservice->mode_bit = 0;
2418         pservice->mode_ctrl = 0;
2419         pservice->subcnt = 0;
2420         pservice->grf_base = NULL;
2421
2422         of_property_read_u32(np, "subcnt", &pservice->subcnt);
2423
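              /* combo devices use mode_bit/mode_ctrl to switch the shared hardware between subdevices */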
2424         if (pservice->subcnt > 1) {
2425                 of_property_read_u32(np, "mode_bit", &pservice->mode_bit);
2426                 of_property_read_u32(np, "mode_ctrl", &pservice->mode_ctrl);
2427         }
2428 #ifdef CONFIG_MFD_SYSCON
2429         pservice->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
2430         if (IS_ERR_OR_NULL(pservice->grf)) {
2431                 pservice->grf = NULL;
2432 #ifdef CONFIG_ARM
2433                 pservice->grf_base = RK_GRF_VIRT;
2434 #else
2435                 vpu_err("can't find vpu grf property\n");
2436                 return;
2437 #endif
2438         }
2439 #else
2440 #ifdef CONFIG_ARM
2441         pservice->grf_base = RK_GRF_VIRT;
2442 #else
2443         vpu_err("can't find vpu grf property\n");
2444         return;
2445 #endif
2446 #endif
2447
2448 #ifdef CONFIG_RESET_CONTROLLER
2449         pservice->rst_a = devm_reset_control_get(pservice->dev, "video_a");
2450         pservice->rst_h = devm_reset_control_get(pservice->dev, "video_h");
2451         pservice->rst_v = devm_reset_control_get(pservice->dev, "video");
2452
2453         if (IS_ERR_OR_NULL(pservice->rst_a)) {
2454                 pr_warn("No aclk reset resource defined\n");
2455                 pservice->rst_a = NULL;
2456         }
2457
2458         if (IS_ERR_OR_NULL(pservice->rst_h)) {
2459                 pr_warn("No hclk reset resource defined\n");
2460                 pservice->rst_h = NULL;
2461         }
2462
2463         if (IS_ERR_OR_NULL(pservice->rst_v)) {
2464                 pr_warn("No core reset resource defined\n");
2465                 pservice->rst_v = NULL;
2466         }
2467 #endif
2468
2469         of_property_read_string(np, "name", (const char **)&pservice->name);
2470 }
2471
2472 static void vcodec_init_drvdata(struct vpu_service_info *pservice)
2473 {
2474         pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2475         pservice->curr_mode = -1;
2476
2477         wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
2478         INIT_LIST_HEAD(&pservice->waiting);
2479         INIT_LIST_HEAD(&pservice->running);
2480         mutex_init(&pservice->lock);
2481         mutex_init(&pservice->shutdown_lock);
2482         atomic_set(&pservice->service_on, 1);
2483
2484         INIT_LIST_HEAD(&pservice->done);
2485         INIT_LIST_HEAD(&pservice->session);
2486         INIT_LIST_HEAD(&pservice->subdev_list);
2487
2488         pservice->reg_pproc     = NULL;
2489         atomic_set(&pservice->total_running, 0);
2490         atomic_set(&pservice->enabled,       0);
2491         atomic_set(&pservice->power_on_cnt,  0);
2492         atomic_set(&pservice->power_off_cnt, 0);
2493         atomic_set(&pservice->reset_request, 0);
2494
2495         INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
2496         pservice->last.tv64 = 0;
2497
2498         pservice->ion_client = rockchip_ion_client_create("vpu");
2499         if (IS_ERR(pservice->ion_client)) {
2500                 vpu_err("failed to create ion client for vcodec ret %ld\n",
2501                         PTR_ERR(pservice->ion_client));
2502         } else {
2503                 vpu_debug(DEBUG_IOMMU, "vcodec ion client create success!\n");
2504         }
2505 }
2506
2507 static int vcodec_probe(struct platform_device *pdev)
2508 {
2509         int i;
2510         int ret = 0;
2511         struct resource *res = NULL;
2512         struct device *dev = &pdev->dev;
2513         struct device_node *np = pdev->dev.of_node;
2514         struct vpu_service_info *pservice =
2515                 devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
2516
              if (!pservice)
                      return -ENOMEM;

2517         pservice->dev = dev;
2518
2519         vcodec_read_property(np, pservice);
2520         vcodec_init_drvdata(pservice);
2521
2522         if (strncmp(pservice->name, "hevc_service", 12) == 0)
2523                 pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
2524         else if (strncmp(pservice->name, "vpu_service", 11) == 0)
2525                 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2526         else if (strncmp(pservice->name, "rkvdec", 6) == 0)
2527                 pservice->dev_id = VCODEC_DEVICE_ID_RKVDEC;
2528         else
2529                 pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
2530
2531         ret = vpu_get_clk(pservice);
2532         if (ret < 0)
                      goto err;
2533
2534         if (of_property_read_bool(np, "reg")) {
2535                 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2536
2537                 pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
2538                 if (IS_ERR(pservice->reg_base)) {
2539                         vpu_err("ioremap registers base failed\n");
2540                         ret = PTR_ERR(pservice->reg_base);
2541                         goto err;
2542                 }
2543                 pservice->ioaddr = res->start;
2544         } else {
2545                 pservice->reg_base = 0;
2546         }
2547
2548         pm_runtime_enable(dev);
2549
2550         if (of_property_read_bool(np, "subcnt")) {
2551                 for (i = 0; i < pservice->subcnt; i++) {
2552                         struct device_node *sub_np;
2553                         struct platform_device *sub_pdev;
2554
2555                         sub_np = of_parse_phandle(np, "rockchip,sub", i);
2556                         sub_pdev = of_find_device_by_node(sub_np);
2557
2558                         vcodec_subdev_probe(sub_pdev, pservice);
2559                 }
2560         } else {
2561                 vcodec_subdev_probe(pdev, pservice);
2562         }
2563
2564         vpu_service_power_off(pservice);
2565
2566         pr_info("init success\n");
2567
2568         return 0;
2569
2570 err:
2571         pr_info("init failed\n");
2572         vpu_service_power_off(pservice);
2573         vpu_put_clk(pservice);
2574         wake_lock_destroy(&pservice->wake_lock);
2575
2576         return ret;
2577 }
2578
2579 static int vcodec_remove(struct platform_device *pdev)
2580 {
2581         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2582
2583         vcodec_subdev_remove(data);
2584
2585         pm_runtime_disable(data->pservice->dev);
2586
2587         return 0;
2588 }
2589
2590 static void vcodec_shutdown(struct platform_device *pdev)
2591 {
2592         struct vpu_subdev_data *data = platform_get_drvdata(pdev);
2593         struct vpu_service_info *pservice = data->pservice;
2594
2595         dev_info(&pdev->dev, "%s IN\n", __func__);
2596
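              /* mark the service off under shutdown_lock so try_set_reg() will not start new tasks */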
2597         mutex_lock(&pservice->shutdown_lock);
2598         atomic_set(&pservice->service_on, 0);
2599         mutex_unlock(&pservice->shutdown_lock);
2600
2601         vcodec_exit_mode(data);
2602
2603         vpu_service_clear(data);
2604         vcodec_subdev_remove(data);
2605
2606         pm_runtime_disable(&pdev->dev);
2607 }
2608
2609 #if defined(CONFIG_OF)
2610 static const struct of_device_id vcodec_service_dt_ids[] = {
2611         {.compatible = "rockchip,vpu_service",},
2612         {.compatible = "rockchip,hevc_service",},
2613         {.compatible = "rockchip,vpu_combo",},
2614         {.compatible = "rockchip,rkvdec",},
2615         {},
2616 };
2617 #endif
2618
2619 static struct platform_driver vcodec_driver = {
2620         .probe = vcodec_probe,
2621         .remove = vcodec_remove,
2622         .shutdown = vcodec_shutdown,
2623         .driver = {
2624                 .name = "vcodec",
2625                 .owner = THIS_MODULE,
2626 #if defined(CONFIG_OF)
2627                 .of_match_table = of_match_ptr(vcodec_service_dt_ids),
2628 #endif
2629         },
2630 };
2631
2632 static void get_hw_info(struct vpu_subdev_data *data)
2633 {
2634         struct vpu_service_info *pservice = data->pservice;
2635         struct vpu_dec_config *dec = &pservice->dec_config;
2636         struct vpu_enc_config *enc = &pservice->enc_config;
2637
2638         if (cpu_is_rk2928() || cpu_is_rk3036() ||
2639             cpu_is_rk30xx() || cpu_is_rk312x() ||
2640             cpu_is_rk3188())
2641                 dec->max_dec_pic_width = 1920;
2642         else
2643                 dec->max_dec_pic_width = 4096;
2644
2645         if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2646                 dec->h264_support = 3;
2647                 dec->jpeg_support = 1;
2648                 dec->mpeg4_support = 2;
2649                 dec->vc1_support = 3;
2650                 dec->mpeg2_support = 1;
2651                 dec->pp_support = 1;
2652                 dec->sorenson_support = 1;
2653                 dec->ref_buf_support = 3;
2654                 dec->vp6_support = 1;
2655                 dec->vp7_support = 1;
2656                 dec->vp8_support = 1;
2657                 dec->avs_support = 1;
2658                 dec->jpeg_ext_support = 0;
2659                 dec->custom_mpeg4_support = 1;
2660                 dec->reserve = 0;
2661                 dec->mvc_support = 1;
2662
2663                 if (!cpu_is_rk3036()) {
2664                         u32 config_reg = readl_relaxed(data->enc_dev.regs + 63);
2665
2666                         enc->max_encoded_width = config_reg & ((1 << 11) - 1);
2667                         enc->h264_enabled = 1;
2668                         enc->mpeg4_enabled = (config_reg >> 26) & 1;
2669                         enc->jpeg_enabled = 1;
2670                         enc->vs_enabled = (config_reg >> 24) & 1;
2671                         enc->rgb_enabled = (config_reg >> 28) & 1;
2672                         enc->reg_size = data->reg_size;
2673                         enc->reserv[0] = 0;
2674                         enc->reserv[1] = 0;
2675                 }
2676
2677                 pservice->auto_freq = true;
2678                 vpu_debug(DEBUG_EXTRA_INFO, "vpu_service set to auto frequency mode\n");
2679                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2680
2681                 pservice->bug_dec_addr = cpu_is_rk30xx();
2682         } else if (data->mode == VCODEC_RUNNING_MODE_RKVDEC) {
2683                 pservice->auto_freq = true;
2684                 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2685         } else {
2686                 /* disable frequency switch in hevc.*/
2687                 pservice->auto_freq = false;
2688         }
2689 }
2690
2691 static bool check_irq_err(struct vpu_task_info *task, u32 irq_status)
2692 {
2693         vpu_debug(DEBUG_IRQ_CHECK, "task %s status %08x mask %08x\n",
2694                   task->name, irq_status, task->error_mask);
2695
2696         return (task->error_mask & irq_status) ? true : false;
2697 }
2698
2699 static irqreturn_t vdpu_irq(int irq, void *dev_id)
2700 {
2701         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2702         struct vpu_service_info *pservice = data->pservice;
2703         struct vpu_task_info *task = NULL;
2704         struct vpu_device *dev = &data->dec_dev;
2705         u32 hw_id = data->hw_info->hw_id;
2706         u32 raw_status;
2707         u32 dec_status;
2708
2709         task = &data->task_info[TASK_DEC];
2710
2711         raw_status = readl_relaxed(dev->regs + task->reg_irq);
2712         dec_status = raw_status;
2713
2714         vpu_debug(DEBUG_TASK_INFO, "vdpu_irq reg %d status %x mask: irq %x ready %x error %0x\n",
2715                   task->reg_irq, dec_status,
2716                   task->irq_mask, task->ready_mask, task->error_mask);
2717
2718         if (dec_status & task->irq_mask) {
2719                 time_record(task, 1);
2720                 vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq dec status %08x\n",
2721                           dec_status);
2722                 if ((dec_status & 0x40001) == 0x40001) {
2723                         do {
2724                                 dec_status =
2725                                         readl_relaxed(dev->regs +
2726                                                 task->reg_irq);
2727                         } while ((dec_status & 0x40001) == 0x40001);
2728                 }
2729
2730                 if (check_irq_err(task, dec_status))
2731                         atomic_add(1, &pservice->reset_request);
2732
2733                 writel_relaxed(0, dev->regs + task->reg_irq);
2734
2735                 /*
2736                  * NOTE: rkvdec needs a reset after each task to avoid a timeout
2737                  *       error when switching from H.264 to H.265
2738                  */
2739                 if (data->mode == VCODEC_RUNNING_MODE_RKVDEC)
2740                         writel(0x100000, dev->regs + task->reg_irq);
2741
2742                 /* set clock gating to save power */
2743                 writel(task->gating_mask, dev->regs + task->reg_irq);
2744
2745                 atomic_add(1, &dev->irq_count_codec);
2746                 time_diff(task);
2747         }
2748
2749         task = &data->task_info[TASK_PP];
2750         if (hw_id != HEVC_ID && hw_id != RKV_DEC_ID) {
2751                 u32 pp_status = readl_relaxed(dev->regs + task->irq_mask);
2752
2753                 if (pp_status & task->irq_mask) {
2754                         time_record(task, 1);
2755                         vpu_debug(DEBUG_IRQ_STATUS, "vdpu_irq pp status %08x\n",
2756                                   pp_status);
2757
2758                         if (check_irq_err(task, dec_status))
2759                                 atomic_add(1, &pservice->reset_request);
2760
2761                         /* clear pp IRQ */
2762                         writel_relaxed(pp_status & (~task->reg_irq),
2763                                        dev->regs + task->irq_mask);
2764                         atomic_add(1, &dev->irq_count_pp);
2765                         time_diff(task);
2766                 }
2767         }
2768
2769         pservice->irq_status = raw_status;
2770
2771         if (atomic_read(&dev->irq_count_pp) ||
2772             atomic_read(&dev->irq_count_codec))
2773                 return IRQ_WAKE_THREAD;
2774         else
2775                 return IRQ_NONE;
2776 }
2777
2778 static irqreturn_t vdpu_isr(int irq, void *dev_id)
2779 {
2780         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2781         struct vpu_service_info *pservice = data->pservice;
2782         struct vpu_device *dev = &data->dec_dev;
2783
2784         mutex_lock(&pservice->lock);
2785         if (atomic_read(&dev->irq_count_codec)) {
2786                 atomic_sub(1, &dev->irq_count_codec);
2787                 if (pservice->reg_codec == NULL) {
2788                         vpu_err("error: dec isr with no task waiting\n");
2789                 } else {
2790                         reg_from_run_to_done(data, pservice->reg_codec);
2791                         /* avoid vpu timeout and can't recover problem */
2792                         VDPU_SOFT_RESET(data->regs);
2793                 }
2794         }
2795
2796         if (atomic_read(&dev->irq_count_pp)) {
2797                 atomic_sub(1, &dev->irq_count_pp);
2798                 if (pservice->reg_pproc == NULL)
2799                         vpu_err("error: pp isr with no task waiting\n");
2800                 else
2801                         reg_from_run_to_done(data, pservice->reg_pproc);
2802         }
2803         try_set_reg(data);
2804         mutex_unlock(&pservice->lock);
2805         return IRQ_HANDLED;
2806 }
2807
2808 static irqreturn_t vepu_irq(int irq, void *dev_id)
2809 {
2810         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2811         struct vpu_service_info *pservice = data->pservice;
2812         struct vpu_task_info *task = &data->task_info[TASK_ENC];
2813         struct vpu_device *dev = &data->enc_dev;
2814         u32 irq_status;
2815
2816         irq_status = readl_relaxed(dev->regs + task->reg_irq);
2817
2818         vpu_debug(DEBUG_TASK_INFO, "vepu_irq reg %d status %x mask: irq %x ready %x error %x\n",
2819                   task->reg_irq, irq_status,
2820                   task->irq_mask, task->ready_mask, task->error_mask);
2821
2822         vpu_debug(DEBUG_IRQ_STATUS, "vepu_irq enc status %08x\n", irq_status);
2823
2824         if (likely(irq_status & task->irq_mask)) {
2825                 time_record(task, 1);
2826
2827                 if (check_irq_err(task, irq_status))
2828                         atomic_add(1, &pservice->reset_request);
2829
2830                 /* clear enc IRQ */
2831                 writel_relaxed(irq_status & (~task->irq_mask),
2832                                dev->regs + task->reg_irq);
2833
2834                 atomic_add(1, &dev->irq_count_codec);
2835                 time_diff(task);
2836         }
2837
2838         pservice->irq_status = irq_status;
2839
2840         if (atomic_read(&dev->irq_count_codec))
2841                 return IRQ_WAKE_THREAD;
2842         else
2843                 return IRQ_NONE;
2844 }
2845
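/*
 * Threaded half of the encoder interrupt: with pservice->lock held, move
 * the finished encoder register set from running to done and try to start
 * the next pending task.
 */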
2846 static irqreturn_t vepu_isr(int irq, void *dev_id)
2847 {
2848         struct vpu_subdev_data *data = (struct vpu_subdev_data *)dev_id;
2849         struct vpu_service_info *pservice = data->pservice;
2850         struct vpu_device *dev = &data->enc_dev;
2851
2852         mutex_lock(&pservice->lock);
2853         if (atomic_read(&dev->irq_count_codec)) {
2854                 atomic_sub(1, &dev->irq_count_codec);
2855                 if (pservice->reg_codec == NULL)
2856                         vpu_err("error: enc isr with no task waiting\n");
2857                 else
2858                         reg_from_run_to_done(data, pservice->reg_codec);
2859         }
2860         try_set_reg(data);
2861         mutex_unlock(&pservice->lock);
2862         return IRQ_HANDLED;
2863 }
2864
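/*
 * Module entry point: register the platform driver and, when debugfs is
 * enabled, create the top-level "vcodec" debugfs directory.
 */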
2865 static int __init vcodec_service_init(void)
2866 {
2867         int ret = platform_driver_register(&vcodec_driver);
2868
2869         if (ret) {
2870                 vpu_err("Platform driver register failed (%d).\n", ret);
2871                 return ret;
2872         }
2873
2874 #ifdef CONFIG_DEBUG_FS
2875         vcodec_debugfs_init();
2876 #endif
2877
2878         return ret;
2879 }
2880
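/*
 * Module exit point: tear down the debugfs entries before unregistering
 * the platform driver.
 */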
2881 static void __exit vcodec_service_exit(void)
2882 {
2883 #ifdef CONFIG_DEBUG_FS
2884         vcodec_debugfs_exit();
2885 #endif
2886
2887         platform_driver_unregister(&vcodec_driver);
2888 }
2889
2890 module_init(vcodec_service_init);
2891 module_exit(vcodec_service_exit);
2892 MODULE_LICENSE("GPL v2");
2893
2894 #ifdef CONFIG_DEBUG_FS
2895 #include <linux/seq_file.h>
2896
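/*
 * Create the top-level "vcodec" debugfs directory in the file-scope
 * "parent" dentry; per-device directories and files are created against
 * it when a sub-device probes.
 */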
2897 static int vcodec_debugfs_init(void)
2898 {
2899         parent = debugfs_create_dir("vcodec", NULL);
2900         if (!parent)
2901                 return -1;
2902
2903         return 0;
2904 }
2905
2906 static void vcodec_debugfs_exit(void)
2907 {
2908         debugfs_remove(parent);
2909 }
2910
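/* Create a per-device directory underneath the top-level "vcodec" dir. */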
2911 static struct dentry *vcodec_debugfs_create_device_dir(
2912                 char *dirname, struct dentry *parent)
2913 {
2914         return debugfs_create_dir(dirname, parent);
2915 }
2916
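/*
 * debugfs show handler: with the hardware powered on, dump the encoder
 * (when present) and decoder register files, the per-session waiting,
 * running and done register sets, and the power on/off counters.
 */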
2917 static int debug_vcodec_show(struct seq_file *s, void *unused)
2918 {
2919         struct vpu_subdev_data *data = s->private;
2920         struct vpu_service_info *pservice = data->pservice;
2921         unsigned int i, n;
2922         struct vpu_reg *reg, *reg_tmp;
2923         struct vpu_session *session, *session_tmp;
2924
2925         mutex_lock(&pservice->lock);
2926         vpu_service_power_on(pservice);
2927         if (data->hw_info->hw_id != HEVC_ID) {
2928                 seq_puts(s, "\nENC Registers:\n");
2929                 n = data->enc_dev.iosize >> 2;
2930
2931                 for (i = 0; i < n; i++)
2932                         seq_printf(s, "\tswreg%d = %08X\n", i,
2933                                    readl_relaxed(data->enc_dev.regs + i));
2934         }
2935
2936         seq_puts(s, "\nDEC Registers:\n");
2937
2938         n = data->dec_dev.iosize >> 2;
2939         for (i = 0; i < n; i++)
2940                 seq_printf(s, "\tswreg%d = %08X\n", i,
2941                            readl_relaxed(data->dec_dev.regs + i));
2942
2943         seq_puts(s, "\nvpu service status:\n");
2944
2945         list_for_each_entry_safe(session, session_tmp,
2946                                  &pservice->session, list_session) {
2947                 seq_printf(s, "session pid %d type %d:\n",
2948                            session->pid, session->type);
2949
2950                 list_for_each_entry_safe(reg, reg_tmp,
2951                                          &session->waiting, session_link) {
2952                         seq_printf(s, "waiting register set %p\n", reg);
2953                 }
2954                 list_for_each_entry_safe(reg, reg_tmp,
2955                                          &session->running, session_link) {
2956                         seq_printf(s, "running register set %p\n", reg);
2957                 }
2958                 list_for_each_entry_safe(reg, reg_tmp,
2959                                          &session->done, session_link) {
2960                         seq_printf(s, "done    register set %p\n", reg);
2961                 }
2962         }
2963
2964         seq_printf(s, "\npower counter: on %d off %d\n",
2965                    atomic_read(&pservice->power_on_cnt),
2966                    atomic_read(&pservice->power_off_cnt));
2967
2968         mutex_unlock(&pservice->lock);
2969         vpu_service_power_off(pservice);
2970
2971         return 0;
2972 }
2973
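/* Wire debug_vcodec_show up through the single_open() seq_file helper. */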
2974 static int debug_vcodec_open(struct inode *inode, struct file *file)
2975 {
2976         return single_open(file, debug_vcodec_show, inode->i_private);
2977 }
2978
2979 #endif
2980