2 * Copyright (C) 2014 ROCKCHIP, Inc.
3 * author: chenhengming chm@rock-chips.com
4 * Alpha Lin, alpha.lin@rock-chips.com
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/interrupt.h>
24 #include <linux/kernel.h>
25 #include <linux/module.h>
27 #include <linux/ioport.h>
28 #include <linux/miscdevice.h>
30 #include <linux/poll.h>
31 #include <linux/platform_device.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/wakelock.h>
35 #include <linux/cdev.h>
37 #include <linux/of_platform.h>
38 #include <linux/of_irq.h>
39 #include <linux/rockchip/cpu.h>
40 #include <linux/rockchip/cru.h>
41 #ifdef CONFIG_MFD_SYSCON
42 #include <linux/regmap.h>
44 #include <linux/mfd/syscon.h>
46 #include <asm/cacheflush.h>
47 #include <linux/uaccess.h>
48 #include <linux/rockchip/grf.h>
50 #if defined(CONFIG_ION_ROCKCHIP)
51 #include <linux/rockchip_ion.h>
54 #if defined(CONFIG_ROCKCHIP_IOMMU) & defined(CONFIG_ION_ROCKCHIP)
55 #define CONFIG_VCODEC_MMU
58 #ifdef CONFIG_VCODEC_MMU
59 #include <linux/rockchip-iovmm.h>
60 #include <linux/dma-buf.h>
63 #ifdef CONFIG_DEBUG_FS
64 #include <linux/debugfs.h>
67 #if defined(CONFIG_ARCH_RK319X)
71 #include "vcodec_service.h"
74 module_param(debug, int, S_IRUGO | S_IWUSR);
75 MODULE_PARM_DESC(debug,
76 "Debug level - higher value produces more verbose messages");
78 #define HEVC_TEST_ENABLE 0
79 #define VCODEC_CLOCK_ENABLE 1
82 VPU_DEC_ID_9190 = 0x6731,
96 VPU_DEC_TYPE_9190 = 0,
97 VPU_ENC_TYPE_8270 = 0x100,
101 typedef enum VPU_FREQ {
114 unsigned long hw_addr;
115 unsigned long enc_offset;
116 unsigned long enc_reg_num;
117 unsigned long enc_io_size;
118 unsigned long dec_offset;
119 unsigned long dec_reg_num;
120 unsigned long dec_io_size;
123 struct extra_info_elem {
128 #define EXTRA_INFO_MAGIC 0x4C4A46
130 struct extra_info_for_iommu {
133 struct extra_info_elem elem[20];
136 #define VPU_SERVICE_SHOW_TIME 0
138 #if VPU_SERVICE_SHOW_TIME
139 static struct timeval enc_start, enc_end;
140 static struct timeval dec_start, dec_end;
141 static struct timeval pp_start, pp_end;
144 #define MHZ (1000*1000)
146 #define REG_NUM_9190_DEC (60)
147 #define REG_NUM_9190_PP (41)
148 #define REG_NUM_9190_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP)
150 #define REG_NUM_DEC_PP (REG_NUM_9190_DEC+REG_NUM_9190_PP)
152 #define REG_NUM_ENC_8270 (96)
153 #define REG_SIZE_ENC_8270 (0x200)
154 #define REG_NUM_ENC_4831 (164)
155 #define REG_SIZE_ENC_4831 (0x400)
157 #define REG_NUM_HEVC_DEC (68)
159 #define SIZE_REG(reg) ((reg)*4)
/*
 * Hardware capability table, one entry per known VPU/HEVC core, matched
 * at probe time by .hw_id.  enc_* describe the encoder register window,
 * dec_* the decoder window that starts at .dec_offset.
 * NOTE(review): this chunk is a sampled paste -- entry braces, .hw_addr
 * fields and the HEVC .hw_id line are missing here; kept byte-identical.
 */
161 static VPU_HW_INFO_E vpu_hw_set[] = {
163 .hw_id = VPU_ID_8270,
166 .enc_reg_num = REG_NUM_ENC_8270,
167 .enc_io_size = REG_NUM_ENC_8270 * 4,
168 .dec_offset = REG_SIZE_ENC_8270,
169 .dec_reg_num = REG_NUM_9190_DEC_PP,
170 .dec_io_size = REG_NUM_9190_DEC_PP * 4,
173 .hw_id = VPU_ID_4831,
176 .enc_reg_num = REG_NUM_ENC_4831,
177 .enc_io_size = REG_NUM_ENC_4831 * 4,
178 .dec_offset = REG_SIZE_ENC_4831,
179 .dec_reg_num = REG_NUM_9190_DEC_PP,
180 .dec_io_size = REG_NUM_9190_DEC_PP * 4,
186 .dec_reg_num = REG_NUM_HEVC_DEC,
187 .dec_io_size = REG_NUM_HEVC_DEC * 4,
190 .hw_id = VPU_DEC_ID_9190,
196 .dec_reg_num = REG_NUM_9190_DEC_PP,
197 .dec_io_size = REG_NUM_9190_DEC_PP * 4,
202 #define DEC_INTERRUPT_REGISTER 1
203 #define PP_INTERRUPT_REGISTER 60
204 #define ENC_INTERRUPT_REGISTER 1
206 #define DEC_INTERRUPT_BIT 0x100
207 #define DEC_BUFFER_EMPTY_BIT 0x4000
208 #define PP_INTERRUPT_BIT 0x100
209 #define ENC_INTERRUPT_BIT 0x1
211 #define HEVC_DEC_INT_RAW_BIT 0x200
212 #define HEVC_DEC_STR_ERROR_BIT 0x4000
213 #define HEVC_DEC_BUS_ERROR_BIT 0x2000
214 #define HEVC_DEC_BUFFER_EMPTY_BIT 0x10000
216 #define VPU_REG_EN_ENC 14
217 #define VPU_REG_ENC_GATE 2
218 #define VPU_REG_ENC_GATE_BIT (1<<4)
220 #define VPU_REG_EN_DEC 1
221 #define VPU_REG_DEC_GATE 2
222 #define VPU_REG_DEC_GATE_BIT (1<<10)
223 #define VPU_REG_EN_PP 0
224 #define VPU_REG_PP_GATE 1
225 #define VPU_REG_PP_GATE_BIT (1<<8)
226 #define VPU_REG_EN_DEC_PP 1
227 #define VPU_REG_DEC_PP_GATE 61
228 #define VPU_REG_DEC_PP_GATE_BIT (1<<8)
232 #define vpu_debug(level, fmt, args...) \
234 if (debug >= level) \
235 pr_info("%s:%d: " fmt, \
236 __func__, __LINE__, ##args); \
239 #define vpu_debug(level, fmt, args...)
242 #define vpu_debug_enter() vpu_debug(4, "enter\n")
243 #define vpu_debug_leave() vpu_debug(4, "leave\n")
245 #define vpu_err(fmt, args...) \
246 pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
248 #if defined(CONFIG_VCODEC_MMU)
/*
 * Per-format tables of register indices whose values carry a dma-buf fd
 * (low 10 bits) plus an offset; vcodec_bufid_to_iova() walks one of these
 * tables and rewrites each listed register with the mapped iova + offset.
 * Table selection happens in vcodec_reg_address_translate().
 */
249 static u8 addr_tbl_vpu_h264dec[] = {
250 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
251 25, 26, 27, 28, 29, 40, 41
254 static u8 addr_tbl_vpu_vp8dec[] = {
255 10, 12, 13, 14, 18, 19, 22, 23, 24, 25, 26, 27, 28, 29, 40
258 static u8 addr_tbl_vpu_vp6dec[] = {
259 12, 13, 14, 18, 27, 40
262 static u8 addr_tbl_vpu_vc1dec[] = {
263 12, 13, 14, 15, 16, 17, 27, 41
/* NOTE(review): the jpegdec table body is missing from this sampled chunk. */
266 static u8 addr_tbl_vpu_jpegdec[] = {
270 static u8 addr_tbl_vpu_defaultdec[] = {
271 12, 13, 14, 15, 16, 17, 40, 41
274 static u8 addr_tbl_vpu_enc[] = {
275 5, 6, 7, 8, 9, 10, 11, 12, 13, 51
278 static u8 addr_tbl_hevc_dec[] = {
279 4, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
280 21, 22, 23, 24, 42, 43
305 * struct for process session which connect to vpu
307 * @author ChenHengming (2011-5-3)
/*
 * Per-client session state (stored in filp->private_data, see the ioctl
 * handler below).  Register sets owned by the session migrate through the
 * waiting -> running -> done lists; 'wait' is woken when a reg completes
 * (see reg_from_run_to_done()).
 */
309 typedef struct vpu_session {
310 enum VPU_CLIENT_TYPE type;
311 /* a linked list of data so we can access them for debugging */
312 struct list_head list_session;
313 /* a linked list of register data waiting for process */
314 struct list_head waiting;
315 /* a linked list of register data in processing */
316 struct list_head running;
317 /* a linked list of register data processed */
318 struct list_head done;
319 wait_queue_head_t wait;
/* number of this session's regs currently on the hardware */
321 atomic_t task_running;
325 * struct for process register set
327 * @author ChenHengming (2011-5-4)
/*
 * One queued register-set job.  The actual register payload is allocated
 * immediately after this header (see reg_init(): reg->reg = &reg[1]).
 * session_link threads it onto the owning session's list, status_link
 * onto the service-wide waiting/running/done list.
 */
329 typedef struct vpu_reg {
330 enum VPU_CLIENT_TYPE type;
332 vpu_session *session;
333 struct list_head session_link; /* link to vpu service session */
334 struct list_head status_link; /* link to register set list */
336 #if defined(CONFIG_VCODEC_MMU)
/* vcodec_mem_region entries pinned for this job (freed in reg_deinit) */
337 struct list_head mem_region_list;
/*
 * One memory-mapped hardware sub-block (encoder or decoder): interrupt
 * counters, physical base, and the ioremapped register window 'hwregs'.
 */
342 typedef struct vpu_device {
343 atomic_t irq_count_codec;
344 atomic_t irq_count_pp;
345 unsigned long iobaseaddr;
347 volatile u32 *hwregs;
350 enum vcodec_device_id {
351 VCODEC_DEVICE_ID_VPU,
352 VCODEC_DEVICE_ID_HEVC,
353 VCODEC_DEVICE_ID_COMBO
356 enum VCODEC_RUNNING_MODE {
357 VCODEC_RUNNING_MODE_NONE = -1,
358 VCODEC_RUNNING_MODE_VPU,
359 VCODEC_RUNNING_MODE_HEVC,
/*
 * A dma-buf pinned for one register-set job: the ion handle plus the
 * iova the hardware was programmed with.  Freed in reg_deinit().
 */
362 struct vcodec_mem_region {
363 struct list_head srv_lnk;
364 struct list_head reg_lnk;
365 struct list_head session_lnk;
366 unsigned long iova; /* virtual address for iommu */
369 struct ion_handle *hdl;
373 MMU_ACTIVATED = BIT(0)
/*
 * Per-sub-device (VPU or HEVC core) state hanging off the shared
 * vpu_service_info.  'mode' selects which core the shared GRF mux must
 * route to (see vcodec_enter_mode()).
 * NOTE(review): several members are missing from this sampled chunk.
 */
376 struct vpu_subdev_data {
380 struct device *child_dev;
384 struct vpu_service_info *pservice;
387 enum VCODEC_RUNNING_MODE mode;
388 struct list_head lnk_service;
394 VPU_HW_INFO_E *hw_info;
399 #ifdef CONFIG_DEBUG_FS
400 struct dentry *debugfs_dir;
401 struct dentry *debugfs_file_regs;
404 #if defined(CONFIG_VCODEC_MMU)
405 struct device *mmu_dev;
/*
 * Top-level service state shared by all sub-devices: scheduling lists,
 * clocks, power management (wake lock + delayed power-off work), and the
 * GRF regmap used for VPU/HEVC mode switching.
 * NOTE(review): many members (lock, reg_codec/reg_pproc, enabled, ...)
 * referenced by the functions below were sampled out of this chunk.
 */
409 typedef struct vpu_service_info {
410 struct wake_lock wake_lock;
411 struct delayed_work power_off_work;
413 struct list_head waiting; /* link to link_reg in struct vpu_reg */
414 struct list_head running; /* link to link_reg in struct vpu_reg */
415 struct list_head done; /* link to link_reg in struct vpu_reg */
416 struct list_head session; /* link to list_session in struct vpu_session */
417 atomic_t total_running;
422 struct vpu_dec_config dec_config;
423 struct vpu_enc_config enc_config;
/* last frequency requested via vpu_service_set_freq() */
427 atomic_t freq_status;
429 struct clk *aclk_vcodec;
430 struct clk *hclk_vcodec;
431 struct clk *clk_core;
432 struct clk *clk_cabac;
433 struct clk *pd_video;
438 #if defined(CONFIG_VCODEC_MMU)
439 struct ion_client *ion_client;
440 struct list_head mem_region_list;
443 enum vcodec_device_id dev_id;
445 enum VCODEC_RUNNING_MODE curr_mode;
448 struct delayed_work simulate_work;
454 #ifdef CONFIG_MFD_SYSCON
455 struct regmap *grf_base;
462 struct list_head subdev_list;
/*
 * Combo-device wrapper pairing a VPU service and an HEVC service that
 * share one hardware block; current_hw_mode records which one owns it.
 */
465 struct vcodec_combo {
466 struct vpu_service_info *vpu_srv;
467 struct vpu_service_info *hevc_srv;
468 struct list_head waiting;
469 struct list_head running;
470 struct mutex run_lock;
472 enum vcodec_device_id current_hw_mode;
475 typedef struct vpu_request {
480 /* debugfs root directory for all device (vpu, hevc).*/
481 static struct dentry *parent;
483 #ifdef CONFIG_DEBUG_FS
484 static int vcodec_debugfs_init(void);
485 static void vcodec_debugfs_exit(void);
486 static struct dentry* vcodec_debugfs_create_device_dir(char *dirname, struct dentry *parent);
487 static int debug_vcodec_open(struct inode *inode, struct file *file);
489 static const struct file_operations debug_vcodec_fops = {
490 .open = debug_vcodec_open,
493 .release = single_release,
497 #define VDPU_SOFT_RESET_REG 101
498 #define VDPU_CLEAN_CACHE_REG 516
499 #define VEPU_CLEAN_CACHE_REG 772
500 #define HEVC_CLEAN_CACHE_REG 260
502 #define VPU_REG_ENABLE(base, reg) do { \
506 #define VDPU_SOFT_RESET(base) VPU_REG_ENABLE(base, VDPU_SOFT_RESET_REG)
507 #define VDPU_CLEAN_CACHE(base) VPU_REG_ENABLE(base, VDPU_CLEAN_CACHE_REG)
508 #define VEPU_CLEAN_CACHE(base) VPU_REG_ENABLE(base, VEPU_CLEAN_CACHE_REG)
509 #define HEVC_CLEAN_CACHE(base) VPU_REG_ENABLE(base, HEVC_CLEAN_CACHE_REG)
511 #define VPU_POWER_OFF_DELAY 4*HZ /* 4s */
512 #define VPU_TIMEOUT_DELAY 2*HZ /* 2s */
/*
 * vcodec_enter_mode - route the shared codec hardware to @data's core
 * before its registers are touched.
 *
 * Single-subdev SoCs (subcnt < 2): only (re)activate this subdev's IOMMU.
 * Otherwise: bail out early if the mux already points at this core,
 * deactivate every other subdev's IOMMU, flip the GRF mode-control bit
 * (regmap when CONFIG_MFD_SYSCON, raw readl/writel_relaxed otherwise --
 * the "| (bits << 16)" is the GRF write-enable mask), re-activate this
 * subdev's IOMMU, then record prev_mode/curr_mode.
 * NOTE(review): sampled chunk -- returns, #else/#endif and closing braces
 * are missing; code kept byte-identical.
 */
514 static void vcodec_enter_mode(struct vpu_subdev_data *data)
518 struct vpu_service_info *pservice = data->pservice;
519 struct vpu_subdev_data *subdata, *n;
520 if (pservice->subcnt < 2) {
521 #if defined(CONFIG_VCODEC_MMU)
522 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
523 set_bit(MMU_ACTIVATED, &data->state);
524 BUG_ON(!pservice->enabled);
525 if (pservice->enabled)
526 rockchip_iovmm_activate(data->dev);
532 if (pservice->curr_mode == data->mode)
535 vpu_debug(3, "vcodec enter mode %d\n", data->mode);
536 #if defined(CONFIG_VCODEC_MMU)
537 list_for_each_entry_safe(subdata, n, &pservice->subdev_list, lnk_service) {
538 if (data != subdata && subdata->mmu_dev &&
539 test_bit(MMU_ACTIVATED, &subdata->state)) {
540 clear_bit(MMU_ACTIVATED, &subdata->state);
541 rockchip_iovmm_deactivate(subdata->dev);
545 bits = 1 << pservice->mode_bit;
546 #ifdef CONFIG_MFD_SYSCON
547 regmap_read(pservice->grf_base, pservice->mode_ctrl, &raw);
549 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
550 regmap_write(pservice->grf_base, pservice->mode_ctrl,
551 raw | bits | (bits << 16));
553 regmap_write(pservice->grf_base, pservice->mode_ctrl,
554 (raw & (~bits)) | (bits << 16));
556 raw = readl_relaxed(pservice->grf_base + pservice->mode_ctrl / 4);
557 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
558 writel_relaxed(raw | bits | (bits << 16),
559 pservice->grf_base + pservice->mode_ctrl / 4);
561 writel_relaxed((raw & (~bits)) | (bits << 16),
562 pservice->grf_base + pservice->mode_ctrl / 4);
564 #if defined(CONFIG_VCODEC_MMU)
565 if (data->mmu_dev && !test_bit(MMU_ACTIVATED, &data->state)) {
566 set_bit(MMU_ACTIVATED, &data->state);
567 BUG_ON(!pservice->enabled);
568 if (pservice->enabled)
569 rockchip_iovmm_activate(data->dev);
572 pservice->prev_mode = pservice->curr_mode;
573 pservice->curr_mode = data->mode;
576 static void vcodec_exit_mode(struct vpu_service_info *pservice)
/*
 * vpu_get_clk - acquire the clocks this service needs, by device id.
 * HEVC takes pd_hevc; COMBO additionally takes clk_cabac (optional --
 * NULLed on failure) and clk_core; VPU takes aclk/hclk plus pd_video as
 * a fallback when no power-domain clock was set yet.  The case labels
 * fall through intentionally (HEVC -> COMBO -> VPU) so later cases pick
 * up the shared clocks.
 * NOTE(review): sampled chunk -- error returns, 'break's and the
 * closing #endif/brace are missing; code kept byte-identical.
 */
581 static int vpu_get_clk(struct vpu_service_info *pservice)
583 #if VCODEC_CLOCK_ENABLE
584 switch (pservice->dev_id) {
585 case VCODEC_DEVICE_ID_HEVC:
586 pservice->pd_video = devm_clk_get(pservice->dev, "pd_hevc");
587 if (IS_ERR(pservice->pd_video)) {
588 dev_err(pservice->dev, "failed on clk_get pd_hevc\n");
591 case VCODEC_DEVICE_ID_COMBO:
592 pservice->clk_cabac = devm_clk_get(pservice->dev, "clk_cabac");
593 if (IS_ERR(pservice->clk_cabac)) {
594 dev_err(pservice->dev, "failed on clk_get clk_cabac\n");
595 pservice->clk_cabac = NULL;
597 pservice->clk_core = devm_clk_get(pservice->dev, "clk_core");
598 if (IS_ERR(pservice->clk_core)) {
599 dev_err(pservice->dev, "failed on clk_get clk_core\n");
602 case VCODEC_DEVICE_ID_VPU:
603 pservice->aclk_vcodec = devm_clk_get(pservice->dev, "aclk_vcodec");
604 if (IS_ERR(pservice->aclk_vcodec)) {
605 dev_err(pservice->dev, "failed on clk_get aclk_vcodec\n");
609 pservice->hclk_vcodec = devm_clk_get(pservice->dev, "hclk_vcodec");
610 if (IS_ERR(pservice->hclk_vcodec)) {
611 dev_err(pservice->dev, "failed on clk_get hclk_vcodec\n");
614 if (pservice->pd_video == NULL) {
615 pservice->pd_video = devm_clk_get(pservice->dev, "pd_video");
616 if (IS_ERR(pservice->pd_video))
617 pservice->pd_video = NULL;
/*
 * vpu_put_clk - release every clock reference taken by vpu_get_clk().
 *
 * Each handle is only non-NULL when its devm_clk_get() succeeded (failed
 * lookups are stored as NULL), so the guards keep stale handles away from
 * devm_clk_put().  Compiled out entirely when VCODEC_CLOCK_ENABLE is 0.
 */
static void vpu_put_clk(struct vpu_service_info *pservice)
{
#if VCODEC_CLOCK_ENABLE
	struct clk *clks[] = {
		pservice->pd_video,
		pservice->aclk_vcodec,
		pservice->hclk_vcodec,
		pservice->clk_core,
		pservice->clk_cabac,
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(clks); i++) {
		if (clks[i])
			devm_clk_put(pservice->dev, clks[i]);
	}
#endif
}
/*
 * vpu_reset - hard-reset the codec after a timeout/error: assert the
 * SoC-specific soft-reset lines (RK29 or RK30 variants), release them in
 * reverse order, drop any in-flight reg bookkeeping, and tear down the
 * IOMMU mapping so it is re-established cleanly on the next job.
 * NOTE(review): sampled chunk -- the delay between assert/deassert and
 * the #endif lines are missing; code kept byte-identical.
 */
646 static void vpu_reset(struct vpu_subdev_data *data)
648 struct vpu_service_info *pservice = data->pservice;
649 #if defined(CONFIG_ARCH_RK29)
650 clk_disable(aclk_ddr_vepu);
651 cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, true);
652 cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, true);
653 cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, true);
654 cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, true);
656 cru_set_soft_reset(SOFT_RST_VCODEC_AXI_BUS, false);
657 cru_set_soft_reset(SOFT_RST_VCODEC_AHB_BUS, false);
658 cru_set_soft_reset(SOFT_RST_DDR_VCODEC_PORT, false);
659 cru_set_soft_reset(SOFT_RST_CPU_VODEC_A2A_AHB, false);
660 clk_enable(aclk_ddr_vepu);
661 #elif defined(CONFIG_ARCH_RK30)
662 pmu_set_idle_request(IDLE_REQ_VIDEO, true);
663 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, true);
664 cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, true);
665 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, true);
666 cru_set_soft_reset(SOFT_RST_VCODEC_AXI, true);
668 cru_set_soft_reset(SOFT_RST_VCODEC_AXI, false);
669 cru_set_soft_reset(SOFT_RST_VCODEC_AHB, false);
670 cru_set_soft_reset(SOFT_RST_VCODEC_NIU_AXI, false);
671 cru_set_soft_reset(SOFT_RST_CPU_VCODEC, false);
672 pmu_set_idle_request(IDLE_REQ_VIDEO, false);
674 pservice->reg_codec = NULL;
675 pservice->reg_pproc = NULL;
676 pservice->reg_resev = NULL;
678 #if defined(CONFIG_VCODEC_MMU)
679 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
680 clear_bit(MMU_ACTIVATED, &data->state);
681 BUG_ON(!pservice->enabled);
682 if (pservice->enabled)
683 rockchip_iovmm_deactivate(data->dev);
688 static void reg_deinit(struct vpu_subdev_data *data, vpu_reg *reg);
689 static void vpu_service_session_clear(struct vpu_subdev_data *data, vpu_session *session)
692 list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
693 reg_deinit(data, reg);
695 list_for_each_entry_safe(reg, n, &session->running, session_link) {
696 reg_deinit(data, reg);
698 list_for_each_entry_safe(reg, n, &session->done, session_link) {
699 reg_deinit(data, reg);
703 static void vpu_service_dump(struct vpu_service_info *pservice)
/*
 * vpu_service_power_off - turn the codec block off: warn (and dump) if
 * jobs are still running, deactivate every subdev IOMMU, reset curr_mode,
 * gate all clocks and release the wake lock.
 * NOTE(review): sampled chunk -- the early return, the wait-for-running
 * delay and several braces/#endif lines are missing; code kept
 * byte-identical.  Presumably called with pservice->lock held -- confirm
 * against vpu_power_off_work() below.
 */
707 static void vpu_service_power_off(struct vpu_service_info *pservice)
710 struct vpu_subdev_data *data = NULL, *n;
711 if (!pservice->enabled)
714 pservice->enabled = false;
715 total_running = atomic_read(&pservice->total_running);
717 pr_alert("alert: power off when %d task running!!\n", total_running);
719 pr_alert("alert: delay 50 ms for running task\n");
720 vpu_service_dump(pservice);
723 pr_info("%s: power off...", dev_name(pservice->dev));
725 #if defined(CONFIG_VCODEC_MMU)
726 list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
727 if (data->mmu_dev && test_bit(MMU_ACTIVATED, &data->state)) {
728 clear_bit(MMU_ACTIVATED, &data->state);
729 rockchip_iovmm_deactivate(data->dev);
732 pservice->curr_mode = VCODEC_RUNNING_MODE_NONE;
735 #if VCODEC_CLOCK_ENABLE
736 if (pservice->pd_video)
737 clk_disable_unprepare(pservice->pd_video);
738 if (pservice->hclk_vcodec)
739 clk_disable_unprepare(pservice->hclk_vcodec);
740 if (pservice->aclk_vcodec)
741 clk_disable_unprepare(pservice->aclk_vcodec);
742 if (pservice->clk_core)
743 clk_disable_unprepare(pservice->clk_core);
744 if (pservice->clk_cabac)
745 clk_disable_unprepare(pservice->clk_cabac);
748 wake_unlock(&pservice->wake_lock);
752 static inline void vpu_queue_power_off_work(struct vpu_service_info *pservice)
754 queue_delayed_work(system_nrt_wq, &pservice->power_off_work, VPU_POWER_OFF_DELAY);
757 static void vpu_power_off_work(struct work_struct *work_s)
759 struct delayed_work *dlwork = container_of(work_s, struct delayed_work, work);
760 struct vpu_service_info *pservice = container_of(dlwork, struct vpu_service_info, power_off_work);
762 if (mutex_trylock(&pservice->lock)) {
763 vpu_service_power_off(pservice);
764 mutex_unlock(&pservice->lock);
766 /* Come back later if the device is busy... */
767 vpu_queue_power_off_work(pservice);
/*
 * vpu_service_power_on - ensure the codec block is powered: re-arm the
 * delayed power-off (at most once per second), bail if already enabled,
 * select the codec clock via GRF on RK312x, ungate all clocks and take
 * the wake lock.
 * NOTE(review): sampled chunk -- the 'static ktime_t last' declaration,
 * the early return and #ifdef/#endif lines are missing; code kept
 * byte-identical.
 */
771 static void vpu_service_power_on(struct vpu_service_info *pservice)
774 ktime_t now = ktime_get();
775 if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
776 cancel_delayed_work_sync(&pservice->power_off_work);
777 vpu_queue_power_off_work(pservice);
780 if (pservice->enabled)
783 pservice->enabled = true;
784 pr_info("%s: power on\n", dev_name(pservice->dev));
786 #define BIT_VCODEC_CLK_SEL (1<<10)
788 writel_relaxed(readl_relaxed(RK_GRF_VIRT + RK312X_GRF_SOC_CON1) |
789 BIT_VCODEC_CLK_SEL | (BIT_VCODEC_CLK_SEL << 16),
790 RK_GRF_VIRT + RK312X_GRF_SOC_CON1);
792 #if VCODEC_CLOCK_ENABLE
793 if (pservice->aclk_vcodec)
794 clk_prepare_enable(pservice->aclk_vcodec);
795 if (pservice->hclk_vcodec)
796 clk_prepare_enable(pservice->hclk_vcodec);
797 if (pservice->clk_core)
798 clk_prepare_enable(pservice->clk_core);
799 if (pservice->clk_cabac)
800 clk_prepare_enable(pservice->clk_cabac);
801 if (pservice->pd_video)
802 clk_prepare_enable(pservice->pd_video);
806 wake_lock(&pservice->wake_lock);
809 static inline bool reg_check_rmvb_wmv(vpu_reg *reg)
811 u32 type = (reg->reg[3] & 0xF0000000) >> 28;
812 return ((type == 8) || (type == 4));
/*
 * True when reg[3] bit 23 flags an interlaced stream; reg_init() bumps
 * the clock to VPU_FREQ_400M for interlaced H.264 content.
 * NOTE(review): the return statement is missing from this sampled chunk.
 */
815 static inline bool reg_check_interlace(vpu_reg *reg)
817 u32 type = (reg->reg[3] & (1 << 23));
/*
 * Extract the decode format (reg[3] bits 31:28) as a VPU_DEC_FMT value;
 * used for address-table selection and clock policy.
 * NOTE(review): the return statement is missing from this sampled chunk.
 */
821 static inline enum VPU_DEC_FMT reg_check_fmt(vpu_reg *reg)
823 enum VPU_DEC_FMT type = (enum VPU_DEC_FMT)((reg->reg[3] & 0xF0000000) >> 28);
827 static inline int reg_probe_width(vpu_reg *reg)
829 int width_in_mb = reg->reg[4] >> 23;
830 return width_in_mb * 16;
833 #if defined(CONFIG_VCODEC_MMU)
/*
 * vcodec_fd_to_iova - import dma-buf @fd through ion, map it into this
 * subdev's IOMMU and record the pin on reg->mem_region_list (released in
 * reg_deinit()).  Returns the device iova, negative on failure.
 * NOTE(review): sampled chunk -- declarations, error returns and braces
 * are missing, and '®' below is an HTML-entity mangling of '&reg';
 * code kept byte-identical.
 */
834 static int vcodec_fd_to_iova(struct vpu_subdev_data *data, vpu_reg *reg,int fd)
836 struct vpu_service_info *pservice = data->pservice;
837 struct ion_handle *hdl;
839 struct vcodec_mem_region *mem_region;
841 hdl = ion_import_dma_buf(pservice->ion_client, fd);
843 vpu_err("import dma-buf from fd %d failed\n", fd);
846 mem_region = kzalloc(sizeof(struct vcodec_mem_region), GFP_KERNEL);
848 if (mem_region == NULL) {
849 vpu_err("allocate memory for iommu memory region failed\n");
850 ion_free(pservice->ion_client, hdl);
854 mem_region->hdl = hdl;
855 vcodec_enter_mode(data);
856 ret = ion_map_iommu(data->dev, pservice->ion_client,
857 mem_region->hdl, &mem_region->iova, &mem_region->len);
858 vcodec_exit_mode(pservice);
861 vpu_err("ion map iommu failed\n");
863 ion_free(pservice->ion_client, hdl);
866 INIT_LIST_HEAD(&mem_region->reg_lnk);
867 list_add_tail(&mem_region->reg_lnk, ®->mem_region_list);
868 return mem_region->iova;
/*
 * vcodec_bufid_to_iova - walk @tbl (register indices holding dma-buf fds
 * in their low 10 bits, offset in the upper bits), import and IOMMU-map
 * each buffer, and rewrite reg->reg[tbl[i]] with iova + offset.  HEVC
 * register 42 gets extra handling: its PPS buffer embeds per-entry
 * scaling-list fds that are patched to iovas in place.  Finally the
 * caller-supplied extra_info offsets are applied.
 * NOTE(review): sampled chunk -- declarations, error returns and braces
 * are missing, and '®' is a mangled '&reg'; code kept byte-identical.
 */
871 static int vcodec_bufid_to_iova(struct vpu_subdev_data *data, u8 *tbl,
872 int size, vpu_reg *reg,
873 struct extra_info_for_iommu *ext_inf)
875 struct vpu_service_info *pservice = data->pservice;
880 if (tbl == NULL || size <= 0) {
881 dev_err(pservice->dev, "input arguments invalidate\n");
885 vpu_service_power_on(pservice);
887 for (i = 0; i < size; i++) {
888 usr_fd = reg->reg[tbl[i]] & 0x3FF;
890 if (tbl[i] == 41 && data->hw_info->hw_id != HEVC_ID &&
891 (reg->type == VPU_DEC || reg->type == VPU_DEC_PP))
892 /* special for vpu dec num 41 regitster */
893 offset = reg->reg[tbl[i]] >> 10 << 4;
895 offset = reg->reg[tbl[i]] >> 10;
898 struct ion_handle *hdl;
900 struct vcodec_mem_region *mem_region;
902 hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
904 dev_err(pservice->dev, "import dma-buf from fd %d failed, reg[%d]\n", usr_fd, tbl[i]);
908 if (tbl[i] == 42 && data->hw_info->hw_id == HEVC_ID){
911 pps = (char *)ion_map_kernel(pservice->ion_client,hdl);
/* NOTE(review): this inner loop reuses the OUTER index 'i' (declared for
 * the tbl[] walk above), clobbering the table iteration when it finishes.
 * Looks like a genuine bug -- needs its own index variable. */
912 for (i=0; i<64; i++) {
916 scaling_offset = (u32)pps[i*80+74];
917 scaling_offset += (u32)pps[i*80+75] << 8;
918 scaling_offset += (u32)pps[i*80+76] << 16;
919 scaling_offset += (u32)pps[i*80+77] << 24;
920 scaling_fd = scaling_offset&0x3ff;
921 scaling_offset = scaling_offset >> 10;
923 tmp = vcodec_fd_to_iova(data, reg, scaling_fd);
924 tmp += scaling_offset;
925 pps[i*80+74] = tmp & 0xff;
926 pps[i*80+75] = (tmp >> 8) & 0xff;
927 pps[i*80+76] = (tmp >> 16) & 0xff;
928 pps[i*80+77] = (tmp >> 24) & 0xff;
933 mem_region = kzalloc(sizeof(struct vcodec_mem_region), GFP_KERNEL);
935 if (mem_region == NULL) {
936 dev_err(pservice->dev, "allocate memory for iommu memory region failed\n");
937 ion_free(pservice->ion_client, hdl);
941 mem_region->hdl = hdl;
942 mem_region->reg_idx = tbl[i];
943 vcodec_enter_mode(data);
944 ret = ion_map_iommu(data->dev,
945 pservice->ion_client,
949 vcodec_exit_mode(pservice);
952 dev_err(pservice->dev, "ion map iommu failed\n");
954 ion_free(pservice->ion_client, hdl);
957 reg->reg[tbl[i]] = mem_region->iova + offset;
958 INIT_LIST_HEAD(&mem_region->reg_lnk);
959 list_add_tail(&mem_region->reg_lnk, ®->mem_region_list);
963 if (ext_inf != NULL && ext_inf->magic == EXTRA_INFO_MAGIC) {
964 for (i=0; i<ext_inf->cnt; i++) {
965 vpu_debug(3, "reg[%d] + offset %d\n",
966 ext_inf->elem[i].index,
967 ext_inf->elem[i].offset);
968 reg->reg[ext_inf->elem[i].index] +=
969 ext_inf->elem[i].offset;
/*
 * vcodec_reg_address_translate - choose the fd-carrying register table
 * for this job (HEVC table for the HEVC core; otherwise by decode format
 * from reg_check_fmt(), or the encoder table for VPU_ENC) and hand it to
 * vcodec_bufid_to_iova().  Returns its result, negative on failure.
 * NOTE(review): sampled chunk -- 'break' statements and closing braces
 * are missing; code kept byte-identical.
 */
976 static int vcodec_reg_address_translate(struct vpu_subdev_data *data,
978 struct extra_info_for_iommu *ext_inf)
984 hw_id = data->hw_info->hw_id;
986 if (hw_id == HEVC_ID) {
987 tbl = addr_tbl_hevc_dec;
988 size = sizeof(addr_tbl_hevc_dec);
990 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
991 switch (reg_check_fmt(reg)) {
992 case VPU_DEC_FMT_H264:
994 tbl = addr_tbl_vpu_h264dec;
995 size = sizeof(addr_tbl_vpu_h264dec);
998 case VPU_DEC_FMT_VP8:
999 case VPU_DEC_FMT_VP7:
1001 tbl = addr_tbl_vpu_vp8dec;
1002 size = sizeof(addr_tbl_vpu_vp8dec);
1006 case VPU_DEC_FMT_VP6:
1008 tbl = addr_tbl_vpu_vp6dec;
1009 size = sizeof(addr_tbl_vpu_vp6dec);
1012 case VPU_DEC_FMT_VC1:
1014 tbl = addr_tbl_vpu_vc1dec;
1015 size = sizeof(addr_tbl_vpu_vc1dec);
1019 case VPU_DEC_FMT_JPEG:
1021 tbl = addr_tbl_vpu_jpegdec;
1022 size = sizeof(addr_tbl_vpu_jpegdec);
1026 tbl = addr_tbl_vpu_defaultdec;
1027 size = sizeof(addr_tbl_vpu_defaultdec);
1030 } else if (reg->type == VPU_ENC) {
1031 tbl = addr_tbl_vpu_enc;
1032 size = sizeof(addr_tbl_vpu_enc);
1037 return vcodec_bufid_to_iova(data, tbl, size, reg, ext_inf);
/*
 * reg_init - build a vpu_reg job from a userspace register blob: allocate
 * header + payload in one kmalloc, copy @size bytes from @src (any excess
 * beyond the hw reg size is treated as extra_info_for_iommu), translate
 * fd-carrying registers to iovas, queue the job on the service and
 * session waiting lists, then pick a clock frequency from the stream
 * properties (RMVB/WMV -> 200M, 4K H.264 -> 500M, interlaced -> 400M,
 * PP -> 400M).  Returns the new reg, or NULL on failure.
 * NOTE(review): sampled chunk -- NULL checks, kfree error paths, braces
 * and #endif lines are missing, and '®' is a mangled '&reg'; code
 * kept byte-identical.
 */
1044 static vpu_reg *reg_init(struct vpu_subdev_data *data,
1045 vpu_session *session, void __user *src, u32 size)
1047 struct vpu_service_info *pservice = data->pservice;
1049 struct extra_info_for_iommu extra_info;
1050 vpu_reg *reg = kmalloc(sizeof(vpu_reg) + data->reg_size, GFP_KERNEL);
1055 vpu_err("error: kmalloc fail in reg_init\n");
1059 if (size > data->reg_size) {
1060 /*printk("warning: vpu reg size %lu is larger than hw reg size %lu\n",
1061 size, pservice->reg_size);
1062 size = pservice->reg_size;*/
1063 extra_size = size - data->reg_size;
1064 size = data->reg_size;
1066 reg->session = session;
1067 reg->type = session->type;
1069 reg->freq = VPU_FREQ_DEFAULT;
1070 reg->reg = (u32 *)®[1];
1071 INIT_LIST_HEAD(®->session_link);
1072 INIT_LIST_HEAD(®->status_link);
1074 #if defined(CONFIG_VCODEC_MMU)
1076 INIT_LIST_HEAD(®->mem_region_list);
1079 if (copy_from_user(®->reg[0], (void __user *)src, size)) {
1080 vpu_err("error: copy_from_user failed in reg_init\n");
1085 if (copy_from_user(&extra_info, (u8 *)src + size, extra_size)) {
1086 vpu_err("error: copy_from_user failed in reg_init\n");
1091 #if defined(CONFIG_VCODEC_MMU)
1092 if (data->mmu_dev &&
1093 0 > vcodec_reg_address_translate(data, reg, &extra_info)) {
1094 vpu_err("error: translate reg address failed\n");
1100 mutex_lock(&pservice->lock);
1101 list_add_tail(®->status_link, &pservice->waiting);
1102 list_add_tail(®->session_link, &session->waiting);
1103 mutex_unlock(&pservice->lock);
1105 if (pservice->auto_freq) {
1106 if (!soc_is_rk2928g()) {
1107 if (reg->type == VPU_DEC || reg->type == VPU_DEC_PP) {
1108 if (reg_check_rmvb_wmv(reg)) {
1109 reg->freq = VPU_FREQ_200M;
1110 } else if (reg_check_fmt(reg) == VPU_DEC_FMT_H264) {
1111 if (reg_probe_width(reg) > 3200) {
1112 // raise frequency for 4k avc.
1113 reg->freq = VPU_FREQ_500M;
1116 if (reg_check_interlace(reg)) {
1117 reg->freq = VPU_FREQ_400M;
1121 if (reg->type == VPU_PP) {
1122 reg->freq = VPU_FREQ_400M;
/*
 * reg_deinit - tear down a finished/cancelled job: unlink it from its
 * session and status lists, clear any reg_codec/reg_pproc reference to
 * it, release every pinned ion buffer (iommu unmap happens implicitly on
 * final buffer release), and free the reg.
 * NOTE(review): sampled chunk -- kfree(mem_region)/kfree(reg) and the
 * closing braces are missing, and '®' is a mangled '&reg'; code kept
 * byte-identical.
 */
1130 static void reg_deinit(struct vpu_subdev_data *data, vpu_reg *reg)
1132 struct vpu_service_info *pservice = data->pservice;
1133 #if defined(CONFIG_VCODEC_MMU)
1134 struct vcodec_mem_region *mem_region = NULL, *n;
1137 list_del_init(®->session_link);
1138 list_del_init(®->status_link);
1139 if (reg == pservice->reg_codec)
1140 pservice->reg_codec = NULL;
1141 if (reg == pservice->reg_pproc)
1142 pservice->reg_pproc = NULL;
1144 #if defined(CONFIG_VCODEC_MMU)
1145 /* release memory region attach to this registers table. */
1146 if (data->mmu_dev) {
1147 list_for_each_entry_safe(mem_region, n,
1148 ®->mem_region_list, reg_lnk) {
1149 /* do not unmap iommu manually,
1150 unmap will proccess when memory release */
1151 /*vcodec_enter_mode(data);
1152 ion_unmap_iommu(data->dev,
1153 pservice->ion_client,
1155 vcodec_exit_mode();*/
1156 ion_free(pservice->ion_client, mem_region->hdl);
1157 list_del_init(&mem_region->reg_lnk);
1166 static void reg_from_wait_to_run(struct vpu_service_info *pservice, vpu_reg *reg)
1169 list_del_init(®->status_link);
1170 list_add_tail(®->status_link, &pservice->running);
1172 list_del_init(®->session_link);
1173 list_add_tail(®->session_link, ®->session->running);
1177 static void reg_copy_from_hw(vpu_reg *reg, volatile u32 *src, u32 count)
1180 u32 *dst = (u32 *)®->reg[0];
1182 for (i = 0; i < count; i++)
/*
 * reg_from_run_to_done - retire a completed job: move it onto the "done"
 * lists, snapshot the hardware registers back into reg->reg[] (layout
 * depends on job type), stash the saved irq status into the type's
 * interrupt register slot, drop the running counters and wake the
 * session's waiters.
 * NOTE(review): sampled chunk -- case labels, 'break's and timing code
 * are missing, and '®' is a mangled '&reg'; code kept byte-identical.
 */
1187 static void reg_from_run_to_done(struct vpu_subdev_data *data,
1190 struct vpu_service_info *pservice = data->pservice;
1195 list_del_init(®->status_link);
1196 list_add_tail(®->status_link, &pservice->done);
1198 list_del_init(®->session_link);
1199 list_add_tail(®->session_link, ®->session->done);
1201 vcodec_enter_mode(data);
1202 switch (reg->type) {
1204 pservice->reg_codec = NULL;
1205 reg_copy_from_hw(reg, data->enc_dev.hwregs, data->hw_info->enc_reg_num);
1206 irq_reg = ENC_INTERRUPT_REGISTER;
1210 int reg_len = REG_NUM_9190_DEC;
1211 pservice->reg_codec = NULL;
1212 reg_copy_from_hw(reg, data->dec_dev.hwregs, reg_len);
1213 irq_reg = DEC_INTERRUPT_REGISTER;
1217 pservice->reg_pproc = NULL;
1218 reg_copy_from_hw(reg, data->dec_dev.hwregs + PP_INTERRUPT_REGISTER, REG_NUM_9190_PP);
1219 data->dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
1223 pservice->reg_codec = NULL;
1224 pservice->reg_pproc = NULL;
1225 reg_copy_from_hw(reg, data->dec_dev.hwregs, REG_NUM_9190_DEC_PP);
1226 data->dec_dev.hwregs[PP_INTERRUPT_REGISTER] = 0;
1230 vpu_err("error: copy reg from hw with unknown type %d\n", reg->type);
1234 vcodec_exit_mode(pservice);
1237 reg->reg[irq_reg] = pservice->irq_status;
1239 atomic_sub(1, ®->session->task_running);
1240 atomic_sub(1, &pservice->total_running);
1241 wake_up(®->session->wait);
/*
 * vpu_service_set_freq - retune aclk_vcodec to the frequency the job
 * asked for (reg->freq, chosen in reg_init()).  No-op when the cached
 * freq_status already matches; the default case falls back to 400 MHz on
 * RK2928G and 300 MHz elsewhere.
 * NOTE(review): sampled chunk -- the '} break;' lines between cases are
 * missing; code kept byte-identical.
 */
1246 static void vpu_service_set_freq(struct vpu_service_info *pservice, vpu_reg *reg)
1248 VPU_FREQ curr = atomic_read(&pservice->freq_status);
1249 if (curr == reg->freq)
1251 atomic_set(&pservice->freq_status, reg->freq);
1252 switch (reg->freq) {
1253 case VPU_FREQ_200M : {
1254 clk_set_rate(pservice->aclk_vcodec, 200*MHZ);
1256 case VPU_FREQ_266M : {
1257 clk_set_rate(pservice->aclk_vcodec, 266*MHZ);
1259 case VPU_FREQ_300M : {
1260 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
1262 case VPU_FREQ_400M : {
1263 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1265 case VPU_FREQ_500M : {
1266 clk_set_rate(pservice->aclk_vcodec, 500*MHZ);
1268 case VPU_FREQ_600M : {
1269 clk_set_rate(pservice->aclk_vcodec, 600*MHZ);
1272 if (soc_is_rk2928g())
1273 clk_set_rate(pservice->aclk_vcodec, 400*MHZ);
1275 clk_set_rate(pservice->aclk_vcodec, 300*MHZ);
/*
 * reg_copy_to_hw - program a job's registers into the hardware and kick
 * it off.  Bumps the running counters, optionally retunes the clock,
 * routes the mux via vcodec_enter_mode(), then per job type writes every
 * register EXCEPT the enable register first, cleans the write cache, and
 * writes the gate + enable registers last so the start bit is the final
 * store.  The unknown-type branch undoes the counter bumps.
 * NOTE(review): sampled chunk -- case labels, loop bodies (register
 * stores), 'break's and #endif lines are missing, and '®' is a
 * mangled '&reg'; code kept byte-identical.
 */
1280 static void reg_copy_to_hw(struct vpu_subdev_data *data, vpu_reg *reg)
1282 struct vpu_service_info *pservice = data->pservice;
1284 u32 *src = (u32 *)®->reg[0];
1287 atomic_add(1, &pservice->total_running);
1288 atomic_add(1, ®->session->task_running);
1289 if (pservice->auto_freq)
1290 vpu_service_set_freq(pservice, reg);
1292 vcodec_enter_mode(data);
1294 switch (reg->type) {
1296 int enc_count = data->hw_info->enc_reg_num;
1297 u32 *dst = (u32 *)data->enc_dev.hwregs;
1299 pservice->reg_codec = reg;
1301 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC] & 0x6;
1303 for (i = 0; i < VPU_REG_EN_ENC; i++)
1306 for (i = VPU_REG_EN_ENC + 1; i < enc_count; i++)
1309 VEPU_CLEAN_CACHE(dst);
1313 dst[VPU_REG_ENC_GATE] = src[VPU_REG_ENC_GATE] | VPU_REG_ENC_GATE_BIT;
1314 dst[VPU_REG_EN_ENC] = src[VPU_REG_EN_ENC];
1316 #if VPU_SERVICE_SHOW_TIME
1317 do_gettimeofday(&enc_start);
1322 u32 *dst = (u32 *)data->dec_dev.hwregs;
1324 pservice->reg_codec = reg;
1326 if (data->hw_info->hw_id != HEVC_ID) {
1327 for (i = REG_NUM_9190_DEC - 1; i > VPU_REG_DEC_GATE; i--)
1329 VDPU_CLEAN_CACHE(dst);
1331 for (i = REG_NUM_HEVC_DEC - 1; i > VPU_REG_EN_DEC; i--)
1333 HEVC_CLEAN_CACHE(dst);
1338 if (data->hw_info->hw_id != HEVC_ID) {
1339 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
1340 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
1342 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
1346 #if VPU_SERVICE_SHOW_TIME
1347 do_gettimeofday(&dec_start);
1351 u32 *dst = (u32 *)data->dec_dev.hwregs + PP_INTERRUPT_REGISTER;
1352 pservice->reg_pproc = reg;
1354 dst[VPU_REG_PP_GATE] = src[VPU_REG_PP_GATE] | VPU_REG_PP_GATE_BIT;
1356 for (i = VPU_REG_PP_GATE + 1; i < REG_NUM_9190_PP; i++)
1361 dst[VPU_REG_EN_PP] = src[VPU_REG_EN_PP];
1362 #if VPU_SERVICE_SHOW_TIME
1363 do_gettimeofday(&pp_start);
1368 u32 *dst = (u32 *)data->dec_dev.hwregs;
1369 pservice->reg_codec = reg;
1370 pservice->reg_pproc = reg;
1372 VDPU_SOFT_RESET(dst);
1373 VDPU_CLEAN_CACHE(dst);
1375 for (i = VPU_REG_EN_DEC_PP + 1; i < REG_NUM_9190_DEC_PP; i++)
1378 dst[VPU_REG_EN_DEC_PP] = src[VPU_REG_EN_DEC_PP] | 0x2;
1381 dst[VPU_REG_DEC_PP_GATE] = src[VPU_REG_DEC_PP_GATE] | VPU_REG_PP_GATE_BIT;
1382 dst[VPU_REG_DEC_GATE] = src[VPU_REG_DEC_GATE] | VPU_REG_DEC_GATE_BIT;
1383 dst[VPU_REG_EN_DEC] = src[VPU_REG_EN_DEC];
1384 #if VPU_SERVICE_SHOW_TIME
1385 do_gettimeofday(&dec_start);
1389 vpu_err("error: unsupport session type %d", reg->type);
1390 atomic_sub(1, &pservice->total_running);
1391 atomic_sub(1, ®->session->task_running);
1396 vcodec_exit_mode(pservice);
/*
 * try_set_reg - scheduler: if a job is waiting and the hardware slot it
 * needs (reg_codec for enc/dec, reg_pproc for pp, both for dec_pp) is
 * free -- and, with auto_freq, no conflicting unit is mid-frame whose
 * clock we "can not charge" (sic: change) -- power on and dispatch it
 * via reg_from_wait_to_run() + reg_copy_to_hw().
 * NOTE(review): sampled chunk -- case labels, the can_set assignments
 * and closing braces are missing; code kept byte-identical.
 */
1400 static void try_set_reg(struct vpu_subdev_data *data)
1402 struct vpu_service_info *pservice = data->pservice;
1404 if (!list_empty(&pservice->waiting)) {
1406 vpu_reg *reg = list_entry(pservice->waiting.next, vpu_reg, status_link);
1408 vpu_service_power_on(pservice);
1410 switch (reg->type) {
1412 if ((NULL == pservice->reg_codec) && (NULL == pservice->reg_pproc))
1416 if (NULL == pservice->reg_codec)
1418 if (pservice->auto_freq && (NULL != pservice->reg_pproc))
1422 if (NULL == pservice->reg_codec) {
1423 if (NULL == pservice->reg_pproc)
1426 if ((VPU_DEC == pservice->reg_codec->type) && (NULL == pservice->reg_pproc))
1428 /* can not charge frequency when vpu is working */
1429 if (pservice->auto_freq)
1434 if ((NULL == pservice->reg_codec) && (NULL == pservice->reg_pproc))
1438 printk("undefined reg type %d\n", reg->type);
1442 reg_from_wait_to_run(pservice, reg);
1443 reg_copy_to_hw(data, reg);
/*
 * return_reg - copy a finished task's register set back to user space
 * (size depends on task type and hw generation), then free the reg.
 * NOTE(review): the "®->" tokens below are mojibake for "&reg->"
 * introduced by the listing's encoding -- verify against the pristine
 * source before compiling.
 */
1449 static int return_reg(struct vpu_subdev_data *data,
1450 vpu_reg *reg, u32 __user *dst)
1454 switch (reg->type) {
1456 if (copy_to_user(dst, ®->reg[0], data->hw_info->enc_io_size))
/* decoder register count differs between HEVC and 9190-family blocks */
1461 int reg_len = data->hw_info->hw_id == HEVC_ID ? REG_NUM_HEVC_DEC : REG_NUM_9190_DEC;
1462 if (copy_to_user(dst, ®->reg[0], SIZE_REG(reg_len)))
1467 if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_9190_PP)))
1472 if (copy_to_user(dst, ®->reg[0], SIZE_REG(REG_NUM_9190_DEC_PP)))
1478 vpu_err("error: copy reg to user with unknown type %d\n", reg->type);
/* always release the kernel-side reg, even on unknown type */
1482 reg_deinit(data, reg);
/*
 * vpu_service_ioctl - native ioctl entry point for the vpu service
 * character device.
 * Commands visible in this listing:
 *   VPU_IOC_SET_CLIENT_TYPE     - record session type (enc/dec/pp)
 *   VPU_IOC_GET_HW_FUSE_STATUS  - copy dec or enc hw config to user
 *   VPU_IOC_SET_REG             - queue a register set for execution
 *   VPU_IOC_GET_REG             - wait for a done task, copy it back
 *   VPU_IOC_PROBE_IOMMU_STATUS  - report whether an IOMMU is attached
 * NOTE(review): sampled listing -- switch braces, returns and some
 * error paths between the visible lines are elided.
 */
1487 static long vpu_service_ioctl(struct file *filp, unsigned int cmd,
1490 struct vpu_subdev_data *data =
1491 container_of(filp->f_dentry->d_inode->i_cdev,
1492 struct vpu_subdev_data, cdev);
1493 struct vpu_service_info *pservice = data->pservice;
1494 vpu_session *session = (vpu_session *)filp->private_data;
1496 vpu_debug(3, "cmd %x, VPU_IOC_SET_CLIENT_TYPE %x\n", cmd, (u32)VPU_IOC_SET_CLIENT_TYPE);
1497 if (NULL == session)
1501 case VPU_IOC_SET_CLIENT_TYPE : {
1502 session->type = (enum VPU_CLIENT_TYPE)arg;
1505 case VPU_IOC_GET_HW_FUSE_STATUS : {
1507 if (copy_from_user(&req, (void __user *)arg, sizeof(vpu_request))) {
1508 vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_from_user failed\n");
/* non-encoder sessions get the decoder config, encoders the enc config */
1511 if (VPU_ENC != session->type) {
1512 if (copy_to_user((void __user *)req.req,
1513 &pservice->dec_config,
1514 sizeof(struct vpu_dec_config))) {
1515 vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n",
1520 if (copy_to_user((void __user *)req.req,
1521 &pservice->enc_config,
1522 sizeof(struct vpu_enc_config ))) {
1523 vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS copy_to_user failed type %d\n",
1532 case VPU_IOC_SET_REG : {
1535 if (copy_from_user(&req, (void __user *)arg,
1536 sizeof(vpu_request))) {
1537 vpu_err("error: VPU_IOC_SET_REG copy_from_user failed\n");
1540 reg = reg_init(data, session,
1541 (void __user *)req.req, req.size);
/* presumably try_set_reg() is called under the lock here -- elided */
1545 mutex_lock(&pservice->lock);
1547 mutex_unlock(&pservice->lock);
1552 case VPU_IOC_GET_REG : {
1555 if (copy_from_user(&req, (void __user *)arg,
1556 sizeof(vpu_request))) {
1557 vpu_err("error: VPU_IOC_GET_REG copy_from_user failed\n");
/* block until a task completes or VPU_TIMEOUT_DELAY expires */
1560 int ret = wait_event_timeout(session->wait, !list_empty(&session->done), VPU_TIMEOUT_DELAY);
1561 if (!list_empty(&session->done)) {
1563 vpu_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session->pid, ret);
1567 if (unlikely(ret < 0)) {
1568 vpu_err("error: pid %d wait task ret %d\n", session->pid, ret);
1569 } else if (0 == ret) {
1570 vpu_err("error: pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
/* timeout recovery: dump state, drop counters, reset hw, clear session */
1575 int task_running = atomic_read(&session->task_running);
1576 mutex_lock(&pservice->lock);
1577 vpu_service_dump(pservice);
1579 atomic_set(&session->task_running, 0);
1580 atomic_sub(task_running, &pservice->total_running);
1581 printk("%d task is running but not return, reset hardware...", task_running);
1585 vpu_service_session_clear(data, session);
1586 mutex_unlock(&pservice->lock);
1590 mutex_lock(&pservice->lock);
1591 reg = list_entry(session->done.next, vpu_reg, session_link);
1592 return_reg(data, reg, (u32 __user *)req.req);
1593 mutex_unlock(&pservice->lock);
1596 case VPU_IOC_PROBE_IOMMU_STATUS: {
1597 int iommu_enable = 0;
1599 #if defined(CONFIG_VCODEC_MMU)
1600 iommu_enable = data->mmu_dev ? 1 : 0;
1603 if (copy_to_user((void __user *)arg, &iommu_enable, sizeof(int))) {
1604 vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
1610 vpu_err("error: unknow vpu service ioctl cmd %x\n", cmd);
1618 #ifdef CONFIG_COMPAT
/*
 * compat_vpu_service_ioctl - 32-bit compat mirror of vpu_service_ioctl.
 * Identical command handling except user pointers arrive as
 * compat_uptr_t and are translated with compat_ptr().
 * NOTE(review): keep this in lockstep with vpu_service_ioctl above;
 * the sampled listing elides braces/returns between visible lines.
 */
1619 static long compat_vpu_service_ioctl(struct file *filp, unsigned int cmd,
1622 struct vpu_subdev_data *data =
1623 container_of(filp->f_dentry->d_inode->i_cdev,
1624 struct vpu_subdev_data, cdev);
1625 struct vpu_service_info *pservice = data->pservice;
1626 vpu_session *session = (vpu_session *)filp->private_data;
1628 vpu_debug(3, "cmd %x, COMPAT_VPU_IOC_SET_CLIENT_TYPE %x\n", cmd,
1629 (u32)COMPAT_VPU_IOC_SET_CLIENT_TYPE);
1630 if (NULL == session)
1634 case COMPAT_VPU_IOC_SET_CLIENT_TYPE : {
1635 session->type = (enum VPU_CLIENT_TYPE)arg;
1638 case COMPAT_VPU_IOC_GET_HW_FUSE_STATUS : {
1640 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1641 sizeof(vpu_request))) {
1642 vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS"
1643 " copy_from_user failed\n");
1646 if (VPU_ENC != session->type) {
1647 if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1648 &pservice->dec_config,
1649 sizeof(struct vpu_dec_config))) {
1650 vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS "
1651 "copy_to_user failed type %d\n",
1656 if (copy_to_user(compat_ptr((compat_uptr_t)req.req),
1657 &pservice->enc_config,
1658 sizeof(struct vpu_enc_config ))) {
1659 vpu_err("error: VPU_IOC_GET_HW_FUSE_STATUS"
1660 " copy_to_user failed type %d\n",
1669 case COMPAT_VPU_IOC_SET_REG : {
1672 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1673 sizeof(vpu_request))) {
1674 vpu_err("VPU_IOC_SET_REG copy_from_user failed\n");
1677 reg = reg_init(data, session,
1678 compat_ptr((compat_uptr_t)req.req), req.size);
1682 mutex_lock(&pservice->lock);
1684 mutex_unlock(&pservice->lock);
1689 case COMPAT_VPU_IOC_GET_REG : {
1692 if (copy_from_user(&req, compat_ptr((compat_uptr_t)arg),
1693 sizeof(vpu_request))) {
1694 vpu_err("VPU_IOC_GET_REG copy_from_user failed\n");
/* wait for a finished task, same policy as the native path */
1697 int ret = wait_event_timeout(session->wait, !list_empty(&session->done), VPU_TIMEOUT_DELAY);
1698 if (!list_empty(&session->done)) {
1700 vpu_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session->pid, ret);
1704 if (unlikely(ret < 0)) {
1705 vpu_err("error: pid %d wait task ret %d\n", session->pid, ret);
1706 } else if (0 == ret) {
1707 vpu_err("error: pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));
/* timeout recovery mirrors the native ioctl handler */
1712 int task_running = atomic_read(&session->task_running);
1713 mutex_lock(&pservice->lock);
1714 vpu_service_dump(pservice);
1716 atomic_set(&session->task_running, 0);
1717 atomic_sub(task_running, &pservice->total_running);
1718 printk("%d task is running but not return, reset hardware...", task_running);
1722 vpu_service_session_clear(data, session);
1723 mutex_unlock(&pservice->lock);
1727 mutex_lock(&pservice->lock);
1728 reg = list_entry(session->done.next, vpu_reg, session_link);
1729 return_reg(data, reg, compat_ptr((compat_uptr_t)req.req));
1730 mutex_unlock(&pservice->lock);
1733 case COMPAT_VPU_IOC_PROBE_IOMMU_STATUS : {
1734 int iommu_enable = 0;
1736 #if defined(CONFIG_VCODEC_MMU)
1737 iommu_enable = data->mmu_dev ? 1 : 0;
1740 if (copy_to_user(compat_ptr((compat_uptr_t)arg), &iommu_enable, sizeof(int))) {
1741 vpu_err("error: VPU_IOC_PROBE_IOMMU_STATUS copy_to_user failed\n");
1747 vpu_err("error: unknow vpu service ioctl cmd %x\n", cmd);
/*
 * vpu_service_check_hw - read the hardware ID register at hw_addr and
 * match it against the vpu_hw_set table to select data->hw_info.
 * Returns -EINVAL if the ID is unknown (return statement elided in
 * this listing -- presumably ret is set to 0 on a table hit).
 */
1756 static int vpu_service_check_hw(struct vpu_subdev_data *data, u32 hw_addr)
1758 int ret = -EINVAL, i = 0;
1759 volatile u32 *tmp = (volatile u32 *)ioremap_nocache(hw_addr, 0x4);
/* hardware ID lives in the top 16 bits of the first register */
1762 enc_id = (enc_id >> 16) & 0xFFFF;
1763 pr_info("checking hw id %x\n", enc_id);
1764 data->hw_info = NULL;
1765 for (i = 0; i < ARRAY_SIZE(vpu_hw_set); i++) {
1766 if (enc_id == vpu_hw_set[i].hw_id) {
1767 data->hw_info = &vpu_hw_set[i];
1772 iounmap((void *)tmp);
/*
 * vpu_service_open - char-device open: allocate a per-fd vpu_session,
 * initialise its lists/waitqueue, and register it on pservice->session
 * under the service lock.
 */
1776 static int vpu_service_open(struct inode *inode, struct file *filp)
1778 struct vpu_subdev_data *data = container_of(inode->i_cdev, struct vpu_subdev_data, cdev);
1779 struct vpu_service_info *pservice = data->pservice;
1780 vpu_session *session = (vpu_session *)kmalloc(sizeof(vpu_session), GFP_KERNEL);
1784 if (NULL == session) {
1785 vpu_err("error: unable to allocate memory for vpu_session.");
/* type stays VPU_TYPE_BUTT until VPU_IOC_SET_CLIENT_TYPE is issued */
1789 session->type = VPU_TYPE_BUTT;
1790 session->pid = current->pid;
1791 INIT_LIST_HEAD(&session->waiting);
1792 INIT_LIST_HEAD(&session->running);
1793 INIT_LIST_HEAD(&session->done);
1794 INIT_LIST_HEAD(&session->list_session);
1795 init_waitqueue_head(&session->wait);
1796 atomic_set(&session->task_running, 0);
1797 mutex_lock(&pservice->lock);
1798 list_add_tail(&session->list_session, &pservice->session);
1799 filp->private_data = (void *)session;
1800 mutex_unlock(&pservice->lock);
1802 pr_debug("dev opened\n");
/* nonseekable: the device has no meaningful file position */
1804 return nonseekable_open(inode, filp);
/*
 * vpu_service_release - char-device close: warn if tasks are still
 * running, wake any waiter, then unlink and clear the session under
 * the service lock. (kfree of the session is elided in this listing
 * -- presumably inside vpu_service_session_clear or just after it.)
 */
1807 static int vpu_service_release(struct inode *inode, struct file *filp)
1809 struct vpu_subdev_data *data = container_of(inode->i_cdev, struct vpu_subdev_data, cdev);
1810 struct vpu_service_info *pservice = data->pservice;
1812 vpu_session *session = (vpu_session *)filp->private_data;
1814 if (NULL == session)
1817 task_running = atomic_read(&session->task_running);
1819 vpu_err("error: vpu_service session %d still has %d task running when closing\n", session->pid, task_running);
1822 wake_up(&session->wait);
1824 mutex_lock(&pservice->lock);
1825 /* remove this filp from the asynchronusly notified filp's */
1826 list_del_init(&session->list_session);
1827 vpu_service_session_clear(data, session);
1829 filp->private_data = NULL;
1830 mutex_unlock(&pservice->lock);
1832 pr_debug("dev closed\n");
/* file_operations for the vpu service char device (no llseek/mmap). */
1837 static const struct file_operations vpu_service_fops = {
1838 .unlocked_ioctl = vpu_service_ioctl,
1839 .open = vpu_service_open,
1840 .release = vpu_service_release,
1841 #ifdef CONFIG_COMPAT
1842 .compat_ioctl = compat_vpu_service_ioctl,
1844 //.fasync = vpu_service_fasync,
1847 static irqreturn_t vdpu_irq(int irq, void *dev_id);
1848 static irqreturn_t vdpu_isr(int irq, void *dev_id);
1849 static irqreturn_t vepu_irq(int irq, void *dev_id);
1850 static irqreturn_t vepu_isr(int irq, void *dev_id);
1851 static void get_hw_info(struct vpu_subdev_data *data);
1853 #ifdef CONFIG_VCODEC_MMU
/*
 * rockchip_get_sysmmu_dev - look up the system-MMU platform device by
 * its DT compatible string; returns NULL (with a message) when either
 * the node or the platform device is missing.
 */
1854 static struct device *rockchip_get_sysmmu_dev(const char *compt)
1856 struct device_node *dn = NULL;
1857 struct platform_device *pd = NULL;
1858 struct device *ret = NULL ;
1860 dn = of_find_compatible_node(NULL,NULL,compt);
1862 printk("can't find device node %s \r\n",compt);
1866 pd = of_find_device_by_node(dn);
1868 printk("can't find platform device in device node %s\n",compt);
/*
 * platform_set_sysmmu - attach the iommu device to dev's archdata when
 * the IOMMU API is built in; the #else variant (elided here) is a no-op.
 */
1876 #ifdef CONFIG_IOMMU_API
1877 static inline void platform_set_sysmmu(struct device *iommu,
1880 dev->archdata.iommu = iommu;
1883 static inline void platform_set_sysmmu(struct device *iommu,
/*
 * vcodec_sysmmu_fault_hdl - IOMMU page-fault callback: dump the mem
 * regions of the currently running codec task, then request a hw reset
 * by poking reg[101] of the active register set.
 * NOTE(review): the magic index 101 is a hw-specific reset trigger --
 * confirm against the register map before relying on it.
 */
1889 int vcodec_sysmmu_fault_hdl(struct device *dev,
1890 enum rk_iommu_inttype itype,
1891 unsigned long pgtable_base,
1892 unsigned long fault_addr, unsigned int status)
1894 struct platform_device *pdev;
1895 struct vpu_subdev_data *data;
1896 struct vpu_service_info *pservice;
1900 pdev = container_of(dev, struct platform_device, dev);
1902 data = platform_get_drvdata(pdev);
1903 pservice = data->pservice;
1905 if (pservice->reg_codec) {
1906 struct vcodec_mem_region *mem, *n;
1908 vpu_debug(3, "vcodec, fault addr 0x%08x\n", (u32)fault_addr);
1909 list_for_each_entry_safe(mem, n,
1910 &pservice->reg_codec->mem_region_list,
1912 vpu_debug(3, "vcodec, reg[%02u] mem region [%02d] 0x%08x %ld\n",
1913 mem->reg_idx, i, (u32)mem->iova, mem->len);
1917 pr_alert("vcodec, page fault occur, reset hw\n");
1918 pservice->reg_codec->reg[101] = 1;
1926 #if HEVC_TEST_ENABLE
1927 static int hevc_test_case0(vpu_service_info *pservice);
1929 #if defined(CONFIG_ION_ROCKCHIP)
1930 extern struct ion_client *rockchip_ion_client_create(const char * name);
/*
 * vcodec_subdev_probe - probe one codec sub-device (vpu or hevc):
 * map registers (or reuse the combo service's shared mapping), detect
 * the hardware ID, hook enc/dec IRQs, attach the IOMMU, and create the
 * char device node plus debugfs entries.
 * NOTE(review): IRQs are requested with devm_request_threaded_irq but
 * the error path below uses plain free_irq -- devm would free them
 * again on detach (double free); verify in the pristine source.
 */
1933 static int vcodec_subdev_probe(struct platform_device *pdev,
1934 struct vpu_service_info *pservice)
1937 struct resource *res = NULL;
1939 struct device *dev = &pdev->dev;
1940 char *name = (char*)dev_name(dev);
1941 struct device_node *np = pdev->dev.of_node;
1942 struct vpu_subdev_data *data =
1943 devm_kzalloc(dev, sizeof(struct vpu_subdev_data), GFP_KERNEL);
1944 #if defined(CONFIG_VCODEC_MMU)
1946 char mmu_dev_dts_name[40];
1947 of_property_read_u32(np, "iommu_enabled", &iommu_en);
1949 pr_info("probe device %s\n", dev_name(dev));
1951 data->pservice = pservice;
1954 of_property_read_string(np, "name", (const char**)&name);
1955 of_property_read_u32(np, "dev_mode", (u32*)&data->mode);
1956 /*dev_set_name(dev, name);*/
/* combo parent may already have mapped the register window */
1958 if (pservice->reg_base == 0) {
1959 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1960 data->regs = devm_ioremap_resource(dev, res);
1961 if (IS_ERR(data->regs)) {
1962 ret = PTR_ERR(data->regs);
1965 ioaddr = res->start;
1967 data->regs = pservice->reg_base;
1968 ioaddr = pservice->ioaddr;
1971 clear_bit(MMU_ACTIVATED, &data->state);
1972 vcodec_enter_mode(data);
1973 ret = vpu_service_check_hw(data, ioaddr);
1975 vpu_err("error: hw info check faild\n");
/* decoder window is always present; encoder only in full-VPU mode */
1979 data->dec_dev.iosize = data->hw_info->dec_io_size;
1980 data->dec_dev.hwregs = (volatile u32 *)((u8 *)data->regs + data->hw_info->dec_offset);
1981 data->reg_size = data->dec_dev.iosize;
1983 if (data->mode == VCODEC_RUNNING_MODE_VPU) {
1984 data->enc_dev.iosize = data->hw_info->enc_io_size;
1985 data->reg_size = data->reg_size > data->enc_dev.iosize ? data->reg_size : data->enc_dev.iosize;
1986 data->enc_dev.hwregs = (volatile u32 *)((u8 *)data->regs + data->hw_info->enc_offset);
1989 data->irq_enc = platform_get_irq_byname(pdev, "irq_enc");
1990 if (data->irq_enc > 0) {
1991 ret = devm_request_threaded_irq(dev,
1992 data->irq_enc, vepu_irq, vepu_isr,
1993 IRQF_SHARED, dev_name(dev),
1997 "error: can't request vepu irq %d\n",
2002 data->irq_dec = platform_get_irq_byname(pdev, "irq_dec");
2003 if (data->irq_dec > 0) {
2004 ret = devm_request_threaded_irq(dev,
2005 data->irq_dec, vdpu_irq, vdpu_isr,
2006 IRQF_SHARED, dev_name(dev),
2010 "error: can't request vdpu irq %d\n",
2015 atomic_set(&data->dec_dev.irq_count_codec, 0);
2016 atomic_set(&data->dec_dev.irq_count_pp, 0);
2017 atomic_set(&data->enc_dev.irq_count_codec, 0);
2018 atomic_set(&data->enc_dev.irq_count_pp, 0);
2019 #if defined(CONFIG_VCODEC_MMU)
2021 vcodec_enter_mode(data);
/* pick the IOMMU DT compatible string matching this sub-device */
2022 if (data->mode == VCODEC_RUNNING_MODE_HEVC)
2023 sprintf(mmu_dev_dts_name,
2024 HEVC_IOMMU_COMPATIBLE_NAME);
2026 sprintf(mmu_dev_dts_name,
2027 VPU_IOMMU_COMPATIBLE_NAME);
2030 rockchip_get_sysmmu_dev(mmu_dev_dts_name);
2033 platform_set_sysmmu(data->mmu_dev, dev);
2035 rockchip_iovmm_set_fault_handler(dev, vcodec_sysmmu_fault_hdl);
2038 /* create device node */
2039 ret = alloc_chrdev_region(&data->dev_t, 0, 1, name);
2041 dev_err(dev, "alloc dev_t failed\n");
2045 cdev_init(&data->cdev, &vpu_service_fops);
2047 data->cdev.owner = THIS_MODULE;
2048 data->cdev.ops = &vpu_service_fops;
2050 ret = cdev_add(&data->cdev, data->dev_t, 1);
2053 dev_err(dev, "add dev_t failed\n");
2057 data->cls = class_create(THIS_MODULE, name);
2059 if (IS_ERR(data->cls)) {
2060 ret = PTR_ERR(data->cls);
2061 dev_err(dev, "class_create err:%d\n", ret);
2065 data->child_dev = device_create(data->cls, dev,
2066 data->dev_t, NULL, name);
2070 platform_set_drvdata(pdev, data);
2072 INIT_LIST_HEAD(&data->lnk_service);
2073 list_add_tail(&data->lnk_service, &pservice->subdev_list);
2075 #ifdef CONFIG_DEBUG_FS
2077 vcodec_debugfs_create_device_dir((char*)name, parent);
2078 if (data->debugfs_dir == NULL)
2079 vpu_err("create debugfs dir %s failed\n", name);
2081 data->debugfs_file_regs =
2082 debugfs_create_file("regs", 0664,
2083 data->debugfs_dir, data,
2084 &debug_vcodec_fops);
/* error unwind: undo irq/cdev/class in reverse order of creation */
2088 if (data->irq_enc > 0)
2089 free_irq(data->irq_enc, (void *)data);
2090 if (data->irq_dec > 0)
2091 free_irq(data->irq_dec, (void *)data);
2093 if (data->child_dev) {
2094 device_destroy(data->cls, data->dev_t);
2095 cdev_del(&data->cdev);
2096 unregister_chrdev_region(data->dev_t, 1);
2100 class_destroy(data->cls);
/*
 * vcodec_subdev_remove - tear down one sub-device: device node, class,
 * cdev, chrdev region, IRQs and debugfs entries.
 * BUG(review): free_irq is passed (void *)&data (address of the local
 * pointer) while the IRQs were requested with dev_id == data -- the
 * cookie mismatch means free_irq will not find the handler. Should be
 * (void *)data, as in the probe error path. Also devm-requested IRQs
 * normally need no manual free_irq at all. Verify in pristine source.
 */
2104 static void vcodec_subdev_remove(struct vpu_subdev_data *data)
2106 device_destroy(data->cls, data->dev_t);
2107 class_destroy(data->cls);
2108 cdev_del(&data->cdev);
2109 unregister_chrdev_region(data->dev_t, 1);
2111 free_irq(data->irq_enc, (void *)&data);
2112 free_irq(data->irq_dec, (void *)&data);
2114 #ifdef CONFIG_DEBUG_FS
2115 debugfs_remove(data->debugfs_file_regs);
2116 debugfs_remove(data->debugfs_dir);
/*
 * vcodec_read_property - parse the service's DT node: sub-device count,
 * combo mode-switch bit/register, GRF handle and the service name.
 * mode_bit/mode_ctrl are only meaningful for combo devices (subcnt > 1).
 */
2120 static void vcodec_read_property(struct device_node *np,
2121 struct vpu_service_info *pservice)
2123 pservice->mode_bit = 0;
2124 pservice->mode_ctrl = 0;
2125 pservice->subcnt = 0;
2127 of_property_read_u32(np, "subcnt", &pservice->subcnt);
2129 if (pservice->subcnt > 1) {
2130 of_property_read_u32(np, "mode_bit", &pservice->mode_bit);
2131 of_property_read_u32(np, "mode_ctrl", &pservice->mode_ctrl);
/* GRF comes from syscon when available, otherwise the static mapping */
2133 #ifdef CONFIG_MFD_SYSCON
2134 pservice->grf_base = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
2136 pservice->grf_base = (u32*)RK_GRF_VIRT;
2138 if (IS_ERR(pservice->grf_base)) {
2139 vpu_err("can't find vpu grf property\n");
2142 of_property_read_string(np, "name", (const char**)&pservice->name);
/*
 * vcodec_init_drvdata - initialise the shared service state: wakelock,
 * task lists, lock, delayed power-off work and the ION client.
 * curr_mode starts at -1 meaning "no running mode selected yet".
 */
2145 static void vcodec_init_drvdata(struct vpu_service_info *pservice)
2147 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2148 pservice->curr_mode = -1;
2150 wake_lock_init(&pservice->wake_lock, WAKE_LOCK_SUSPEND, "vpu");
2151 INIT_LIST_HEAD(&pservice->waiting);
2152 INIT_LIST_HEAD(&pservice->running);
2153 mutex_init(&pservice->lock);
2155 INIT_LIST_HEAD(&pservice->done);
2156 INIT_LIST_HEAD(&pservice->session);
2157 INIT_LIST_HEAD(&pservice->subdev_list);
2159 pservice->reg_pproc = NULL;
2160 atomic_set(&pservice->total_running, 0);
2161 pservice->enabled = false;
2163 INIT_DELAYED_WORK(&pservice->power_off_work, vpu_power_off_work);
/* ION client failure is logged but not fatal here */
2165 pservice->ion_client = rockchip_ion_client_create("vpu");
2166 if (IS_ERR(pservice->ion_client)) {
2167 vpu_err("failed to create ion client for vcodec ret %ld\n",
2168 PTR_ERR(pservice->ion_client));
2170 vpu_debug(3, "vcodec ion client create success!\n");
/*
 * vcodec_probe - top-level platform probe: read DT properties, pick the
 * device id from the service name, power on, optionally map a shared
 * register window, then probe either each combo child or this device
 * itself as a single sub-device.
 */
2174 static int vcodec_probe(struct platform_device *pdev)
2178 struct resource *res = NULL;
2179 struct device *dev = &pdev->dev;
2180 struct device_node *np = pdev->dev.of_node;
2181 struct vpu_service_info *pservice =
2182 devm_kzalloc(dev, sizeof(struct vpu_service_info), GFP_KERNEL);
2184 pr_info("probe device %s\n", dev_name(dev));
2186 vcodec_read_property(np, pservice);
2187 vcodec_init_drvdata(pservice);
/* name prefix selects the device identity; anything else is a combo */
2189 if (strncmp(pservice->name, "hevc_service", 12) == 0)
2190 pservice->dev_id = VCODEC_DEVICE_ID_HEVC;
2191 else if (strncmp(pservice->name, "vpu_service", 11) == 0)
2192 pservice->dev_id = VCODEC_DEVICE_ID_VPU;
2194 pservice->dev_id = VCODEC_DEVICE_ID_COMBO;
2196 pservice->dev = dev;
2198 if (0 > vpu_get_clk(pservice))
2201 vpu_service_power_on(pservice);
2203 if (of_property_read_bool(np, "reg")) {
2204 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2206 pservice->reg_base = devm_ioremap_resource(pservice->dev, res);
2207 if (IS_ERR(pservice->reg_base)) {
2208 vpu_err("ioremap registers base failed\n");
2209 ret = PTR_ERR(pservice->reg_base);
2212 pservice->ioaddr = res->start;
2214 pservice->reg_base = 0;
/* combo service: probe each DT child; otherwise probe self */
2217 if (of_property_read_bool(np, "subcnt")) {
2218 for (i = 0; i<pservice->subcnt; i++) {
2219 struct device_node *sub_np;
2220 struct platform_device *sub_pdev;
2221 sub_np = of_parse_phandle(np, "rockchip,sub", i);
2222 sub_pdev = of_find_device_by_node(sub_np);
2224 vcodec_subdev_probe(sub_pdev, pservice);
2227 vcodec_subdev_probe(pdev, pservice);
2229 platform_set_drvdata(pdev, pservice);
2231 vpu_service_power_off(pservice);
2233 pr_info("init success\n");
/* error path: power off, release clocks/wakelock and the mem region */
2238 pr_info("init failed\n");
2239 vpu_service_power_off(pservice);
2240 vpu_put_clk(pservice);
2241 wake_lock_destroy(&pservice->wake_lock);
2244 devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
/*
 * vcodec_remove - platform remove: tear down every registered
 * sub-device, release the register mem region, clocks and wakelock.
 * NOTE(review): res is used without a NULL check after
 * platform_get_resource -- would oops for a combo device probed
 * without a "reg" property; confirm against the pristine source.
 */
2249 static int vcodec_remove(struct platform_device *pdev)
2251 struct vpu_service_info *pservice = platform_get_drvdata(pdev);
2252 struct resource *res;
2253 struct vpu_subdev_data *data, *n;
2255 list_for_each_entry_safe(data, n, &pservice->subdev_list, lnk_service) {
2256 vcodec_subdev_remove(data);
2259 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2260 devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
2261 vpu_put_clk(pservice);
2262 wake_lock_destroy(&pservice->wake_lock);
2267 #if defined(CONFIG_OF)
/* DT match table: plain vpu, hevc-only, and combined vpu+hevc devices */
2268 static const struct of_device_id vcodec_service_dt_ids[] = {
2269 {.compatible = "vpu_service",},
2270 {.compatible = "rockchip,hevc_service",},
2271 {.compatible = "rockchip,vpu_combo",},
/* platform driver glue; .driver.name line is elided in this listing */
2276 static struct platform_driver vcodec_driver = {
2277 .probe = vcodec_probe,
2278 .remove = vcodec_remove,
2281 .owner = THIS_MODULE,
2282 #if defined(CONFIG_OF)
2283 .of_match_table = of_match_ptr(vcodec_service_dt_ids),
/*
 * get_hw_info - decode the hardware capability fuse registers into
 * pservice->dec_config / enc_config, and derive per-SoC policy flags
 * (auto frequency scaling, decoder address workaround, max width).
 * The non-VPU (HEVC) branch near the end only sets maxDecPicWidth and
 * disables auto_freq.
 */
2288 static void get_hw_info(struct vpu_subdev_data *data)
2290 struct vpu_service_info *pservice = data->pservice;
2291 struct vpu_dec_config *dec = &pservice->dec_config;
2292 struct vpu_enc_config *enc = &pservice->enc_config;
2293 if (data->mode == VCODEC_RUNNING_MODE_VPU) {
2294 u32 configReg = data->dec_dev.hwregs[VPU_DEC_HWCFG0];
2295 u32 asicID = data->dec_dev.hwregs[0];
2297 dec->h264_support = (configReg >> DWL_H264_E) & 0x3U;
2298 dec->jpegSupport = (configReg >> DWL_JPEG_E) & 0x01U;
2299 if (dec->jpegSupport && ((configReg >> DWL_PJPEG_E) & 0x01U))
2300 dec->jpegSupport = JPEG_PROGRESSIVE;
2301 dec->mpeg4Support = (configReg >> DWL_MPEG4_E) & 0x3U;
2302 dec->vc1Support = (configReg >> DWL_VC1_E) & 0x3U;
2303 dec->mpeg2Support = (configReg >> DWL_MPEG2_E) & 0x01U;
2304 dec->sorensonSparkSupport = (configReg >> DWL_SORENSONSPARK_E) & 0x01U;
2305 dec->refBufSupport = (configReg >> DWL_REF_BUFF_E) & 0x01U;
2306 dec->vp6Support = (configReg >> DWL_VP6_E) & 0x01U;
2308 dec->maxDecPicWidth = 4096;
2310 /* 2nd Config register */
2311 configReg = data->dec_dev.hwregs[VPU_DEC_HWCFG1];
2312 if (dec->refBufSupport) {
2313 if ((configReg >> DWL_REF_BUFF_ILACE_E) & 0x01U)
2314 dec->refBufSupport |= 2;
2315 if ((configReg >> DWL_REF_BUFF_DOUBLE_E) & 0x01U)
2316 dec->refBufSupport |= 4;
2318 dec->customMpeg4Support = (configReg >> DWL_MPEG4_CUSTOM_E) & 0x01U;
2319 dec->vp7Support = (configReg >> DWL_VP7_E) & 0x01U;
2320 dec->vp8Support = (configReg >> DWL_VP8_E) & 0x01U;
2321 dec->avsSupport = (configReg >> DWL_AVS_E) & 0x01U;
2323 /* JPEG xtensions */
2324 if (((asicID >> 16) >= 0x8190U) || ((asicID >> 16) == 0x6731U))
2325 dec->jpegESupport = (configReg >> DWL_JPEG_EXT_E) & 0x01U;
2327 dec->jpegESupport = JPEG_EXT_NOT_SUPPORTED;
2329 if (((asicID >> 16) >= 0x9170U) || ((asicID >> 16) == 0x6731U) )
2330 dec->rvSupport = (configReg >> DWL_RV_E) & 0x03U;
2332 dec->rvSupport = RV_NOT_SUPPORTED;
2333 dec->mvcSupport = (configReg >> DWL_MVC_E) & 0x03U;
2335 if (dec->refBufSupport && (asicID >> 16) == 0x6731U )
2336 dec->refBufSupport |= 8; /* enable HW support for offset */
/* rk3036 has no encoder block, so skip the encoder fuse register */
2338 if (!cpu_is_rk3036()) {
2339 configReg = data->enc_dev.hwregs[63];
2340 enc->maxEncodedWidth = configReg & ((1 << 11) - 1);
2341 enc->h264Enabled = (configReg >> 27) & 1;
2342 enc->mpeg4Enabled = (configReg >> 26) & 1;
2343 enc->jpegEnabled = (configReg >> 25) & 1;
2344 enc->vsEnabled = (configReg >> 24) & 1;
2345 enc->rgbEnabled = (configReg >> 28) & 1;
2346 enc->reg_size = data->reg_size;
2347 enc->reserv[0] = enc->reserv[1] = 0;
2349 pservice->auto_freq = soc_is_rk2928g() || soc_is_rk2928l() ||
2350 soc_is_rk2926() || soc_is_rk3288() || soc_is_rk3368();
2351 if (pservice->auto_freq) {
2352 vpu_debug(3, "vpu_service set to auto frequency mode\n");
2353 atomic_set(&pservice->freq_status, VPU_FREQ_BUT);
2356 pservice->bug_dec_addr = cpu_is_rk30xx();
2358 if (cpu_is_rk3036() || cpu_is_rk312x())
2359 dec->maxDecPicWidth = 1920;
2361 dec->maxDecPicWidth = 4096;
2362 /* disable frequency switch in hevc.*/
2363 pservice->auto_freq = false;
/*
 * vdpu_irq - hard-IRQ half for the decoder: acknowledge the decoder
 * (and, on non-HEVC hw, the post-processor) interrupt registers, bump
 * the per-unit pending counters, and defer to vdpu_isr via
 * IRQ_WAKE_THREAD.
 */
2367 static irqreturn_t vdpu_irq(int irq, void *dev_id)
2369 struct vpu_subdev_data *data = (struct vpu_subdev_data*)dev_id;
2370 struct vpu_service_info *pservice = data->pservice;
2371 vpu_device *dev = &data->dec_dev;
2375 vcodec_enter_mode(data);
2377 irq_status = raw_status = readl(dev->hwregs + DEC_INTERRUPT_REGISTER);
2379 vpu_debug(3, "%s status %08x\n", __func__, raw_status);
2381 if (irq_status & DEC_INTERRUPT_BIT) {
2382 pr_debug("dec_isr dec %x\n", irq_status);
/* 0x40001: poll until the busy+irq combination clears -- TODO confirm */
2383 if ((irq_status & 0x40001) == 0x40001) {
2387 DEC_INTERRUPT_REGISTER);
2388 } while ((irq_status & 0x40001) == 0x40001);
2391 writel(0, dev->hwregs + DEC_INTERRUPT_REGISTER);
2392 atomic_add(1, &dev->irq_count_codec);
/* HEVC hw has no post-processor; only poll PP status on 9190-family */
2395 if (data->hw_info->hw_id != HEVC_ID) {
2396 irq_status = readl(dev->hwregs + PP_INTERRUPT_REGISTER);
2397 if (irq_status & PP_INTERRUPT_BIT) {
2398 pr_debug("vdpu_isr pp %x\n", irq_status);
2400 writel(irq_status & (~DEC_INTERRUPT_BIT), dev->hwregs + PP_INTERRUPT_REGISTER);
2401 atomic_add(1, &dev->irq_count_pp);
2405 pservice->irq_status = raw_status;
2407 vcodec_exit_mode(pservice);
2409 return IRQ_WAKE_THREAD;
/*
 * vdpu_isr - threaded half for the decoder IRQ: under the service lock,
 * retire the completed codec and/or pp task (run -> done) for each
 * pending interrupt counted by vdpu_irq. A pending count with no
 * matching running task is reported as an error.
 */
2412 static irqreturn_t vdpu_isr(int irq, void *dev_id)
2414 struct vpu_subdev_data *data = (struct vpu_subdev_data*)dev_id;
2415 struct vpu_service_info *pservice = data->pservice;
2416 vpu_device *dev = &data->dec_dev;
2418 mutex_lock(&pservice->lock);
2419 if (atomic_read(&dev->irq_count_codec)) {
2420 #if VPU_SERVICE_SHOW_TIME
2421 do_gettimeofday(&dec_end);
2422 vpu_debug(3, "dec task: %ld ms\n",
2423 (dec_end.tv_sec - dec_start.tv_sec) * 1000 +
2424 (dec_end.tv_usec - dec_start.tv_usec) / 1000);
2426 atomic_sub(1, &dev->irq_count_codec);
2427 if (NULL == pservice->reg_codec) {
2428 vpu_err("error: dec isr with no task waiting\n");
2430 reg_from_run_to_done(data, pservice->reg_codec);
2434 if (atomic_read(&dev->irq_count_pp)) {
2435 #if VPU_SERVICE_SHOW_TIME
2436 do_gettimeofday(&pp_end);
2437 printk("pp task: %ld ms\n",
2438 (pp_end.tv_sec - pp_start.tv_sec) * 1000 +
2439 (pp_end.tv_usec - pp_start.tv_usec) / 1000);
2441 atomic_sub(1, &dev->irq_count_pp);
2442 if (NULL == pservice->reg_pproc) {
2443 vpu_err("error: pp isr with no task waiting\n");
2445 reg_from_run_to_done(data, pservice->reg_pproc);
2449 mutex_unlock(&pservice->lock);
/*
 * vepu_irq - hard-IRQ half for the encoder: read and acknowledge the
 * encoder interrupt register, count the pending completion, and defer
 * the retirement work to vepu_isr via IRQ_WAKE_THREAD.
 */
2453 static irqreturn_t vepu_irq(int irq, void *dev_id)
2455 struct vpu_subdev_data *data = (struct vpu_subdev_data*)dev_id;
2456 struct vpu_service_info *pservice = data->pservice;
2457 vpu_device *dev = &data->enc_dev;
2460 vcodec_enter_mode(data);
2461 irq_status= readl(dev->hwregs + ENC_INTERRUPT_REGISTER);
2463 pr_debug("vepu_irq irq status %x\n", irq_status);
2465 #if VPU_SERVICE_SHOW_TIME
2466 do_gettimeofday(&enc_end);
2467 vpu_debug(3, "enc task: %ld ms\n",
2468 (enc_end.tv_sec - enc_start.tv_sec) * 1000 +
2469 (enc_end.tv_usec - enc_start.tv_usec) / 1000);
/* clear only the irq bit, preserving the rest of the status register */
2471 if (likely(irq_status & ENC_INTERRUPT_BIT)) {
2473 writel(irq_status & (~ENC_INTERRUPT_BIT), dev->hwregs + ENC_INTERRUPT_REGISTER);
2474 atomic_add(1, &dev->irq_count_codec);
2477 pservice->irq_status = irq_status;
2479 vcodec_exit_mode(pservice);
2481 return IRQ_WAKE_THREAD;
/*
 * vepu_isr - threaded half for the encoder IRQ: under the service lock,
 * retire the completed encoder task (run -> done) for the pending
 * interrupt counted by vepu_irq.
 */
2484 static irqreturn_t vepu_isr(int irq, void *dev_id)
2486 struct vpu_subdev_data *data = (struct vpu_subdev_data*)dev_id;
2487 struct vpu_service_info *pservice = data->pservice;
2488 vpu_device *dev = &data->enc_dev;
2490 mutex_lock(&pservice->lock);
2491 if (atomic_read(&dev->irq_count_codec)) {
2492 atomic_sub(1, &dev->irq_count_codec);
2493 if (NULL == pservice->reg_codec) {
2494 vpu_err("error: enc isr with no task waiting\n");
2496 reg_from_run_to_done(data, pservice->reg_codec);
2500 mutex_unlock(&pservice->lock);
/*
 * vcodec_service_init - module init: register the platform driver and
 * (when CONFIG_DEBUG_FS) create the debugfs root directory.
 */
2504 static int __init vcodec_service_init(void)
2508 if ((ret = platform_driver_register(&vcodec_driver)) != 0) {
2509 vpu_err("Platform device register failed (%d).\n", ret);
2513 #ifdef CONFIG_DEBUG_FS
2514 vcodec_debugfs_init();
/* module exit: remove debugfs entries, then unregister the driver */
2520 static void __exit vcodec_service_exit(void)
2522 #ifdef CONFIG_DEBUG_FS
2523 vcodec_debugfs_exit();
2526 platform_driver_unregister(&vcodec_driver);
2529 module_init(vcodec_service_init);
2530 module_exit(vcodec_service_exit);
2532 #ifdef CONFIG_DEBUG_FS
2533 #include <linux/seq_file.h>
/* create the debugfs root dir "vcodec"; result stored in global parent */
2535 static int vcodec_debugfs_init()
2537 parent = debugfs_create_dir("vcodec", NULL);
/* remove the debugfs root dir created by vcodec_debugfs_init */
2544 static void vcodec_debugfs_exit()
2546 debugfs_remove(parent);
/* per-device debugfs subdirectory under the "vcodec" root */
2549 static struct dentry* vcodec_debugfs_create_device_dir(char *dirname, struct dentry *parent)
2551 return debugfs_create_dir(dirname, parent);
/*
 * debug_vcodec_show - debugfs "regs" dump: powers the hw on, prints the
 * encoder (non-HEVC only) and decoder register windows, then walks every
 * session's waiting/running/done lists. All under the service lock.
 */
2554 static int debug_vcodec_show(struct seq_file *s, void *unused)
2556 struct vpu_subdev_data *data = s->private;
2557 struct vpu_service_info *pservice = data->pservice;
2559 vpu_reg *reg, *reg_tmp;
2560 vpu_session *session, *session_tmp;
2562 mutex_lock(&pservice->lock);
2563 vpu_service_power_on(pservice);
2564 if (data->hw_info->hw_id != HEVC_ID) {
2565 seq_printf(s, "\nENC Registers:\n");
2566 n = data->enc_dev.iosize >> 2;
2567 for (i = 0; i < n; i++)
2568 seq_printf(s, "\tswreg%d = %08X\n", i, readl(data->enc_dev.hwregs + i));
2570 seq_printf(s, "\nDEC Registers:\n");
2571 n = data->dec_dev.iosize >> 2;
2572 for (i = 0; i < n; i++)
2573 seq_printf(s, "\tswreg%d = %08X\n", i, readl(data->dec_dev.hwregs + i));
2575 seq_printf(s, "\nvpu service status:\n");
2576 list_for_each_entry_safe(session, session_tmp, &pservice->session, list_session) {
2577 seq_printf(s, "session pid %d type %d:\n", session->pid, session->type);
2578 /*seq_printf(s, "waiting reg set %d\n");*/
2579 list_for_each_entry_safe(reg, reg_tmp, &session->waiting, session_link) {
2580 seq_printf(s, "waiting register set\n");
2582 list_for_each_entry_safe(reg, reg_tmp, &session->running, session_link) {
2583 seq_printf(s, "running register set\n");
2585 list_for_each_entry_safe(reg, reg_tmp, &session->done, session_link) {
2586 seq_printf(s, "done register set\n");
2589 mutex_unlock(&pservice->lock);
/* debugfs open hook: bind debug_vcodec_show via single_open */
2594 static int debug_vcodec_open(struct inode *inode, struct file *file)
2596 return single_open(file, debug_vcodec_show, inode->i_private);
2601 #if HEVC_TEST_ENABLE & defined(CONFIG_ION_ROCKCHIP)
2602 #include "hevc_test_inc/pps_00.h"
2603 #include "hevc_test_inc/register_00.h"
2604 #include "hevc_test_inc/rps_00.h"
2605 #include "hevc_test_inc/scaling_list_00.h"
2606 #include "hevc_test_inc/stream_00.h"
2608 #include "hevc_test_inc/pps_01.h"
2609 #include "hevc_test_inc/register_01.h"
2610 #include "hevc_test_inc/rps_01.h"
2611 #include "hevc_test_inc/scaling_list_01.h"
2612 #include "hevc_test_inc/stream_01.h"
2614 #include "hevc_test_inc/cabac.h"
2616 extern struct ion_client *rockchip_ion_client_create(const char * name);
2618 static struct ion_client *ion_client = NULL;
/*
 * get_align_ptr - HEVC self-test helper: allocate a 16-byte-aligned ION
 * CMA buffer, copy tbl into it, return the kernel mapping and store the
 * physical address in *phy.
 * NOTE(review): size is rounded up but ion_alloc is passed len -- the
 * rounded value is only used as ion_phys' out-length; verify intent.
 */
2619 u8* get_align_ptr(u8* tbl, int len, u32 *phy)
2621 int size = (len+15) & (~15);
2622 struct ion_handle *handle;
2623 u8 *ptr;// = (u8*)kzalloc(size, GFP_KERNEL);
2625 if (ion_client == NULL)
2626 ion_client = rockchip_ion_client_create("vcodec");
2628 handle = ion_alloc(ion_client, (size_t)len, 16, ION_HEAP(ION_CMA_HEAP_ID), 0);
2630 ptr = ion_map_kernel(ion_client, handle);
2632 ion_phys(ion_client, handle, phy, &size);
2634 memcpy(ptr, tbl, len);
/*
 * get_align_ptr_no_copy - same as get_align_ptr but leaves the buffer
 * contents uninitialised (used for output/reference frames in the
 * HEVC self-test).
 */
2639 u8* get_align_ptr_no_copy(int len, u32 *phy)
2641 int size = (len+15) & (~15);
2642 struct ion_handle *handle;
2645 if (ion_client == NULL)
2646 ion_client = rockchip_ion_client_create("vcodec");
2648 handle = ion_alloc(ion_client, (size_t)len, 16, ION_HEAP(ION_CMA_HEAP_ID), 0);
2650 ptr = ion_map_kernel(ion_client, handle);
2652 ion_phys(ion_client, handle, phy, &size);
2658 static int hevc_test_case0(vpu_service_info *pservice)
2660 vpu_session session;
2662 unsigned long size = 272;//sizeof(register_00); // registers array length
2665 u8 *pps_tbl[TEST_CNT];
2666 u8 *register_tbl[TEST_CNT];
2667 u8 *rps_tbl[TEST_CNT];
2668 u8 *scaling_list_tbl[TEST_CNT];
2669 u8 *stream_tbl[TEST_CNT];
2685 volatile u8 *stream_buf;
2686 volatile u8 *pps_buf;
2687 volatile u8 *rps_buf;
2688 volatile u8 *scl_buf;
2689 volatile u8 *yuv_buf;
2690 volatile u8 *cabac_buf;
2691 volatile u8 *ref_buf;
2697 pps_tbl[0] = pps_00;
2698 pps_tbl[1] = pps_01;
2700 register_tbl[0] = register_00;
2701 register_tbl[1] = register_01;
2703 rps_tbl[0] = rps_00;
2704 rps_tbl[1] = rps_01;
2706 scaling_list_tbl[0] = scaling_list_00;
2707 scaling_list_tbl[1] = scaling_list_01;
2709 stream_tbl[0] = stream_00;
2710 stream_tbl[1] = stream_01;
2712 stream_size[0] = sizeof(stream_00);
2713 stream_size[1] = sizeof(stream_01);
2715 pps_size[0] = sizeof(pps_00);
2716 pps_size[1] = sizeof(pps_01);
2718 rps_size[0] = sizeof(rps_00);
2719 rps_size[1] = sizeof(rps_01);
2721 scl_size[0] = sizeof(scaling_list_00);
2722 scl_size[1] = sizeof(scaling_list_01);
2724 cabac_size[0] = sizeof(Cabac_table);
2725 cabac_size[1] = sizeof(Cabac_table);
2727 /* create session */
2728 session.pid = current->pid;
2729 session.type = VPU_DEC;
2730 INIT_LIST_HEAD(&session.waiting);
2731 INIT_LIST_HEAD(&session.running);
2732 INIT_LIST_HEAD(&session.done);
2733 INIT_LIST_HEAD(&session.list_session);
2734 init_waitqueue_head(&session.wait);
2735 atomic_set(&session.task_running, 0);
2736 list_add_tail(&session.list_session, &pservice->session);
2738 yuv[0] = get_align_ptr_no_copy(256*256*2, &phy_yuv);
2739 yuv[1] = get_align_ptr_no_copy(256*256*2, &phy_ref);
2741 while (testidx < TEST_CNT) {
2742 /* create registers */
2743 reg = kmalloc(sizeof(vpu_reg)+pservice->reg_size, GFP_KERNEL);
2745 vpu_err("error: kmalloc fail in reg_init\n");
2749 if (size > pservice->reg_size) {
2750 printk("warning: vpu reg size %lu is larger than hw reg size %lu\n", size, pservice->reg_size);
2751 size = pservice->reg_size;
2753 reg->session = &session;
2754 reg->type = session.type;
2756 reg->freq = VPU_FREQ_DEFAULT;
2757 reg->reg = (unsigned long *)®[1];
2758 INIT_LIST_HEAD(®->session_link);
2759 INIT_LIST_HEAD(®->status_link);
2761 /* TODO: stuff registers */
2762 memcpy(®->reg[0], register_tbl[testidx], /*sizeof(register_00)*/ 176);
2764 stream_buf = get_align_ptr(stream_tbl[testidx], stream_size[testidx], &phy_str);
2765 pps_buf = get_align_ptr(pps_tbl[0], pps_size[0], &phy_pps);
2766 rps_buf = get_align_ptr(rps_tbl[testidx], rps_size[testidx], &phy_rps);
2767 scl_buf = get_align_ptr(scaling_list_tbl[testidx], scl_size[testidx], &phy_scl);
2768 cabac_buf = get_align_ptr(Cabac_table, cabac_size[testidx], &phy_cabac);
2772 /* TODO: replace reigster address */
2773 for (i=0; i<64; i++) {
2777 scaling_offset = (u32)pps[i*80+74];
2778 scaling_offset += (u32)pps[i*80+75] << 8;
2779 scaling_offset += (u32)pps[i*80+76] << 16;
2780 scaling_offset += (u32)pps[i*80+77] << 24;
2782 tmp = phy_scl + scaling_offset;
2784 pps[i*80+74] = tmp & 0xff;
2785 pps[i*80+75] = (tmp >> 8) & 0xff;
2786 pps[i*80+76] = (tmp >> 16) & 0xff;
2787 pps[i*80+77] = (tmp >> 24) & 0xff;
2790 printk("%s %d, phy stream %08x, phy pps %08x, phy rps %08x\n",
2791 __func__, __LINE__, phy_str, phy_pps, phy_rps);
2794 reg->reg[4] = phy_str;
2795 reg->reg[5] = ((stream_size[testidx]+15)&(~15))+64;
2796 reg->reg[6] = phy_cabac;
2797 reg->reg[7] = testidx?phy_ref:phy_yuv;
2798 reg->reg[42] = phy_pps;
2799 reg->reg[43] = phy_rps;
2800 for (i = 10; i <= 24; i++)
2801 reg->reg[i] = phy_yuv;
2803 mutex_lock(pservice->lock);
2804 list_add_tail(®->status_link, &pservice->waiting);
2805 list_add_tail(®->session_link, &session.waiting);
2806 mutex_unlock(pservice->lock);
2808 /* stuff hardware */
2811 /* wait for result */
2812 ret = wait_event_timeout(session.wait, !list_empty(&session.done), VPU_TIMEOUT_DELAY);
2813 if (!list_empty(&session.done)) {
2815 vpu_err("warning: pid %d wait task sucess but wait_evernt ret %d\n", session.pid, ret);
2818 if (unlikely(ret < 0)) {
2819 vpu_err("error: pid %d wait task ret %d\n", session.pid, ret);
2820 } else if (0 == ret) {
2821 vpu_err("error: pid %d wait %d task done timeout\n", session.pid, atomic_read(&session.task_running));
2826 int task_running = atomic_read(&session.task_running);
2828 mutex_lock(pservice->lock);
2829 vpu_service_dump(pservice);
2831 atomic_set(&session.task_running, 0);
2832 atomic_sub(task_running, &pservice->total_running);
2833 printk("%d task is running but not return, reset hardware...", task_running);
2837 vpu_service_session_clear(pservice, &session);
2838 mutex_unlock(pservice->lock);
2840 printk("\nDEC Registers:\n");
2841 n = data->dec_dev.iosize >> 2;
2843 printk("\tswreg%d = %08X\n", i, readl(data->dec_dev.hwregs + i));
2845 vpu_err("test index %d failed\n", testidx);
2848 vpu_debug(3, "test index %d success\n", testidx);
2850 vpu_reg *reg = list_entry(session.done.next, vpu_reg, session_link);
2852 for (i=0; i<68; i++) {
2854 printk("%02d: ", i);
2855 printk("%08x ", reg->reg[i]);
2863 reg_deinit(data, reg);