2 * Copyright (C) 2013 ROCKCHIP, Inc.
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
15 #include <linux/clk.h>
16 #include <linux/delay.h>
17 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/uaccess.h>
21 #include <linux/platform_device.h>
22 #include <linux/interrupt.h>
23 #include <linux/kthread.h>
24 #include <linux/poll.h>
25 #include <linux/dma-mapping.h>
27 #include <linux/rk_fb.h>
28 #include <linux/wakelock.h>
30 #include <linux/of_platform.h>
32 #include <linux/module.h>
33 #include <linux/rockchip/cpu.h>
34 #include <linux/rockchip/cru.h>
35 #include <asm/cacheflush.h>
37 #if defined(CONFIG_IEP_MMU)
40 #include "hw_iep_reg.h"
/* Compile-time feature switches: clock gating support is on by default,
 * the built-in self test (iep_test_case0) is compiled out. */
43 #define IEP_CLK_ENABLE
44 /*#define IEP_TEST_CASE*/
/* Runtime-tunable verbosity knob, writable by root via sysfs. */
47 module_param(debug, int, S_IRUGO | S_IWUSR);
48 MODULE_PARM_DESC(debug,
49 "Debug level - higher value produces more verbose messages");
/* MMIO window size of the IEP register file. */
51 #define RK_IEP_SIZE 0x1000
/* Job-completion wait and idle power-down delays, in jiffies.
 * Parenthesized so the macros expand safely inside larger expressions
 * (e.g. "x / IEP_TIMEOUT_DELAY" would otherwise parse as "x / 2 * HZ"). */
52 #define IEP_TIMEOUT_DELAY (2*HZ)
53 #define IEP_POWER_OFF_DELAY (4*HZ)
/* NOTE(review): the lines below are fields torn out of the driver's private
 * data structure (struct iep_drvdata) — the struct header was lost in
 * extraction; confirm against the full source before editing. */
56 struct miscdevice miscdev;
64 struct clk *aclk_vio1;
68 /* direct path interface mode. true: enable, false: disable */
71 struct delayed_work power_off_work;
73 /* clk enable or disable */
75 struct wake_lock wake_lock;
/* MMU fault/bus-error events latched by the hard IRQ for the threaded half. */
78 atomic_t mmu_page_fault;
79 atomic_t mmu_bus_error;
/* Singleton driver state; set once in probe, read everywhere. */
82 struct iep_drvdata *iep_drvdata1 = NULL;
83 iep_service_info iep_service;
/*
 * iep_reg_deinit - release everything attached to one register-job node.
 * With an IOMMU present, frees every ion handle on the job's
 * mem_region_list, then unlinks the node from its session list and from
 * the service status list. (Restored "&reg" where the dump had mangled
 * it into the mojibake "®".)
 */
85 static void iep_reg_deinit(struct iep_reg *reg)
87 #if defined(CONFIG_IEP_IOMMU)
88 struct iep_mem_region *mem_region = NULL, *n;
89 /* release memory region attach to this registers table.*/
90 if (iep_service.iommu_dev) {
91 list_for_each_entry_safe(mem_region, n, &reg->mem_region_list, reg_lnk) {
92 /*ion_unmap_iommu(iep_service.iommu_dev, iep_service.ion_client, mem_region->hdl);*/
93 ion_free(iep_service.ion_client, mem_region->hdl);
94 list_del_init(&mem_region->reg_lnk);
99 list_del_init(&reg->session_link);
100 list_del_init(&reg->status_link);
/* Move a job from the waiting queues to the ready queues, on both the
 * global service list and its owning session's list. (Restored "&reg"
 * where the dump had the mojibake "®".) */
104 static void iep_reg_from_wait_to_ready(struct iep_reg *reg)
106 list_del_init(&reg->status_link);
107 list_add_tail(&reg->status_link, &iep_service.ready);
109 list_del_init(&reg->session_link);
110 list_add_tail(&reg->session_link, &reg->session->ready);
/* Move a job from the ready queues to the running queues, on both the
 * global service list and its owning session's list. (Restored "&reg"
 * where the dump had the mojibake "®".) */
113 static void iep_reg_from_ready_to_running(struct iep_reg *reg)
115 list_del_init(&reg->status_link);
116 list_add_tail(&reg->status_link, &iep_service.running);
118 list_del_init(&reg->session_link);
119 list_add_tail(&reg->session_link, &reg->session->running);
/*
 * iep_del_running_list - retire every job on the running list after normal
 * completion: drop the per-session and global running counters and, once a
 * session has no more waiting work, mark it done and wake its waiters.
 * Takes iep_service.lock internally. (Restored "&reg" where the dump had
 * the mojibake "®".)
 */
122 static void iep_del_running_list(void)
127 mutex_lock(&iep_service.lock);
129 while (!list_empty(&iep_service.running)) {
131 reg = list_entry(iep_service.running.next, struct iep_reg, status_link);
133 atomic_dec(&reg->session->task_running);
134 atomic_dec(&iep_service.total_running);
136 if (list_empty(&reg->session->waiting)) {
137 atomic_set(&reg->session->done, 1);
138 atomic_inc(&reg->session->num_done);
139 wake_up(&reg->session->wait);
146 mutex_unlock(&iep_service.lock);
/* Debug helper: print the IEP module status word and hex-dump the first
 * 0x40 words of the register file (values past base+0x40 are not readable,
 * per the comment below). */
149 static void iep_dump(void)
151 struct iep_status sts;
153 sts = iep_get_status(iep_drvdata1->iep_base);
155 IEP_INFO("scl_sts: %u, dil_sts %u, wyuv_sts %u, ryuv_sts %u, wrgb_sts %u, rrgb_sts %u, voi_sts %u\n",
156 sts.scl_sts, sts.dil_sts, sts.wyuv_sts, sts.ryuv_sts, sts.wrgb_sts, sts.rrgb_sts, sts.voi_sts); {
157 int *reg = (int *)iep_drvdata1->iep_base;
160 /* could not read validate data from address after base+0x40 */
161 for (i = 0; i < 0x40; i++) {
162 IEP_INFO("%08x ", reg[i]);
/* four words per output row */
164 if ((i + 1) % 4 == 0) {
/* Flush all jobs off the running list after a hardware timeout. Like the
 * normal completion path but without bumping num_done. NOTE: contrary to
 * the stale comment in older revisions, this function takes
 * iep_service.lock itself — callers must NOT hold it. (Restored "&reg"
 * where the dump had the mojibake "®".) */
174 static void iep_del_running_list_timeout(void)
178 mutex_lock(&iep_service.lock);
180 while (!list_empty(&iep_service.running)) {
181 reg = list_entry(iep_service.running.next, struct iep_reg, status_link);
183 atomic_dec(&reg->session->task_running);
184 atomic_dec(&iep_service.total_running);
186 /* iep_soft_rst(iep_drvdata1->iep_base); */
190 if (list_empty(&reg->session->waiting)) {
191 atomic_set(&reg->session->done, 1);
192 wake_up(&reg->session->wait);
198 mutex_unlock(&iep_service.lock);
/* (Re)arm the delayed power-off; it fires IEP_POWER_OFF_DELAY jiffies
 * from now on the non-reentrant system workqueue. */
201 static inline void iep_queue_power_off_work(void)
203 queue_delayed_work(system_nrt_wq, &iep_drvdata1->power_off_work, IEP_POWER_OFF_DELAY);
/*
 * Power the IEP block up: push the pending power-off further into the
 * future (at most once per second), enable the power-domain/AXI/AHB
 * clocks, take a suspend-blocking wakelock and activate the IOMMU when
 * present. Returns early if the block is already enabled.
 */
206 static void iep_power_on(void)
209 ktime_t now = ktime_get();
/* Throttle the cancel+requeue dance to once per second. */
210 if (ktime_to_ns(ktime_sub(now, last)) > NSEC_PER_SEC) {
211 cancel_delayed_work_sync(&iep_drvdata1->power_off_work);
212 iep_queue_power_off_work();
216 if (iep_service.enable)
219 IEP_INFO("IEP Power ON\n");
221 /* iep_soft_rst(iep_drvdata1->iep_base); */
223 #ifdef IEP_CLK_ENABLE
/* pd_iep is optional (absent on some SoCs) — see probe. */
224 if (iep_drvdata1->pd_iep)
225 clk_prepare_enable(iep_drvdata1->pd_iep);
226 clk_prepare_enable(iep_drvdata1->aclk_iep);
227 clk_prepare_enable(iep_drvdata1->hclk_iep);
230 wake_lock(&iep_drvdata1->wake_lock);
232 #if defined(CONFIG_IEP_IOMMU)
233 if (iep_service.iommu_dev) {
234 rockchip_iovmm_activate(iep_service.iommu_dev);
238 iep_service.enable = true;
/*
 * Power the IEP block down: warn (and briefly delay) if tasks are still
 * running, deactivate the IOMMU, gate the clocks in reverse enable order
 * and drop the wakelock. No-op when already disabled.
 */
241 static void iep_power_off(void)
245 if (!iep_service.enable) {
249 IEP_INFO("IEP Power OFF\n");
251 total_running = atomic_read(&iep_service.total_running);
253 IEP_WARNING("power off when %d task running!!\n", total_running);
/* best-effort grace period for in-flight work before cutting clocks */
255 IEP_WARNING("delay 50 ms for running task\n");
259 #if defined(CONFIG_IEP_IOMMU)
260 if (iep_service.iommu_dev) {
261 rockchip_iovmm_deactivate(iep_service.iommu_dev);
265 #ifdef IEP_CLK_ENABLE
266 clk_disable_unprepare(iep_drvdata1->aclk_iep);
267 clk_disable_unprepare(iep_drvdata1->hclk_iep);
268 if (iep_drvdata1->pd_iep)
269 clk_disable_unprepare(iep_drvdata1->pd_iep);
272 wake_unlock(&iep_drvdata1->wake_lock);
273 iep_service.enable = false;
/* Delayed-work handler: power the block down when it is idle. If the
 * service mutex is contended or DPI (direct-path) mode is active, the
 * work re-queues itself and retries later. */
276 static void iep_power_off_work(struct work_struct *work)
278 if (mutex_trylock(&iep_service.lock) && !iep_drvdata1->dpi_mode) {
279 IEP_INFO("iep dpi mode inactivity\n")
281 mutex_unlock(&iep_service.lock);
283 /* Come back later if the device is busy... */
284 iep_queue_power_off_work();
/* Hooks into the Rockchip framebuffer driver (rk_fb) used by the
 * direct-path (DPI) mode below; defined elsewhere in the kernel tree. */
288 extern void rk_direct_fb_show(struct fb_info *fbi);
289 extern struct fb_info* rk_get_fb(int fb_id);
290 extern bool rk_fb_poll_wait_frame_complete(void);
291 extern int rk_fb_dpi_open(bool open);
292 extern int rk_fb_dpi_win_sel(int layer_id);
/*
 * iep_config_lcdc - program the LCD controller's direct-path window from an
 * IEP output job: select the DPI window, translate the IEP pixel format to
 * a HAL format plus fb bitfield layout, fill in geometry/offsets, and push
 * the frame with rk_direct_fb_show().
 * NOTE(review): the extraction dropped lines here (notably the "break"s
 * between the switch cases and the closing braces) — the cases do not
 * actually fall through in the full source; confirm before editing.
 */
294 static void iep_config_lcdc(struct iep_reg *reg)
/* layer 0 -> fb window 0, anything else -> window 1 */
300 fbi = reg->layer == 0 ? 0 : 1;
302 rk_fb_dpi_win_sel(fbi);
306 switch (reg->format) {
307 case IEP_FORMAT_ARGB_8888:
308 case IEP_FORMAT_ABGR_8888:
309 fmt = HAL_PIXEL_FORMAT_RGBA_8888;
310 fb->var.bits_per_pixel = 32;
312 fb->var.red.length = 8;
313 fb->var.red.offset = 16;
314 fb->var.red.msb_right = 0;
316 fb->var.green.length = 8;
317 fb->var.green.offset = 8;
318 fb->var.green.msb_right = 0;
320 fb->var.blue.length = 8;
321 fb->var.blue.offset = 0;
322 fb->var.blue.msb_right = 0;
324 fb->var.transp.length = 8;
325 fb->var.transp.offset = 24;
326 fb->var.transp.msb_right = 0;
329 case IEP_FORMAT_BGRA_8888:
330 fmt = HAL_PIXEL_FORMAT_BGRA_8888;
331 fb->var.bits_per_pixel = 32;
333 case IEP_FORMAT_RGB_565:
334 fmt = HAL_PIXEL_FORMAT_RGB_565;
335 fb->var.bits_per_pixel = 16;
/* RGB565 bitfields: 5/6/5 from MSB to LSB */
337 fb->var.red.length = 5;
338 fb->var.red.offset = 11;
339 fb->var.red.msb_right = 0;
341 fb->var.green.length = 6;
342 fb->var.green.offset = 5;
343 fb->var.green.msb_right = 0;
345 fb->var.blue.length = 5;
346 fb->var.blue.offset = 0;
347 fb->var.blue.msb_right = 0;
350 case IEP_FORMAT_YCbCr_422_SP:
351 fmt = HAL_PIXEL_FORMAT_YCbCr_422_SP;
352 fb->var.bits_per_pixel = 16;
354 case IEP_FORMAT_YCbCr_420_SP:
355 fmt = HAL_PIXEL_FORMAT_YCrCb_NV12;
356 fb->var.bits_per_pixel = 16;
358 case IEP_FORMAT_YCbCr_422_P:
359 case IEP_FORMAT_YCrCb_422_SP:
360 case IEP_FORMAT_YCrCb_422_P:
361 case IEP_FORMAT_YCrCb_420_SP:
362 case IEP_FORMAT_YCbCr_420_P:
363 case IEP_FORMAT_YCrCb_420_P:
364 case IEP_FORMAT_RGBA_8888:
365 case IEP_FORMAT_BGR_565:
366 /* unsupported format */
367 IEP_ERR("unsupported format %d\n", reg->format);
/* active + virtual geometry straight from the job descriptor */
375 fb->var.xres = reg->act_width;
376 fb->var.yres = reg->act_height;
377 fb->var.xres_virtual = reg->act_width;
378 fb->var.yres_virtual = reg->act_height;
/* rk_fb "nonstd" packing: [31:20]=y offset, [19:8]=x offset, [7:0]=format */
379 fb->var.nonstd = ((reg->off_y & 0xFFF) << 20) +
380 ((reg->off_x & 0xFFF) << 8) + (fmt & 0xFF);
382 ((reg->vir_height & 0xFFF) << 20) +
383 ((reg->vir_width & 0xFFF) << 8) + 0;/*win0 xsize & ysize*/
385 rk_direct_fb_show(fb);
/*
 * iep_switch_dpi - toggle direct-path (DPI) mode to match the job: entering
 * DPI opens the fb direct path and programs the LCDC window; leaving it
 * closes the path and waits for the current frame to finish scanning out
 * before declaring DPI inactive.
 */
388 static int iep_switch_dpi(struct iep_reg *reg)
391 if (!iep_drvdata1->dpi_mode) {
/* first DPI job: open the direct path once, then configure the window */
393 rk_fb_dpi_open(true);
394 iep_drvdata1->dpi_mode = true;
396 iep_config_lcdc(reg);
398 if (iep_drvdata1->dpi_mode) {
400 /* wait_lcdc_dpi_close(); */
402 rk_fb_dpi_open(false);
/* block until the in-flight frame completes scan-out */
403 status = rk_fb_poll_wait_frame_complete();
405 iep_drvdata1->dpi_mode = false;
406 IEP_INFO("%s %d, iep dpi inactivated\n",
/*
 * iep_reg_copy_to_hw - write a job's shadow register image into the IEP
 * MMIO register file, section by section: config, command, address and
 * (when the MMU is compiled in) MMU registers.
 */
414 static void iep_reg_copy_to_hw(struct iep_reg *reg)
418 u32 *pbase = (u32 *)iep_drvdata1->iep_base;
420 /* config registers */
421 for (i = 0; i < IEP_CNF_REG_LEN; i++)
422 pbase[IEP_CNF_REG_BASE + i] = reg->reg[IEP_CNF_REG_BASE + i];
424 /* command registers */
425 for (i = 0; i < IEP_CMD_REG_LEN; i++)
426 pbase[IEP_CMD_REG_BASE + i] = reg->reg[IEP_CMD_REG_BASE + i];
428 /* address registers */
429 for (i = 0; i < IEP_ADD_REG_LEN; i++)
430 pbase[IEP_ADD_REG_BASE + i] = reg->reg[IEP_ADD_REG_BASE + i];
432 #if defined(CONFIG_IEP_MMU)
434 for (i = 0; i < IEP_MMU_REG_LEN; i++)
435 pbase[IEP_MMU_REG_BASE + i] = reg->reg[IEP_MMU_REG_BASE + i];
438 /* dmac_flush_range(&pbase[0], &pbase[IEP_REG_LEN]); */
439 /* outer_flush_range(virt_to_phys(&pbase[0]),virt_to_phys(&pbase[IEP_REG_LEN])); */
444 /** switch fields order before the next lcdc frame start
/* Flip the deinterlace top/bottom field selection in the hardware so the
 * next DPI frame consumes the opposite field (I4O1B<->I4O1T, I2O1B<->I2O1T),
 * then kick the framebuffer to show it. */
446 static void iep_switch_fields_order(void)
448 void *pbase = (void *)iep_drvdata1->iep_base;
449 int mode = iep_get_deinterlace_mode(pbase);
453 case dein_mode_I4O1B:
454 iep_set_deinterlace_mode(dein_mode_I4O1T, pbase);
456 case dein_mode_I4O1T:
457 iep_set_deinterlace_mode(dein_mode_I4O1B, pbase);
459 case dein_mode_I2O1B:
460 iep_set_deinterlace_mode(dein_mode_I2O1T, pbase);
462 case dein_mode_I2O1T:
463 iep_set_deinterlace_mode(dein_mode_I2O1B, pbase);
470 rk_direct_fb_show(fb);
472 /*iep_switch_input_address(pbase);*/
475 /* Takes iep_service.lock internally; do not call with it held. */
/* Promote the oldest waiting job to ready (when the ready slot is free),
 * copy its registers to the hardware, and in DPI mode flip the field
 * order for the upcoming frame. */
476 static void iep_try_set_reg(void)
480 mutex_lock(&iep_service.lock);
482 if (list_empty(&iep_service.ready)) {
483 if (!list_empty(&iep_service.waiting)) {
484 reg = list_entry(iep_service.waiting.next, struct iep_reg, status_link);
489 iep_reg_from_wait_to_ready(reg);
490 atomic_dec(&iep_service.waitcnt);
492 /*iep_soft_rst(iep_drvdata1->iep_base);*/
494 iep_reg_copy_to_hw(reg);
497 if (iep_drvdata1->dpi_mode)
498 iep_switch_fields_order();
501 mutex_unlock(&iep_service.lock);
/*
 * iep_try_start_frm - if the hardware is idle and a job is ready, move it
 * to running, enable the frame-end interrupt, commit the configuration and
 * kick off the frame. Bumps the per-session and global running counters.
 * Takes iep_service.lock internally. (Restored "&reg" where the dump had
 * the mojibake "®".)
 */
504 static void iep_try_start_frm(void)
508 mutex_lock(&iep_service.lock);
510 if (list_empty(&iep_service.running)) {
511 if (!list_empty(&iep_service.ready)) {
512 reg = list_entry(iep_service.ready.next, struct iep_reg, status_link);
516 iep_reg_from_ready_to_running(reg);
517 iep_config_frame_end_int_en(iep_drvdata1->iep_base);
518 iep_config_done(iep_drvdata1->iep_base);
521 atomic_inc(&reg->session->task_running);
522 atomic_inc(&iep_service.total_running);
523 iep_config_frm_start(iep_drvdata1->iep_base);
527 mutex_unlock(&iep_service.lock);
/*
 * iep_isr - threaded half of the IRQ handler. Consumes the events latched
 * by iep_irq(): on frame completion it retires the running job (flipping
 * field order first in DPI mode); on an MMU page fault it tries to build
 * the missing PTE for the faulting VA and resume; on a bus error it
 * force-resets the IEP MMU.
 */
530 static irqreturn_t iep_isr(int irq, void *dev_id)
532 if (atomic_read(&iep_drvdata1->iep_int) > 0) {
533 if (iep_service.enable) {
534 if (list_empty(&iep_service.waiting)) {
535 if (iep_drvdata1->dpi_mode) {
536 iep_switch_fields_order();
539 iep_del_running_list();
545 atomic_dec(&iep_drvdata1->iep_int);
548 #if defined(CONFIG_IEP_MMU)
549 if (atomic_read(&iep_drvdata1->mmu_page_fault) > 0) {
551 if (!list_empty(&iep_service.running)) {
552 uint32_t va = iep_probe_mmu_page_fault_addr(iep_drvdata1->iep_base);
553 struct iep_reg *reg = list_entry(iep_service.running.next, struct iep_reg, status_link);
554 if (0 > rk_mmu_generate_pte_from_va(reg->session, va)) {
555 IEP_ERR("Generate PTE from Virtual Address 0x%08x failed\n", va);
/* flush stale TLB entries, then acknowledge the fault so HW resumes */
557 iep_config_mmu_cmd(iep_drvdata1->iep_base, MMU_ZAP_CACHE);
558 iep_config_mmu_cmd(iep_drvdata1->iep_base, MMU_PAGE_FAULT_DONE);
561 IEP_ERR("Page Fault occur when IEP IDLE\n");
564 atomic_dec(&iep_drvdata1->mmu_page_fault);
567 if (atomic_read(&iep_drvdata1->mmu_bus_error) > 0) {
568 /* reset iep mmu module */
569 IEP_ERR("Bus Error!!!\n");
570 iep_config_mmu_cmd(iep_drvdata1->iep_base, MMU_FORCE_RESET);
571 atomic_dec(&iep_drvdata1->mmu_bus_error);
/*
 * iep_irq - hard (top) half of the IRQ handler. Clears each pending
 * interrupt source in hardware and latches it into an atomic counter for
 * the threaded half, then requests the thread to run.
 */
578 static irqreturn_t iep_irq(int irq, void *dev_id)
581 void *pbase = (void *)iep_drvdata1->iep_base;
583 #if defined(CONFIG_IEP_MMU)
584 struct iep_mmu_int_status mmu_int_status;
586 mmu_int_status = iep_probe_mmu_int_status(pbase);
587 if (mmu_int_status.page_fault) {
588 iep_config_mmu_page_fault_int_clr(pbase);
589 atomic_inc(&iep_drvdata1->mmu_page_fault);
592 if (mmu_int_status.read_bus_error) {
593 iep_config_mmu_read_bus_error_int_clr(pbase);
594 atomic_inc(&iep_drvdata1->mmu_bus_error);
598 if (iep_probe_int(pbase)) {
599 iep_config_frame_end_int_clr(pbase);
600 atomic_inc(&iep_drvdata1->iep_int);
/* defer the real work (list manipulation, PTE fixups) to iep_isr() */
603 return IRQ_WAKE_THREAD;
/* Tear down every job a session still owns — waiting, ready and running
 * lists alike — via iep_reg_deinit() on release. */
606 static void iep_service_session_clear(iep_session *session)
608 struct iep_reg *reg, *n;
610 list_for_each_entry_safe(reg, n, &session->waiting, session_link) {
614 list_for_each_entry_safe(reg, n, &session->ready, session_link) {
618 list_for_each_entry_safe(reg, n, &session->running, session_link) {
/*
 * iep_open - char-device open: allocate a per-client session, initialise
 * its job lists/waitqueue/counters, register it with the service under the
 * lock, and stash it in filp->private_data for later ioctl/poll/release.
 * Returns -ENOMEM style failure if allocation fails (error path truncated
 * in this dump).
 */
623 static int iep_open(struct inode *inode, struct file *filp)
625 //DECLARE_WAITQUEUE(wait, current);
626 iep_session *session = (iep_session *)kzalloc(sizeof(iep_session),
628 if (NULL == session) {
629 IEP_ERR("unable to allocate memory for iep_session.\n");
633 session->pid = current->pid;
634 INIT_LIST_HEAD(&session->waiting);
635 INIT_LIST_HEAD(&session->ready);
636 INIT_LIST_HEAD(&session->running);
637 INIT_LIST_HEAD(&session->list_session);
638 init_waitqueue_head(&session->wait);
639 /*add_wait_queue(&session->wait, wait);*/
640 /* no need to protect */
641 mutex_lock(&iep_service.lock);
642 list_add_tail(&session->list_session, &iep_service.session);
643 mutex_unlock(&iep_service.lock);
644 atomic_set(&session->task_running, 0);
645 atomic_set(&session->num_done, 0);
647 #if defined(CONFIG_IEP_MMU)
/* per-session MMU directory + page-table-entry bookkeeping */
648 rk_mmu_init_dte_table(session);
649 INIT_LIST_HEAD(&session->pte_list);
652 filp->private_data = (void *)session;
/* IEP jobs are not seekable streams */
654 return nonseekable_open(inode, filp);
/*
 * iep_release - char-device close: warn if the session still has tasks in
 * flight, wake any waiters, then unlink the session from the service and
 * destroy all of its queued jobs under the lock.
 */
657 static int iep_release(struct inode *inode, struct file *filp)
660 iep_session *session = (iep_session *)filp->private_data;
665 task_running = atomic_read(&session->task_running);
668 IEP_ERR("iep_service session %d still "
669 "has %d task running when closing\n",
670 session->pid, task_running);
675 wake_up(&session->wait);
676 mutex_lock(&iep_service.lock);
677 list_del(&session->list_session);
678 iep_service_session_clear(session);
680 mutex_unlock(&iep_service.lock);
/* poll/select support: readable once the session's done flag is set.
 * NOTE(review): POLL_IN is a siginfo si_code, not a poll mask bit — this
 * likely should be POLLIN | POLLRDNORM; confirm before changing. */
685 static unsigned int iep_poll(struct file *filp, poll_table *wait)
688 iep_session *session = (iep_session *)filp->private_data;
691 poll_wait(filp, &session->wait, wait);
692 if (atomic_read(&session->done))
693 mask |= POLL_IN | POLLRDNORM;
/*
 * iep_get_result_sync - block until the session's current job completes or
 * IEP_TIMEOUT_DELAY expires; on timeout the running list is force-flushed.
 * NOTE(review): wait_event_timeout() (non-interruptible) never returns a
 * negative value, so the ret < 0 branch looks unreachable — confirm.
 */
698 static int iep_get_result_sync(iep_session *session)
704 ret = wait_event_timeout(session->wait,
705 atomic_read(&session->done), IEP_TIMEOUT_DELAY);
707 if (unlikely(ret < 0)) {
708 IEP_ERR("sync pid %d wait task ret %d\n", session->pid, ret);
709 iep_del_running_list();
711 } else if (0 == ret) {
712 IEP_ERR("sync pid %d wait %d task done timeout\n",
713 session->pid, atomic_read(&session->task_running));
714 iep_del_running_list_timeout();
/* Non-blocking result path: completion is reported via poll()/the done
 * flag instead of waiting here (body truncated in this dump). */
723 static void iep_get_result_async(iep_session *session)
/*
 * iep_ioctl - native ioctl entry point, serialized by iep_service.mutex.
 * IEP_SET_PARAMETER copies an IEP_MSG from userspace and enqueues a job
 * (rejected when 10+ jobs are already waiting); the SYNC/ASYNC result and
 * task-release commands delegate to the helpers above.
 */
729 static long iep_ioctl(struct file *filp, uint32_t cmd, unsigned long arg)
732 iep_session *session = (iep_session *)filp->private_data;
734 if (NULL == session) {
735 IEP_ERR("%s [%d] iep thread session is null\n",
736 __FUNCTION__, __LINE__);
740 mutex_lock(&iep_service.mutex);
743 case IEP_SET_PARAMETER:
746 msg = (struct IEP_MSG *)kzalloc(sizeof(struct IEP_MSG),
749 if (copy_from_user(msg, (struct IEP_MSG *)arg,
750 sizeof(struct IEP_MSG))) {
751 IEP_ERR("copy_from_user failure\n");
/* simple back-pressure: cap the waiting queue at 10 jobs */
757 if (atomic_read(&iep_service.waitcnt) < 10) {
758 #if defined(CONFIG_IEP_IOMMU)
761 iep_config(session, msg);
762 atomic_inc(&iep_service.waitcnt);
764 IEP_ERR("iep task queue full\n");
769 /** REGISTER CONFIG must accord to Timing When DPI mode
771 if (!iep_drvdata1->dpi_mode)
776 case IEP_GET_RESULT_SYNC:
777 if (0 > iep_get_result_sync(session)) {
781 case IEP_GET_RESULT_ASYNC:
782 iep_get_result_async(session);
784 case IEP_RELEASE_CURRENT_TASK:
785 iep_del_running_list_timeout();
790 IEP_ERR("unknown ioctl cmd!\n");
793 mutex_unlock(&iep_service.mutex);
/*
 * compat_iep_ioctl - 32-bit-userspace variant of iep_ioctl: identical flow,
 * but the user pointer arrives as a compat_uptr_t and the command numbers
 * are the COMPAT_* encodings.
 */
799 static long compat_iep_ioctl(struct file *filp, uint32_t cmd,
803 iep_session *session = (iep_session *)filp->private_data;
805 if (NULL == session) {
806 IEP_ERR("%s [%d] iep thread session is null\n",
811 mutex_lock(&iep_service.mutex);
814 case COMPAT_IEP_SET_PARAMETER:
818 msg = kzalloc(sizeof(*msg), GFP_KERNEL);
/* translate the 32-bit user pointer before copying the message */
822 (msg, compat_ptr((compat_uptr_t)arg),
823 sizeof(struct IEP_MSG))) {
824 IEP_ERR("copy_from_user failure\n");
/* same 10-job back-pressure cap as the native path */
830 if (atomic_read(&iep_service.waitcnt) < 10) {
831 #if defined(CONFIG_IEP_IOMMU)
834 iep_config(session, msg);
835 atomic_inc(&iep_service.waitcnt);
837 IEP_ERR("iep task queue full\n");
842 /** REGISTER CONFIG must accord to Timing When DPI mode
844 if (!iep_drvdata1->dpi_mode)
849 case COMPAT_IEP_GET_RESULT_SYNC:
850 if (0 > iep_get_result_sync(session))
853 case COMPAT_IEP_GET_RESULT_ASYNC:
854 iep_get_result_async(session);
856 case COMPAT_IEP_RELEASE_CURRENT_TASK:
857 iep_del_running_list_timeout();
862 IEP_ERR("unknown ioctl cmd!\n");
865 mutex_unlock(&iep_service.mutex);
/* Char-device operations exposed through the misc device below. */
871 struct file_operations iep_fops = {
872 .owner = THIS_MODULE,
874 .release = iep_release,
876 .unlocked_ioctl = iep_ioctl,
878 .compat_ioctl = compat_iep_ioctl,
882 static struct miscdevice iep_dev = {
888 #ifdef CONFIG_IEP_IOMMU
/* Look up the system-MMU platform device by its DT compatible string;
 * returns NULL (with a console message) when the node or device is absent. */
889 static struct device* rockchip_get_sysmmu_device_by_compatible(
892 struct device_node *dn = NULL;
893 struct platform_device *pd = NULL;
894 struct device *ret = NULL;
896 dn = of_find_compatible_node(NULL, NULL, compt);
898 printk("can't find device node %s \r\n", compt);
902 pd = of_find_device_by_node(dn);
904 printk("can't find platform device in device node %s \r\n",
913 #ifdef CONFIG_IOMMU_API
/* Attach the sysmmu to the device's archdata when the IOMMU API is built
 * in; the #else variant below is a no-op stub. */
914 static inline void platform_set_sysmmu(struct device *iommu,
917 dev->archdata.iommu = iommu;
920 static inline void platform_set_sysmmu(struct device *iommu,
/*
 * iep_sysmmu_fault_handler - IOMMU fault callback: log the faulting address
 * and every mapped region of the currently running job for diagnosis, then
 * retire the running list so the hardware does not stay wedged.
 * (Restored "&reg" where the dump had the mojibake "®".)
 */
926 static int iep_sysmmu_fault_handler(struct device *dev,
927 enum rk_iommu_inttype itype,
928 unsigned long pgtable_base,
929 unsigned long fault_addr, unsigned int status)
931 struct iep_reg *reg = list_entry(iep_service.running.next,
932 struct iep_reg, status_link);
934 struct iep_mem_region *mem, *n;
936 pr_info("iep, fault addr 0x%08x\n", (u32)fault_addr);
937 list_for_each_entry_safe(mem, n,
938 &reg->mem_region_list,
940 pr_info("iep, mem region [%02d] 0x%08x %ld\n",
941 i, (u32)mem->iova, mem->len);
945 pr_alert("iep, page fault occur\n");
947 iep_del_running_list();
954 #if defined(CONFIG_IEP_IOMMU)
955 extern struct ion_client* rockchip_ion_client_create(const char *name);
/*
 * iep_drv_probe - platform-driver probe: allocate driver state, initialise
 * the service lists/locks, acquire clocks (pd_iep optional), set up the
 * delayed power-off work and wakelock, map the register resource, request
 * the threaded IRQ, register the misc device, and (when enabled) wire up
 * the ion client and IOMMU with the fault handler. Error paths unwind the
 * IRQ/mapping/wakelock (labels truncated in this dump).
 */
957 static int iep_drv_probe(struct platform_device *pdev)
959 struct iep_drvdata *data;
961 struct resource *res = NULL;
962 #if defined(CONFIG_IEP_IOMMU)
964 struct device *mmu_dev = NULL;
965 struct device_node *np = pdev->dev.of_node;
/* DT opt-in for the IOMMU path */
966 of_property_read_u32(np, "iommu_enabled", &iommu_en);
969 data = (struct iep_drvdata *)devm_kzalloc(&pdev->dev,
970 sizeof(struct iep_drvdata), GFP_KERNEL);
972 IEP_ERR("failed to allocate driver data.\n");
978 INIT_LIST_HEAD(&iep_service.waiting);
979 INIT_LIST_HEAD(&iep_service.ready);
980 INIT_LIST_HEAD(&iep_service.running);
981 INIT_LIST_HEAD(&iep_service.done);
982 INIT_LIST_HEAD(&iep_service.session);
983 atomic_set(&iep_service.waitcnt, 0);
984 mutex_init(&iep_service.lock);
985 atomic_set(&iep_service.total_running, 0);
986 iep_service.enable = false;
988 #ifdef IEP_CLK_ENABLE
/* pd_iep may legitimately be absent; aclk/hclk are mandatory */
989 data->pd_iep = devm_clk_get(&pdev->dev, "pd_iep");
990 if (IS_ERR(data->pd_iep)) {
991 IEP_ERR("failed to find iep power down clock source.\n");
995 data->aclk_iep = devm_clk_get(&pdev->dev, "aclk_iep");
996 if (IS_ERR(data->aclk_iep)) {
997 IEP_ERR("failed to find iep axi clock source.\n");
1002 data->hclk_iep = devm_clk_get(&pdev->dev, "hclk_iep");
1003 if (IS_ERR(data->hclk_iep)) {
1004 IEP_ERR("failed to find iep ahb clock source.\n");
1010 iep_service.enable = false;
1011 INIT_DELAYED_WORK(&data->power_off_work, iep_power_off_work);
1012 wake_lock_init(&data->wake_lock, WAKE_LOCK_SUSPEND, "iep");
1014 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1016 data->iep_base = (void *)devm_ioremap_resource(&pdev->dev, res);
/* NOTE(review): devm_ioremap_resource() returns ERR_PTR on failure, so an
 * IS_ERR() check is the usual idiom here — confirm against full source. */
1017 if (data->iep_base == NULL) {
1018 IEP_ERR("iep ioremap failed\n");
1023 atomic_set(&data->iep_int, 0);
1024 atomic_set(&data->mmu_page_fault, 0);
1025 atomic_set(&data->mmu_bus_error, 0);
1028 data->irq0 = platform_get_irq(pdev, 0);
1029 if (data->irq0 <= 0) {
1030 IEP_ERR("failed to get iep irq resource (%d).\n", data->irq0);
1035 /* request the IRQ */
/* iep_irq = hard half, iep_isr = threaded half (see above) */
1036 ret = devm_request_threaded_irq(&pdev->dev, data->irq0, iep_irq,
1037 iep_isr, IRQF_SHARED, dev_name(&pdev->dev), pdev);
1039 IEP_ERR("iep request_irq failed (%d).\n", ret);
1043 mutex_init(&iep_service.mutex);
1045 platform_set_drvdata(pdev, data);
1047 ret = misc_register(&iep_dev);
1049 IEP_ERR("cannot register miscdev (%d)\n", ret);
1050 goto err_misc_register;
1053 #if defined(CONFIG_IEP_IOMMU)
1054 iep_service.iommu_dev = NULL;
1057 iep_service.ion_client = rockchip_ion_client_create("iep");
1058 if (IS_ERR(iep_service.ion_client)) {
1059 IEP_ERR("failed to create ion client for vcodec");
1060 return PTR_ERR(iep_service.ion_client);
1062 IEP_INFO("iep ion client create success!\n");
1065 mmu_dev = rockchip_get_sysmmu_device_by_compatible(
1066 IEP_IOMMU_COMPATIBLE_NAME);
1069 platform_set_sysmmu(mmu_dev, &pdev->dev);
1070 rockchip_iovmm_activate(&pdev->dev);
1073 rockchip_iovmm_set_fault_handler(&pdev->dev,
1074 iep_sysmmu_fault_handler);
1076 iep_service.iommu_dev = &pdev->dev;
1081 IEP_INFO("IEP Driver loaded succesfully\n");
/* --- error unwind (labels lost in extraction) --- */
1086 free_irq(data->irq0, pdev);
1089 if (data->iep_base) {
1090 devm_ioremap_release(&pdev->dev, res);
1092 devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
1095 wake_lock_destroy(&data->wake_lock);
1096 #ifdef IEP_CLK_ENABLE
/*
 * iep_drv_remove - undo probe: destroy the wakelock, deregister the misc
 * device, free the IRQ, release the MMIO mapping/region and put the clocks.
 */
1102 static int iep_drv_remove(struct platform_device *pdev)
1104 struct iep_drvdata *data = platform_get_drvdata(pdev);
1105 struct resource *res;
1107 wake_lock_destroy(&data->wake_lock);
1109 misc_deregister(&(data->miscdev));
1110 free_irq(data->irq0, &data->miscdev);
1111 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1112 devm_ioremap_release(&pdev->dev, res);
1113 devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
1115 #ifdef IEP_CLK_ENABLE
1117 devm_clk_put(&pdev->dev, data->aclk_iep);
1120 devm_clk_put(&pdev->dev, data->hclk_iep);
1123 devm_clk_put(&pdev->dev, data->pd_iep);
1129 #if defined(CONFIG_OF)
/* Device-tree match table: binds this driver to "rockchip,iep" nodes. */
1130 static const struct of_device_id iep_dt_ids[] = {
1131 { .compatible = "rockchip,iep", },
/* Platform-driver descriptor tying probe/remove and the DT table together. */
1136 static struct platform_driver iep_driver = {
1137 .probe = iep_drv_probe,
1138 .remove = iep_drv_remove,
1140 .owner = THIS_MODULE,
1142 #if defined(CONFIG_OF)
1143 .of_match_table = of_match_ptr(iep_dt_ids),
1148 #ifdef CONFIG_PROC_FS
1149 #include <linux/proc_fs.h>
1150 #include <linux/seq_file.h>
/* /proc/iep seq_file body: same status word + first-0x40-words register
 * dump as iep_dump(), rendered into the seq buffer instead of the log. */
1152 static int proc_iep_show(struct seq_file *s, void *v)
1154 struct iep_status sts;
1155 //mutex_lock(&iep_service.mutex);
1157 seq_printf(s, "\nIEP Modules Status:\n");
1158 sts = iep_get_status(iep_drvdata1->iep_base);
1159 seq_printf(s, "scl_sts: %u, dil_sts %u, wyuv_sts %u, "
1160 "ryuv_sts %u, wrgb_sts %u, rrgb_sts %u, voi_sts %u\n",
1161 sts.scl_sts, sts.dil_sts, sts.wyuv_sts, sts.ryuv_sts,
1162 sts.wrgb_sts, sts.rrgb_sts, sts.voi_sts); {
1163 int *reg = (int *)iep_drvdata1->iep_base;
1166 /* could not read validate data from address after base+0x40 */
1167 for (i = 0; i < 0x40; i++) {
1168 seq_printf(s, "%08x ", reg[i]);
1170 if ((i + 1) % 4 == 0)
1171 seq_printf(s, "\n");
1174 seq_printf(s, "\n");
1177 //mutex_unlock(&iep_service.mutex);
/* Standard single_open() shim for the /proc/iep seq_file. */
1182 static int proc_iep_open(struct inode *inode, struct file *file)
1184 return single_open(file, proc_iep_show, NULL);
/* File operations backing /proc/iep. */
1187 static const struct file_operations proc_iep_fops = {
1188 .open = proc_iep_open,
1190 .llseek = seq_lseek,
1191 .release = single_release,
/* Create the /proc/iep debug entry. */
1194 static int __init iep_proc_init(void)
1196 proc_create("iep", 0, NULL, &proc_iep_fops);
/* Remove the /proc/iep debug entry. */
1200 static void __exit iep_proc_release(void)
1202 remove_proc_entry("iep", NULL);
1206 #ifdef IEP_TEST_CASE
1207 void iep_test_case0(void);
/* Module init: register the platform driver, set up /proc/iep, and run
 * the built-in self test when IEP_TEST_CASE is compiled in. */
1210 static int __init iep_init(void)
1214 if ((ret = platform_driver_register(&iep_driver)) != 0) {
1215 IEP_ERR("Platform device register failed (%d).\n", ret);
1219 #ifdef CONFIG_PROC_FS
1223 IEP_INFO("Module initialized.\n");
1225 #ifdef IEP_TEST_CASE
/* Module exit: tear down /proc/iep and unregister the platform driver. */
1232 static void __exit iep_exit(void)
1234 IEP_ERR("%s IN\n", __func__);
1235 #ifdef CONFIG_PROC_FS
1240 platform_driver_unregister(&iep_driver);
1243 module_init(iep_init);
1244 module_exit(iep_exit);
1246 /* Module information */
1247 MODULE_AUTHOR("ljf@rock-chips.com");
1248 MODULE_DESCRIPTION("Driver for iep device");
1249 MODULE_LICENSE("GPL");
1251 #ifdef IEP_TEST_CASE
1253 #include "yuv420sp_480x480_interlaced.h"
1254 #include "yuv420sp_480x480_deinterlaced_i2o1.h"
1256 //unsigned char tmp_buf[480*480*3/2];
/*
 * iep_test_case0 - built-in self test (IEP_TEST_CASE only): deinterlace a
 * canned 480x480 YUV420SP frame in I2O1 mode into a scratch buffer and
 * byte-compare the result against a golden reference, logging the first
 * few mismatches. Uses a stack-local fake session and physical addresses
 * obtained via virt_to_phys with explicit cache flushes around DMA.
 */
1258 void iep_test_case0(void)
1261 iep_session session;
1262 unsigned int phy_src, phy_dst, phy_tmp;
1265 unsigned char *tmp_buf;
1267 tmp_buf = kmalloc(480 * 480 * 3 / 2, GFP_KERNEL);
/* minimal fake session — same fields iep_open() would initialise */
1269 session.pid = current->pid;
1270 INIT_LIST_HEAD(&session.waiting);
1271 INIT_LIST_HEAD(&session.ready);
1272 INIT_LIST_HEAD(&session.running);
1273 INIT_LIST_HEAD(&session.list_session);
1274 init_waitqueue_head(&session.wait);
1275 list_add_tail(&session.list_session, &iep_service.session);
1276 atomic_set(&session.task_running, 0);
1277 atomic_set(&session.num_done, 0);
1279 memset(&msg, 0, sizeof(struct IEP_MSG));
1280 memset(tmp_buf, 0xCC, 480 * 480 * 3 / 2);
/* flush CPU caches so the device sees the poison pattern */
1282 dmac_flush_range(&tmp_buf[0], &tmp_buf[480 * 480 * 3 / 2]);
1283 outer_flush_range(virt_to_phys(&tmp_buf[0]), virt_to_phys(&tmp_buf[480 * 480 * 3 / 2]));
1285 phy_src = virt_to_phys(&yuv420sp_480x480_interlaced[0]);
1286 phy_tmp = virt_to_phys(&tmp_buf[0]);
1287 phy_dst = virt_to_phys(&yuv420sp_480x480_deinterlaced_i2o1[0]);
1289 dmac_flush_range(&yuv420sp_480x480_interlaced[0], &yuv420sp_480x480_interlaced[480 * 480 * 3 / 2]);
1290 outer_flush_range(virt_to_phys(&yuv420sp_480x480_interlaced[0]), virt_to_phys(&yuv420sp_480x480_interlaced[480 * 480 * 3 / 2]));
1292 IEP_INFO("*********** IEP MSG GENARATE ************\n");
1294 msg.src.act_w = 480;
1295 msg.src.act_h = 480;
1298 msg.src.vir_w = 480;
1299 msg.src.vir_h = 480;
1300 msg.src.format = IEP_FORMAT_YCbCr_420_SP;
1301 msg.src.mem_addr = (uint32_t *)phy_src;
/* chroma plane starts right after the 480x480 luma plane */
1302 msg.src.uv_addr = (uint32_t *)(phy_src + 480 * 480);
1305 msg.dst.act_w = 480;
1306 msg.dst.act_h = 480;
1309 msg.dst.vir_w = 480;
1310 msg.dst.vir_h = 480;
1311 msg.dst.format = IEP_FORMAT_YCbCr_420_SP;
1312 msg.dst.mem_addr = (uint32_t *)phy_tmp;
1313 msg.dst.uv_addr = (uint32_t *)(phy_tmp + 480 * 480);
1316 msg.dein_mode = IEP_DEINTERLACE_MODE_I2O1;
1317 msg.field_order = FIELD_ORDER_BOTTOM_FIRST;
1319 IEP_INFO("*********** IEP TEST CASE 0 ************\n");
1321 iep_config(&session, &msg);
1323 if (0 > iep_get_result_sync(&session)) {
1324 IEP_INFO("%s failed, timeout\n", __func__);
/* invalidate caches before the CPU reads back device output */
1330 dmac_flush_range(&tmp_buf[0], &tmp_buf[480 * 480 * 3 / 2]);
1331 outer_flush_range(virt_to_phys(&tmp_buf[0]), virt_to_phys(&tmp_buf[480 * 480 * 3 / 2]));
1333 IEP_INFO("*********** RESULT CHECKING ************\n");
1335 for (i = 0; i < 480 * 480 * 3 / 2; i++) {
1336 if (tmp_buf[i] != yuv420sp_480x480_deinterlaced_i2o1[i]) {
1337 IEP_INFO("diff occur position %d, 0x%02x 0x%02x\n", i, tmp_buf[i], yuv420sp_480x480_deinterlaced_i2o1[i]);
/* loop ran to completion => every byte matched the golden output */
1346 if (i == 480 * 480 * 3 / 2)
1347 IEP_INFO("IEP pass the checking\n");