From: zsq Date: Thu, 1 Mar 2012 03:32:53 +0000 (-0800) Subject: modify RGA driver so that some cases work correctly X-Git-Tag: firefly_0821_release~9595^2~105 X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=b7730afe17afaa19ed22657b865ba5e10c9ee841;p=firefly-linux-kernel-4.4.55.git modify RGA driver so that some cases work correctly --- diff --git a/drivers/video/rockchip/rga/Kconfig b/drivers/video/rockchip/rga/Kconfig old mode 100644 new mode 100755 diff --git a/drivers/video/rockchip/rga/Makefile b/drivers/video/rockchip/rga/Makefile old mode 100644 new mode 100755 diff --git a/drivers/video/rockchip/rga/RGA_API.c b/drivers/video/rockchip/rga/RGA_API.c old mode 100644 new mode 100755 diff --git a/drivers/video/rockchip/rga/RGA_API.h b/drivers/video/rockchip/rga/RGA_API.h old mode 100644 new mode 100755 diff --git a/drivers/video/rockchip/rga/rga.h b/drivers/video/rockchip/rga/rga.h old mode 100644 new mode 100755 index 68b7fdb36c96..90a17bf20d36 --- a/drivers/video/rockchip/rga/rga.h +++ b/drivers/video/rockchip/rga/rga.h @@ -10,12 +10,22 @@ #define RGA_BLIT_SYNC 0x5017 #define RGA_BLIT_ASYNC 0x5018 #define RGA_FLUSH 0x5019 +#define RGA_GET_RESULT 0x501a #define RGA_REG_CTRL_LEN 0x8 /* 8 */ #define RGA_REG_CMD_LEN 0x1c /* 28 */ #define RGA_CMD_BUF_SIZE 0x700 /* 16*28*4 */ +#define RGA_OUT_OF_RESOURCES -10 +#define RGA_MALLOC_ERROR -11 + + +#define rgaIS_ERROR(status) (status < 0) +#define rgaNO_ERROR(status) (status >= 0) +#define rgaIS_SUCCESS(status) (status == 0) + + /* RGA process mode enum */ enum @@ -280,10 +290,10 @@ struct rga_req { MMU mmu_info; /* mmu information */ - uint8_t alpha_rop_mode; /* ([0~1] alpha mode) */ - /* ([2~3] rop mode) */ - /* ([4] zero mode en) */ - /* ([5] dst alpha mode) */ + uint8_t alpha_rop_mode; /* ([0~1] alpha mode) */ + /* ([2~3] rop mode) */ + /* ([4] zero mode en) */ + /* ([5] dst alpha mode) */ uint8_t src_trans_mode; @@ -339,10 +349,11 @@ typedef struct rga_session { /* a linked list of register data in processing */ struct list_head running; /* all commands this thread has done */ - uint32_t done; + atomic_t done; wait_queue_head_t wait; pid_t pid; atomic_t task_running; + atomic_t num_done; } rga_session; struct rga_reg { @@ -365,16 +376,18 @@ typedef struct rga_service_info { struct list_head done; /* link to link_reg in struct vpu_reg */ struct list_head session; /* link to list_session in struct vpu_session */ atomic_t total_running; - bool enabled; + struct rga_reg *reg; uint32_t cmd_buff[28*16];/* cmd_buff for rga */ uint32_t *pre_scale_buf; atomic_t int_disable; /* 0 int enable 1 int disable */ + atomic_t cmd_num; + //uint32_t mmu_buf[4]; + bool enabled; } rga_service_info; - struct rga_drvdata { struct miscdevice miscdev; struct device dev; diff --git a/drivers/video/rockchip/rga/rga_drv.c b/drivers/video/rockchip/rga/rga_drv.c old mode 100644 new mode 100755 index 1bc9c0e64c9a..a58bdd54f34a --- a/drivers/video/rockchip/rga/rga_drv.c +++ b/drivers/video/rockchip/rga/rga_drv.c @@ -9,7 +9,7 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * + * */ #include @@ -41,6 +41,8 @@ #include #include #include +#include + #include "rga.h" @@ -49,17 +51,21 @@ #include "RGA_API.h" +#define RGA_TEST 1 + #define PRE_SCALE_BUF_SIZE 2048*1024*4 #define RGA_POWER_OFF_DELAY 4*HZ /* 4s */ #define RGA_TIMEOUT_DELAY 2*HZ /* 2s */ - - static struct rga_drvdata *drvdata = NULL; rga_service_info rga_service; +#if 1//def RGA_TEST +//uint32_t dst_buf[800*480*4]; +#endif + static int rga_blit_async(rga_session *session, struct rga_req *req); @@ -76,12 +82,12 @@ static int rga_blit_async(rga_session *session, struct rga_req *req); /* Logging */ -#define RGA_DEBUG 0 +#define RGA_DEBUG 1 #if RGA_DEBUG #define DBG(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args) -#define ERR(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args) -#define WARNING(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args) -#define INFO(format, args...) printk(KERN_DEBUG "%s: " format, DRIVER_NAME, ## args) +#define ERR(format, args...) printk(KERN_ERR "%s: " format, DRIVER_NAME, ## args) +#define WARNING(format, args...) printk(KERN_WARNING "%s: " format, DRIVER_NAME, ## args) +#define INFO(format, args...) printk(KERN_INFO "%s: " format, DRIVER_NAME, ## args) #else #define DBG(format, args...) #define ERR(format, args...) @@ -123,11 +129,49 @@ static void rga_soft_reset(void) static void rga_dump(void) { int running; + int num_done; struct rga_reg *reg, *reg_tmp; - rga_session *session, *session_tmp; + rga_session *session, *session_tmp; + struct list_head *next; running = atomic_read(&rga_service.total_running); - printk("total_running %d\n", running); + printk("rga total_running %d\n", running); + + /* Dump waiting list info */ + if (!list_empty(&rga_service.waiting)) + { + next = &rga_service.waiting; + + printk("rga_service dump waiting list\n"); + + do + { + reg = list_entry(next->next, struct rga_reg, status_link); + running = atomic_read(&reg->session->task_running); + num_done = atomic_read(&reg->session->num_done); + printk("rga session pid %d, done %d, running %d\n", reg->session->pid, num_done, running); + next = next->next; + } + while(!list_empty(next)); + } + + /* Dump running list info */ + if (!list_empty(&rga_service.running)) + { + printk("rga_service dump running list\n"); + + next = &rga_service.running; + do + { + reg = list_entry(next->next, struct rga_reg, status_link); + running = atomic_read(&reg->session->task_running); + num_done = atomic_read(&reg->session->num_done); + printk("rga session pid %d, done %d, running %d:\n", reg->session->pid, num_done, running); + next = next->next; + } + while(!list_empty(next)); + } + list_for_each_entry_safe(session, session_tmp, &rga_service.session, list_session) { @@ -203,7 +247,7 @@ static int rga_flush(rga_session *session, unsigned long arg) int ret; - ret = wait_event_interruptible_timeout(session->wait, session->done, RGA_TIMEOUT_DELAY); + ret = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY); if (unlikely(ret < 0)) { pr_err("pid %d wait task ret %d\n", session->pid, ret); @@ -216,6 +260,29 @@ static int rga_flush(rga_session *session, unsigned long arg) } +static int rga_get_result(rga_session *session, unsigned long arg) +{ + //printk("rga_get_result %d\n",drvdata->rga_result); + + int ret = 0; + + int num_done; + + num_done = atomic_read(&session->num_done); + + if (unlikely(copy_to_user((void __user *)arg, &num_done, sizeof(int)))) { + printk("copy_to_user failed\n"); + ERR("copy_to_user failed\n"); + ret = 
-EFAULT; + } + //idle_condition = 1; + //dmac_clean_range((const void*)&idle_condition,(const void*)&idle_condition+4); + //wake_up_interruptible_sync(&blit_wait_queue); + return ret; +} + + + static int rga_check_param(const struct rga_req *req) { #if 1 @@ -269,22 +336,25 @@ static void rga_copy_reg(struct rga_reg *reg, uint32_t offset) uint32_t *cmd_buf; uint32_t *reg_p; - atomic_add(1, &rga_service.total_running); + atomic_add(1, &rga_service.cmd_num); atomic_add(1, &reg->session->task_running); cmd_buf = (uint32_t *)rga_service.cmd_buff + offset*28; reg_p = (uint32_t *)reg->cmd_reg; - - for(i=0; i<28; i++) + + for(i=0; i<28; i++) { cmd_buf[i] = reg_p[i]; - } + } + + dsb(); } static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req) { unsigned long flag; + uint32_t ret; struct rga_reg *reg = kmalloc(sizeof(struct rga_reg), GFP_KERNEL); if (NULL == reg) { pr_err("kmalloc fail in rga_reg_init\n"); @@ -295,8 +365,21 @@ static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req) INIT_LIST_HEAD(&reg->session_link); INIT_LIST_HEAD(&reg->status_link); - rga_set_mmu_info(reg, req); - RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg); + if (req->mmu_info.mmu_en) + { + ret = rga_set_mmu_info(reg, req); + if(ret < 0) + { + printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__); + if(reg != NULL) + { + kfree(reg); + } + return NULL; + } + } + + RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg); spin_lock_irqsave(&rga_service.lock, flag); list_add_tail(&reg->status_link, &rga_service.waiting); @@ -306,6 +389,79 @@ static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req) return reg; } +static struct rga_reg * rga_reg_init_2(rga_session *session, struct rga_req *req0, struct rga_req *req1) +{ + unsigned long flag; + uint32_t ret; + + struct rga_reg *reg0, *reg1; + + reg0 = NULL; + reg1 = NULL; + + do + { + reg0 = kmalloc(sizeof(struct rga_reg), GFP_KERNEL); + if (NULL == reg0) { + pr_err("%s [%d] kmalloc fail in rga_reg_init\n", __FUNCTION__, __LINE__); + break; + } + + reg1 = kmalloc(sizeof(struct rga_reg), GFP_KERNEL); + if (NULL == reg1) { + pr_err("%s [%d] kmalloc fail in rga_reg_init\n", __FUNCTION__, __LINE__); + break; + } + + reg0->session = session; + INIT_LIST_HEAD(&reg0->session_link); + INIT_LIST_HEAD(&reg0->status_link); + + reg1->session = session; + INIT_LIST_HEAD(&reg1->session_link); + INIT_LIST_HEAD(&reg1->status_link); + + if(req0->mmu_info.mmu_en) + { + ret = rga_set_mmu_info(reg0, req0); + if(ret < 0) { + printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__); + break; + } + } + + RGA_gen_reg_info(req0, (uint8_t *)reg0->cmd_reg); + + if(req1->mmu_info.mmu_en) + { + ret = rga_set_mmu_info(reg1, req1); + if(ret < 0) { + printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__); + break; + } + } + + RGA_gen_reg_info(req1, (uint8_t *)reg1->cmd_reg); + + spin_lock_irqsave(&rga_service.lock, flag); + list_add_tail(&reg0->status_link, &rga_service.waiting); + list_add_tail(&reg1->status_link, &rga_service.waiting); + list_add_tail(&reg0->session_link, &session->waiting); + list_add_tail(&reg1->session_link, &session->waiting); + spin_unlock_irqrestore(&rga_service.lock, flag); + + return reg1; + } + while(0); + + if(reg0 != NULL) { + kfree(reg0); + } + + if(reg1 != NULL) { + kfree(reg1); + } + + return NULL; +} + + static void rga_reg_deinit(struct rga_reg *reg) { list_del_init(&reg->session_link); @@ -322,24 +478,6 @@ static void rga_reg_from_wait_to_run(struct rga_reg *reg) list_add_tail(&reg->session_link, &reg->session->running); } -#if 0 -static void rga_reg_from_run_to_done(struct rga_reg *reg) -{ - spin_lock(&rga_service.lock); - 
list_del_init(&reg->status_link); - list_add_tail(&reg->status_link, &rga_service.done); - - list_del_init(&reg->session_link); - list_add_tail(&reg->session_link, &reg->session->done); - - atomic_sub(1, &reg->session->task_running); - atomic_sub(1, &rga_service.total_running); - wake_up_interruptible_sync(&reg->session->wait); - spin_unlock(&rga_service.lock); -} -#endif - - static void rga_service_session_clear(rga_session *session) { struct rga_reg *reg, *n; @@ -356,46 +494,74 @@ static void rga_service_session_clear(rga_session *session) } -static void rga_try_set_reg(void) +static void rga_try_set_reg(uint32_t num) { unsigned long flag; // first get reg from reg list + + if (!num) + { + printk("rga try set reg cmd num is 0\n"); + return; + } + spin_lock_irqsave(&rga_service.lock, flag); if (!list_empty(&rga_service.waiting)) { - struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link); - - if(!(rga_read(RGA_STATUS) & 0x1)) - { - /* RGA is busy */ - if((atomic_read(&rga_service.total_running) <= 0xf) && (atomic_read(&rga_service.int_disable) == 0)) - { - rga_copy_reg(reg, atomic_read(&rga_service.total_running)); - rga_reg_from_wait_to_run(reg); - rga_write(RGA_INT, 0x1<<10); - reg->session->done = 0; - rga_write(RGA_CMD_CTRL, (0x1<<3)|(0x1<<1)); - if(atomic_read(&reg->int_enable)) - atomic_set(&rga_service.int_disable, 1); + do + { + struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link); + if((rga_read(RGA_STATUS) & 0x1)) + { + /* RGA is busy */ + if((atomic_read(&rga_service.cmd_num) <= 0xf) && (atomic_read(&rga_service.int_disable) == 0)) + { + rga_copy_reg(reg, atomic_read(&rga_service.cmd_num)); + rga_reg_from_wait_to_run(reg); + + rga_write(0x1<<10, RGA_INT); + + atomic_set(&reg->session->done, 0); + + rga_write((0x1<<3)|(0x1<<1), RGA_CMD_CTRL); + + if(atomic_read(&reg->int_enable)) + atomic_set(&rga_service.int_disable, 1); + } } - } - else - { - /* RGA is idle */ - rga_copy_reg(reg, 0); - rga_reg_from_wait_to_run(reg); + else + { + /* RGA is idle */ + rga_copy_reg(reg, 0); + rga_reg_from_wait_to_run(reg); + dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]); - /* MMU */ - rga_write(RGA_CMD_ADDR, 0); - /* All CMD finish int */ - rga_write(RGA_INT, 0x1<<10); + /* + * if the cmd buf must use the mmu, + * it should be written before the cmd starts + */ + rga_write((2<<4)|0x1, RGA_MMU_CTRL); + rga_write(virt_to_phys(reg->MMU_base)>>2, RGA_MMU_TBL); + + /* CMD buff */ + rga_write(virt_to_phys(rga_service.cmd_buff) & (~PAGE_MASK), RGA_CMD_ADDR); - /* Start proc */ - reg->session->done = 0; - rga_write(RGA_CMD_CTRL, (0x1<<3)|0x1); - } + /* master mode */ + rga_write(0x1<<2, RGA_SYS_CTRL); + + /* All CMD finish int */ + rga_write(0x1<<10, RGA_INT); + + /* Start proc */ + atomic_set(&reg->session->done, 0); + rga_write(0x1, RGA_CMD_CTRL); + + } + num--; + } + while(num); } spin_unlock_irqrestore(&rga_service.lock, flag); } @@ -405,7 +571,7 @@ static void rga_try_set_reg(void) static int rga_blit_async(rga_session *session, struct rga_req *req) { int ret = -1; - struct rga_reg *reg0, *reg1; + struct rga_reg *reg; struct rga_req *req2; uint32_t saw, sah, daw, dah; @@ -426,18 +592,14 @@ static int rga_blit_async(rga_session *session, struct rga_req *req) RGA_gen_two_pro(req, req2); - reg0 = rga_reg_init(session, req2); - if(reg0 == NULL) { - return -EFAULT; - } - - reg1 = rga_reg_init(session, req); - if(reg1 == NULL) { + reg = rga_reg_init_2(session, req2, req); + if(reg == NULL) { return -EFAULT; } + + atomic_set(&reg->int_enable, 1); - rga_try_set_reg(); 
- rga_try_set_reg(); + rga_try_set_reg(2); if(req2 != NULL) { @@ -452,12 +614,12 @@ static int rga_blit_async(rga_session *session, struct rga_req *req) return -EINVAL; } - reg0 = rga_reg_init(session, req); - if(reg0 == NULL) { + reg = rga_reg_init(session, req); + if(reg == NULL) { return -EFAULT; } - rga_try_set_reg(); + rga_try_set_reg(1); } //printk("rga_blit_async done******************\n"); @@ -476,7 +638,7 @@ error_scale: static int rga_blit_sync(rga_session *session, struct rga_req *req) { int ret = 0; - struct rga_reg *reg0, *reg1; + struct rga_reg *reg; struct rga_req *req2; uint32_t saw, sah, daw, dah; @@ -497,39 +659,34 @@ static int rga_blit_sync(rga_session *session, struct rga_req *req) RGA_gen_two_pro(req, req2); - reg0 = rga_reg_init(session, req2); - if(reg0 == NULL) { + reg = rga_reg_init_2(session, req2, req); + if(reg == NULL) { return -EFAULT; } + + atomic_set(&reg->int_enable, 1); - reg1 = rga_reg_init(session, req); - if(reg1 == NULL) { - return -EFAULT; - } - atomic_set(&reg1->int_enable, 1); - - rga_try_set_reg(); - rga_try_set_reg(); + rga_try_set_reg(2); } else { - /* check if the values are legal */ + /* check if the values are legal */ ret = rga_check_param(req); if(ret == -EINVAL) { return -EFAULT; } - reg0 = rga_reg_init(session, req); - if(reg0 == NULL) { + reg = rga_reg_init(session, req); + if(reg == NULL) { return -EFAULT; } - atomic_set(&reg0->int_enable, 1); + + atomic_set(&reg->int_enable, 1); - rga_try_set_reg(); + rga_try_set_reg(1); } - ret = wait_event_interruptible_timeout(session->wait, session->done, RGA_TIMEOUT_DELAY); - + ret = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY); if (unlikely(ret < 0)) { pr_err("pid %d wait task ret %d\n", session->pid, ret); @@ -580,14 +737,15 @@ static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg) case RGA_FLUSH: ret = rga_flush(session, arg); break; + case RGA_GET_RESULT: + ret = rga_get_result(session, arg); + break; default: ERR("unknown ioctl cmd!\n"); ret = -EINVAL; break; } - if(req != NULL) - { + if(req != NULL) { kfree(req); } @@ -610,6 +768,7 @@ static int rga_open(struct inode *inode, struct file *file) /* no need to protect */ list_add_tail(&session->list_session, &rga_service.session); atomic_set(&session->task_running, 0); + atomic_set(&session->num_done, 0); file->private_data = (void *)session; DBG("*** rga dev opened *** \n"); @@ -624,12 +783,16 @@ static int rga_release(struct inode *inode, struct file *file) rga_session *session = (rga_session *)file->private_data; if (NULL == session) return -EINVAL; - + task_running = atomic_read(&session->task_running); - if (task_running) { + + while (task_running) + { pr_err("rga_service session %d still has %d task running when closing\n", session->pid, task_running); msleep(50); + /* sync: re-read the counter so the wait can actually terminate */ + task_running = atomic_read(&session->task_running); } + wake_up_interruptible_sync(&session->wait); spin_lock_irqsave(&rga_service.lock, flag); list_del(&session->list_session); @@ -644,48 +807,67 @@ static int rga_release(struct inode *inode, struct file *file) static irqreturn_t rga_irq(int irq, void *dev_id) { struct rga_reg *reg; + uint32_t num = 0; + struct list_head *next; + int int_enable = 0; DBG("rga_irq %d \n", irq); + printk("rga_irq is valid\n"); + /*clear INT */ rga_write(rga_read(RGA_INT) | (0x1<<6), RGA_INT); - if(((rga_read(RGA_STATUS) & 0x1) != 0))// idle + + if(((rga_read(RGA_STATUS) & 0x1) != 0))// busy { printk("RGA is not idle!\n"); rga_soft_reset(); } + spin_lock(&rga_service.lock); do { reg = list_entry(rga_service.running.next, struct rga_reg, status_link); + + #if 
0 if(reg->MMU_base != NULL) { kfree(reg->MMU_base); } - + #endif + atomic_sub(1, &reg->session->task_running); atomic_sub(1, &rga_service.total_running); if(list_empty(&reg->session->waiting)) { - reg->session->done = 1; + atomic_set(&reg->session->done, 1); wake_up_interruptible_sync(&reg->session->wait); } + rga_reg_deinit(reg); } while(!list_empty(&rga_service.running)); + atomic_set(&rga_service.cmd_num, 0); + + spin_unlock(&rga_service.lock); + next = &rga_service.waiting; + /* add cmd to cmd buf */ - while(((!list_empty(&rga_service.waiting)) && (atomic_read(&rga_service.int_disable) == 0))) - { - rga_try_set_reg(); + while((!list_empty(next)) && ((int_enable) == 0) && (num <= 0xf)) + { + num += 1; + reg = list_entry(next->next, struct rga_reg, status_link); + int_enable = atomic_read(&reg->int_enable); + next = next->next; } - spin_lock(&rga_service.lock); - + rga_try_set_reg(num); + return IRQ_HANDLED; } @@ -693,24 +875,24 @@ static int rga_suspend(struct platform_device *pdev, pm_message_t state) { uint32_t enable; - enable = drvdata->enable; - rga_power_off(NULL); + enable = drvdata->enable; + //rga_power_off(NULL); drvdata->enable = enable; return 0; } static int rga_resume(struct platform_device *pdev) -{ - rga_power_on(); +{ + //rga_power_on(); return 0; } static void rga_shutdown(struct platform_device *pdev) { - pr_cont("shutdown..."); - rga_power_off(NULL); - pr_cont("done\n"); + pr_cont("shutdown..."); + //rga_power_off(NULL); + pr_cont("done\n"); } @@ -748,7 +930,8 @@ static int __devinit rga_drv_probe(struct platform_device *pdev) ERR("failed to allocate driver data.\n"); return -ENOMEM; } - + + #if 0 /* get the clock */ data->pd_display = clk_get(&pdev->dev, "pd_display"); if (IS_ERR(data->pd_display)) @@ -821,6 +1004,7 @@ static int __devinit rga_drv_probe(struct platform_device *pdev) ret = -ENOENT; goto err_clock; } + #endif /* map the memory */ if (!request_mem_region(RK30_RGA_PHYS, RK30_RGA_SIZE, "rga_io")) @@ -892,6 +1076,9 @@ static int rga_drv_remove(struct platform_device *pdev) free_irq(data->irq0, &data->miscdev); iounmap((void __iomem *)(data->rga_base)); + + + #if 0 if(data->axi_clk) { clk_put(data->axi_clk); } @@ -926,6 +1113,7 @@ static int rga_drv_remove(struct platform_device *pdev) if(data->pd_display){ clk_put(data->pd_display); } + #endif kfree(data); return 0; @@ -946,15 +1134,32 @@ static struct platform_driver rga_driver = { static int __init rga_init(void) { int ret; - uint8_t *buf; + uint32_t *mmu_buf; + uint32_t i; + uint32_t *buf_p; - /* malloc pre scale mid buf */ - buf = kmalloc(PRE_SCALE_BUF_SIZE, GFP_KERNEL); - if(buf == NULL) { + /* malloc pre scale mid buf mmu table */ + mmu_buf = (uint32_t *)kmalloc(1024*8, GFP_KERNEL); + if(mmu_buf == NULL) + { ERR("RGA get Pre Scale buff failed. 
\n"); return -1; } - rga_service.pre_scale_buf = (uint32_t *)buf; + + /* malloc 8 M buf */ + for(i=0; i<2048; i++) + { + buf_p = (uint32_t *)__get_free_page(GFP_KERNEL); + if(buf_p == NULL) + { + printk("RGA init pre scale buf falied\n"); + return -ENOMEM; + } + + mmu_buf[i] = virt_to_phys((void *)((uint32_t)buf_p)); + } + + rga_service.pre_scale_buf = (uint32_t *)mmu_buf; if ((ret = platform_driver_register(&rga_driver)) != 0) { @@ -962,13 +1167,23 @@ static int __init rga_init(void) return ret; } - INFO("Module initialized.\n"); + INFO("Module initialized.\n"); return 0; } static void __exit rga_exit(void) { + uint32_t i; + + for(i=0; i<2048; i++) + { + if((uint32_t *)rga_service.pre_scale_buf[i] != NULL) + { + __free_page((void *)rga_service.pre_scale_buf[i]); + } + } + if(rga_service.pre_scale_buf != NULL) { kfree((uint8_t *)rga_service.pre_scale_buf); } @@ -976,7 +1191,6 @@ static void __exit rga_exit(void) } - module_init(rga_init); module_exit(rga_exit); @@ -985,5 +1199,3 @@ module_exit(rga_exit); MODULE_AUTHOR("zsq@rock-chips.com"); MODULE_DESCRIPTION("Driver for rga device"); MODULE_LICENSE("GPL"); - - diff --git a/drivers/video/rockchip/rga/rga_mmu_info.c b/drivers/video/rockchip/rga/rga_mmu_info.c old mode 100644 new mode 100755 index 4fca3633d818..d4e21b0dfd43 --- a/drivers/video/rockchip/rga/rga_mmu_info.c +++ b/drivers/video/rockchip/rga/rga_mmu_info.c @@ -17,7 +17,6 @@ #include "rga_mmu_info.h" - extern rga_service_info rga_service; #define KERNEL_SPACE_VALID 0xc0000000 @@ -217,46 +216,142 @@ static int rga_MapUserMemory(struct page **pages, { int32_t result; uint32_t i; - - down_read(¤t->mm->mmap_sem); - result = get_user_pages(current, - current->mm, - Memory << PAGE_SHIFT, - pageCount, - 1, - 0, - pages, - NULL - ); - up_read(¤t->mm->mmap_sem); - - if(result <= 0 || result < pageCount) - { - return -EINVAL; - } + uint32_t status; + + do + { + down_read(¤t->mm->mmap_sem); + result = get_user_pages(current, + current->mm, + Memory << PAGE_SHIFT, + pageCount, + 1, + 0, + pages, + NULL + ); + up_read(¤t->mm->mmap_sem); + + if(result <= 0 || result < pageCount) + { + struct vm_area_struct *vma; + + vma = find_vma(current->mm, Memory); + + if (vma && (vma->vm_flags & VM_PFNMAP) ) + { + do + { + pte_t * pte; + spinlock_t * ptl; + unsigned long pfn; + + pgd_t * pgd = pgd_offset(current->mm, Memory); + pud_t * pud = pud_offset(pgd, Memory); + if (pud) + { + pmd_t * pmd = pmd_offset(pud, Memory); + if (pmd) + { + pte = pte_offset_map_lock(current->mm, pmd, Memory, &ptl); + if (!pte) + { + break; + } + } + else + { + break; + } + } + else + { + break; + } + + pfn = pte_pfn(*pte); + + pte_unmap_unlock(pte, ptl); + + /* Free the page table. */ + if (pages != NULL) + { + /* Release the pages if any. */ + if (result > 0) + { + for (i = 0; i < result; i++) + { + if (pages[i] == NULL) + { + break; + } + + page_cache_release(pages[i]); + } + } + } + + return 0; + } + while (0); + + status = RGA_OUT_OF_RESOURCES; + break; + } + else + { + status = RGA_OUT_OF_RESOURCES; + break; + } + } - for (i = 0; i < pageCount; i++) - { - /* Flush the data cache. */ + for (i = 0; i < pageCount; i++) + { + /* Flush the data cache. */ #ifdef ANDROID - dma_sync_single_for_device( - gcvNULL, - page_to_phys(pages[i]), - PAGE_SIZE, - DMA_TO_DEVICE); + dma_sync_single_for_device( + NULL, + page_to_phys(pages[i]), + PAGE_SIZE, + DMA_TO_DEVICE); #else - flush_dcache_page(pages[i]); + flush_dcache_page(pages[i]); #endif - } + } - /* Fill the page table. 
*/ - for(i=0; i 0 && pages != NULL) + { + for (i = 0; i < result; i++) + { + if (pages[i] == NULL) + { + break; + } +#ifdef ANDROID + dma_sync_single_for_device( + NULL, + page_to_phys(pages[i]), + PAGE_SIZE, + DMA_FROM_DEVICE); +#endif + page_cache_release(pages[i]); + } + } + } + + return status; } static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req) @@ -267,106 +362,135 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req) uint32_t AllSize; uint32_t *MMU_Base, *MMU_p; int ret; + int status; struct page **pages = NULL; - /* cal src buf mmu info */ - SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, - req->src.format, req->src.vir_w, req->src.vir_h, - &SrcStart); - if(SrcMemSize == 0) { - return -EINVAL; - } + MMU_Base = NULL; - /* cal dst buf mmu info */ - DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, - req->dst.format, req->dst.vir_w, req->dst.vir_h, - &DstStart); - if(DstMemSize == 0) { - return -EINVAL; - } - - /* cal cmd buf mmu info */ - CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); - if(CMDMemSize == 0) { - return -EINVAL; - } + do + { + /* cal src buf mmu info */ + SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, + req->src.format, req->src.vir_w, req->src.vir_h, + &SrcStart); + if(SrcMemSize == 0) { + return -EINVAL; + } - AllSize = SrcMemSize + DstMemSize + CMDMemSize; - - pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed"); - return -EINVAL; - } - - MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc MMU_Base point failed"); - return -EINVAL; - } + /* cal dst buf mmu info */ + DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, + req->dst.format, req->dst.vir_w, req->dst.vir_h, + &DstStart); + if(DstMemSize == 0) { + return -EINVAL; + } - for(i=0; isrc.yrgb_addr < KERNEL_SPACE_VALID) - { - ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); - if (ret < 0) { - pr_err("rga map src memory failed"); - return -EINVAL; + AllSize = SrcMemSize + DstMemSize + CMDMemSize; + + pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); + if(pages == NULL) { + pr_err("RGA MMU malloc pages mem failed"); + status = RGA_MALLOC_ERROR; + break; } - } - else - { - MMU_p = MMU_Base + CMDMemSize; - for(i=0; isrc.yrgb_addr < KERNEL_SPACE_VALID) { - MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT)); + ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); + if (ret < 0) { + pr_err("rga map src memory failed"); + status = ret; + break; + } } - } - - if(req->dst.yrgb_addr < KERNEL_SPACE_VALID) - { - ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize); - if (ret < 0) { - pr_err("rga map dst memory failed"); - return -EINVAL; + else + { + MMU_p = MMU_Base + CMDMemSize; + + for(i=0; idst.yrgb_addr < KERNEL_SPACE_VALID) { - MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT)); - } - } + ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize); + if (ret < 0) { + pr_err("rga map dst memory failed"); + status = ret; + break; + } + } + else + { + MMU_p = 
MMU_Base + CMDMemSize + SrcMemSize; + + for(i=0; immu_info.base_addr = virt_to_phys(MMU_Base); - - req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); - req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); - req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + /* zsq + * change the buf address in req struct + * for the reason of lie to MMU + */ + + req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2); - req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT); - - /*record the malloc buf for the cmd end to release*/ - reg->MMU_base = MMU_Base; + req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + + req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT); + + /*record the malloc buf for the cmd end to release*/ + reg->MMU_base = MMU_Base; + + dmac_flush_range(MMU_Base, (MMU_Base + AllSize)); + + status = 0; + + /* Free the page table */ + if (pages != NULL) { + kfree(pages); + } + + return status; + } + while(0); + + /* Free the page table */ if (pages != NULL) { - /* Free the page table */ kfree(pages); - } + } - return 0; + /* Free MMU table */ + if(MMU_Base != NULL) { + kfree(MMU_Base); + } + + return status; } static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req) @@ -376,8 +500,9 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req * struct page **pages = NULL; uint32_t i; uint32_t AllSize; - uint32_t *MMU_Base; - int ret; + uint32_t *MMU_Base = NULL; + uint32_t *MMU_p; + int ret, status; uint32_t stride; uint8_t shift; @@ -387,70 +512,121 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req * sw = req->src.vir_w; byte_num = sw >> shift; stride = (byte_num + 3) & (~3); + + do + { - SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart); - if(SrcMemSize == 0) { - return -EINVAL; - } + SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart); + if(SrcMemSize == 0) { + return -EINVAL; + } - DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, - req->dst.format, req->dst.vir_w, req->dst.vir_h, - &DstStart); - if(DstMemSize == 0) { - return -EINVAL; - } + DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, + req->dst.format, req->dst.vir_w, req->dst.vir_h, + &DstStart); + if(DstMemSize == 0) { + return -EINVAL; + } - CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); - if(CMDMemSize == 0) { - return -EINVAL; - } + CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); + if(CMDMemSize == 0) { + return -EINVAL; + } - AllSize = SrcMemSize + DstMemSize + CMDMemSize; - - pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed"); - return -EINVAL; - } - - MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc MMU_Base point failed"); - return -EINVAL; - } + AllSize = SrcMemSize + DstMemSize + CMDMemSize; + + pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); + if(pages == NULL) { + 
pr_err("RGA MMU malloc pages mem failed"); + return -EINVAL; + } - for(i=0; immu_info.base_addr = virt_to_phys(MMU_Base); - req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); - req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT); + /* map src addr */ + if (req->src.yrgb_addr < KERNEL_SPACE_VALID) + { + ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); + if (ret < 0) + { + pr_err("rga map src memory failed"); + status = ret; + break; + } + } + else + { + MMU_p = MMU_Base + CMDMemSize; + + for(i=0; iMMU_base = MMU_Base; + /* map dst addr */ + if (req->src.yrgb_addr < KERNEL_SPACE_VALID) + { + ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize); + if (ret < 0) + { + pr_err("rga map dst memory failed"); + status = ret; + break; + } + } + else + { + MMU_p = MMU_Base + CMDMemSize + SrcMemSize; + + for(i=0; immu_info.base_addr = virt_to_phys(MMU_Base); + req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT); + + + /*record the malloc buf for the cmd end to release*/ + reg->MMU_base = MMU_Base; - if (pages != NULL) { /* Free the page table */ + if (pages != NULL) { + kfree(pages); + } + + return status; + + } + while(0); + + /* Free the page table */ + if (pages != NULL) { kfree(pages); - } + } + + /* Free mmu table */ + if (MMU_Base != NULL) { + kfree(MMU_Base); + } return 0; } @@ -462,62 +638,92 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req struct page **pages = NULL; uint32_t i; uint32_t AllSize; - uint32_t *MMU_Base; + uint32_t *MMU_Base, *MMU_p; int ret; - - DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, - req->dst.format, req->dst.vir_w, req->dst.vir_h, - &DstStart); - if(DstMemSize == 0) { - return -EINVAL; - } + int status; - CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); - if(CMDMemSize == 0) { - return -EINVAL; - } + MMU_Base = NULL; - AllSize = DstMemSize + CMDMemSize; - - pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed"); - return -EINVAL; - } - - MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc MMU_Base point failed"); - return -EINVAL; - } + do + { + DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, + req->dst.format, req->dst.vir_w, req->dst.vir_h, + &DstStart); + if(DstMemSize == 0) { + return -EINVAL; + } - for(i=0; immu_info.base_addr = virt_to_phys(MMU_Base); - req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT); - - - /*record the malloc buf for the cmd end to release*/ - reg->MMU_base = MMU_Base; + AllSize = DstMemSize + CMDMemSize; + + pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); + if(pages == NULL) { + pr_err("RGA MMU malloc pages mem failed"); + status = RGA_MALLOC_ERROR; + break; + } + + MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); + if(pages == NULL) { + pr_err("RGA MMU malloc MMU_Base point failed"); + status = RGA_MALLOC_ERROR; + break; + } + + for(i=0; idst.yrgb_addr < KERNEL_SPACE_VALID) + { + ret = rga_MapUserMemory(&pages[CMDMemSize], 
&MMU_Base[CMDMemSize], DstStart, DstMemSize); + if (ret < 0) { + pr_err("rga map dst memory failed"); + status = ret; + break; + } + } + else + { + MMU_p = MMU_Base + CMDMemSize; + + for(i=0; immu_info.base_addr = virt_to_phys(MMU_Base); + req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT); + + + /*record the malloc buf for the cmd end to release*/ + reg->MMU_base = MMU_Base; - if (pages != NULL) { /* Free the page table */ + if (pages != NULL) + kfree(pages); + + return 0; + } + while(0); + + if (pages != NULL) kfree(pages); - } - return 0; + if (MMU_Base != NULL) + kfree(MMU_Base); + + return status; } @@ -528,63 +734,93 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_ struct page **pages = NULL; uint32_t i; uint32_t AllSize; - uint32_t *MMU_Base; - int ret; + uint32_t *MMU_Base, *MMU_p; + int ret, status; + + MMU_Base = NULL; + + do + { + /* cal dst buf mmu info */ + DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, + req->dst.format, req->dst.vir_w, req->dst.vir_h, + &DstStart); + if(DstMemSize == 0) { + return -EINVAL; + } - /* cal dst buf mmu info */ - DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, - req->dst.format, req->dst.vir_w, req->dst.vir_h, - &DstStart); - if(DstMemSize == 0) { - return -EINVAL; - } + CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); + if(CMDMemSize == 0) { + return -EINVAL; + } - CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); - if(CMDMemSize == 0) { - return -EINVAL; - } + AllSize = DstMemSize + CMDMemSize; + + pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); + if(pages == NULL) { + pr_err("RGA MMU malloc pages mem failed"); + status = RGA_MALLOC_ERROR; + break; + } + + MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); + if(pages == NULL) { + pr_err("RGA MMU malloc MMU_Base point failed"); + status = RGA_MALLOC_ERROR; + break; + } - AllSize = DstMemSize + CMDMemSize; - - pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed"); - return -EINVAL; - } - - MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc MMU_Base point failed"); - return -EINVAL; - } + for(i=0; idst.yrgb_addr < KERNEL_SPACE_VALID) + { + ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize); + if (ret < 0) { + pr_err("rga map dst memory failed"); + status = ret; + break; + } + } + else + { + MMU_p = MMU_Base + CMDMemSize; + + for(i=0; immu_info.base_addr = virt_to_phys(MMU_Base); - req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT); - - - /*record the malloc buf for the cmd end to release*/ - reg->MMU_base = MMU_Base; + /* zsq + * change the buf address in req struct + * for the reason of lie to MMU + */ + req->mmu_info.base_addr = virt_to_phys(MMU_Base); + req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT); + + + /*record the malloc buf for the cmd end to release*/ + reg->MMU_base = MMU_Base; - if (pages != NULL) { /* Free the page table */ + if (pages != NULL) { + kfree(pages); + } + + return 0; + + } + while(0); + + if (pages != NULL) kfree(pages); - } - return 0; + if (MMU_Base != NULL) + kfree(MMU_Base); + + return status; } static int 
rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req) @@ -594,82 +830,127 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r struct page **pages = NULL; uint32_t i; uint32_t AllSize; - uint32_t *MMU_Base; - int ret; + uint32_t *MMU_Base, *MMU_p; + int ret, status; - /* cal src buf mmu info */ - SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, - req->src.format, req->src.vir_w, req->src.vir_h, - &SrcStart); - if(SrcMemSize == 0) { - return -EINVAL; - } + MMU_Base = NULL; + + do + { + /* cal src buf mmu info */ + SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, + req->src.format, req->src.vir_w, req->src.vir_h, + &SrcStart); + if(SrcMemSize == 0) { + return -EINVAL; + } - /* cal dst buf mmu info */ - DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, - req->dst.format, req->dst.vir_w, req->dst.vir_h, - &DstStart); - if(DstMemSize == 0) { - return -EINVAL; - } + /* cal dst buf mmu info */ + DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, + req->dst.format, req->dst.vir_w, req->dst.vir_h, + &DstStart); + if(DstMemSize == 0) { + return -EINVAL; + } - /* cal cmd buf mmu info */ - CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); - if(CMDMemSize == 0) { - return -EINVAL; - } + /* cal cmd buf mmu info */ + CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); + if(CMDMemSize == 0) { + return -EINVAL; + } - AllSize = SrcMemSize + DstMemSize + CMDMemSize; - - pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed"); - return -EINVAL; - } - - MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc MMU_Base point failed"); - return -EINVAL; - } + AllSize = SrcMemSize + DstMemSize + CMDMemSize; + + pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); + if(pages == NULL) { + pr_err("RGA MMU malloc pages mem failed"); + status = RGA_MALLOC_ERROR; + break; + } + + MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); + if(pages == NULL) { + pr_err("RGA MMU malloc MMU_Base point failed"); + status = RGA_MALLOC_ERROR; + break; + } - for(i=0; isrc.yrgb_addr < KERNEL_SPACE_VALID) + { + ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); + if (ret < 0) + { + pr_err("rga map src memory failed"); + status = ret; + break; + } + } + else + { + MMU_p = MMU_Base + CMDMemSize; + + for(i=0; immu_info.base_addr = virt_to_phys(MMU_Base); - - req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); - req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); - req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + + if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) + { + ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize); + if (ret < 0) + { + pr_err("rga map dst memory failed"); + status = ret; + break; + } + } + else + { + MMU_p = MMU_Base + CMDMemSize + SrcMemSize; + + for(i=0; idst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT); - - /*record the malloc buf for the cmd end to release*/ - reg->MMU_base = MMU_Base; + /* zsq + * change 
the buf address in req struct + * for the reason of lie to MMU + */ + req->mmu_info.base_addr = virt_to_phys(MMU_Base); + + req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + + req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT); + + /*record the malloc buf for the cmd end to release*/ + reg->MMU_base = MMU_Base; - if (pages != NULL) { /* Free the page table */ + if (pages != NULL) { + kfree(pages); + } + + return 0; + } + while(0); + + if (pages != NULL) kfree(pages); - } - return 0; + if (MMU_Base != NULL) + kfree(MMU_Base); + + return status; } @@ -683,101 +964,144 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req) uint32_t AllSize; uint32_t *MMU_Base, *MMU_p; int ret; + int status; - /* cal src buf mmu info */ - SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, - req->src.format, req->src.vir_w, req->src.vir_h, - &SrcStart); - if(SrcMemSize == 0) { - return -EINVAL; - } + MMU_Base = NULL; - /* cal dst buf mmu info */ - DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, - req->dst.format, req->dst.vir_w, req->dst.vir_h, - &DstStart); - if(DstMemSize == 0) { - return -EINVAL; - } + do + { + /* cal src buf mmu info */ + SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr, + req->src.format, req->src.vir_w, req->src.vir_h, + &SrcStart); + if(SrcMemSize == 0) { + return -EINVAL; + } - /* cal cmd buf mmu info */ - CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); - if(CMDMemSize == 0) { - return -EINVAL; - } + /* cal dst buf mmu info */ + DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr, + req->dst.format, req->dst.vir_w, req->dst.vir_h, + &DstStart); + if(DstMemSize == 0) { + return -EINVAL; + } - AllSize = SrcMemSize + DstMemSize + CMDMemSize; - - pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed"); - return -EINVAL; - } + /* cal cmd buf mmu info */ + CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); + if(CMDMemSize == 0) { + return -EINVAL; + } - /* - * Allocate MMU Index mem - * This mem release in run_to_done fun - */ - MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc MMU_Base point failed"); - return -EINVAL; - } + AllSize = SrcMemSize + DstMemSize + CMDMemSize; + + pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); + if(pages == NULL) + { + pr_err("RGA MMU malloc pages mem failed"); + status = RGA_MALLOC_ERROR; + break; + } - for(i=0; idst.yrgb_addr >= 0xc0000000) - { - /* kernel space */ - MMU_p = MMU_Base + CMDMemSize + SrcMemSize; - for(i=0; isrc.yrgb_addr < KERNEL_SPACE_VALID) { - MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i)<< PAGE_SHIFT)); + ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); + if (ret < 0) { + pr_err("rga map src memory failed"); + status = ret; + break; + } } - } - else - { - /* user space */ - ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize); - if (ret < 0) + else { - 
pr_err("rga map dst memory failed"); - return -EINVAL; - } - } + MMU_p = MMU_Base + CMDMemSize; + + for(i=0; immu_info.base_addr = virt_to_phys(MMU_Base); - - req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); - req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); - req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + + if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID) + { + /* kernel space */ + MMU_p = MMU_Base + CMDMemSize + SrcMemSize; + for(i=0; idst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT); - - /*record the malloc buf for the cmd end to release*/ - reg->MMU_base = MMU_Base; + /* zsq + * change the buf address in req struct + * for the reason of lie to MMU + */ + req->mmu_info.base_addr = virt_to_phys(MMU_Base)>>2; + + #if 0 + req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + + req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT); + #else + + req->src.yrgb_addr &= 0xffffff; + req->src.uv_addr &= 0xfffffff; + req->src.v_addr &= 0xfffffff; + + req->dst.yrgb_addr &= 0xfffffff; + + #endif + + /*record the malloc buf for the cmd end to release*/ + reg->MMU_base = MMU_Base; - if (pages != NULL) { /* Free the page table */ + if (pages != NULL) + { + kfree(pages); + } + + return 0; + } + while(0); + + if (pages != NULL) kfree(pages); - } - return 0; + if (MMU_Base != NULL) + kfree(MMU_Base); + + return status; } @@ -788,62 +1112,90 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg struct page **pages = NULL; uint32_t i; uint32_t AllSize; - uint32_t *MMU_Base; - int ret; + uint32_t *MMU_Base, *MMU_p; + int ret, status; - /* cal src buf mmu info */ - SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart); - if(SrcMemSize == 0) { - return -EINVAL; - } + MMU_Base = NULL; - /* cal cmd buf mmu info */ - CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); - if(CMDMemSize == 0) { - return -EINVAL; - } + do + { + /* cal src buf mmu info */ + SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart); + if(SrcMemSize == 0) { + return -EINVAL; + } - AllSize = SrcMemSize + DstMemSize + CMDMemSize; - - pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed"); - return -EINVAL; - } - - MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc MMU_Base point failed"); - return -EINVAL; - } + /* cal cmd buf mmu info */ + CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); + if(CMDMemSize == 0) { + return -EINVAL; + } - for(i=0; isrc.yrgb_addr < KERNEL_SPACE_VALID) + { + ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); + if (ret < 0) { + pr_err("rga map src memory failed"); + return -EINVAL; + } + } + else + { + MMU_p = MMU_Base + CMDMemSize; - ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); - if (ret < 0) { - pr_err("rga map src memory failed"); - return -EINVAL; - } + for(i=0; immu_info.base_addr = 
virt_to_phys(MMU_Base); - - req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); - - /*record the malloc buf for the cmd end to release*/ - reg->MMU_base = MMU_Base; + /* zsq + * change the buf address in req struct + * for the reason of lie to MMU + */ + req->mmu_info.base_addr = virt_to_phys(MMU_Base); + + req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + + /*record the malloc buf for the cmd end to release*/ + reg->MMU_base = MMU_Base; - if (pages != NULL) { - /* Free the page table */ + if (pages != NULL) { + /* Free the page table */ + kfree(pages); + } + + return 0; + } + while(0); + + if (pages != NULL) kfree(pages); - } - return 0; + if (MMU_Base != NULL) + kfree(MMU_Base); + + return status; } static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req) @@ -853,62 +1205,91 @@ static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_ struct page **pages = NULL; uint32_t i; uint32_t AllSize; - uint32_t *MMU_Base; - int ret; + uint32_t *MMU_Base, *MMU_p; + int ret, status; - /* cal src buf mmu info */ - SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart); - if(SrcMemSize == 0) { - return -EINVAL; - } + do + { - /* cal cmd buf mmu info */ - CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); - if(CMDMemSize == 0) { - return -EINVAL; - } + /* cal src buf mmu info */ + SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart); + if(SrcMemSize == 0) { + return -EINVAL; + } - AllSize = SrcMemSize + DstMemSize + CMDMemSize; - - pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed"); - return -EINVAL; - } - - MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); - if(pages == NULL) { - pr_err("RGA MMU malloc MMU_Base point failed"); - return -EINVAL; - } + /* cal cmd buf mmu info */ + CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart); + if(CMDMemSize == 0) { + return -EINVAL; + } - for(i=0; isrc.yrgb_addr < KERNEL_SPACE_VALID) + { + ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); + if (ret < 0) { + pr_err("rga map src memory failed"); + status = ret; + break; + } + } + else + { + MMU_p = MMU_Base + CMDMemSize; - ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); - if (ret < 0) { - pr_err("rga map src memory failed"); - return -EINVAL; - } + for(i=0; immu_info.base_addr = virt_to_phys(MMU_Base); - - req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); - - /*record the malloc buf for the cmd end to release*/ - reg->MMU_base = MMU_Base; + /* zsq + * change the buf address in req struct + * for the reason of lie to MMU + */ + req->mmu_info.base_addr = virt_to_phys(MMU_Base); + + req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); + + /*record the malloc buf for the cmd end to release*/ + reg->MMU_base = MMU_Base; + + if (pages != NULL) { + /* Free the page table */ + kfree(pages); + } + + return 0; - if (pages != NULL) { - /* Free the page table */ - kfree(pages); } + while(0); - return 0; + if (pages != NULL) + kfree(pages); + + if (MMU_Base != NULL) + kfree(MMU_Base); + + return status; } int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req) 
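Every rewritten rga_mmu_info_*_mode() path above now follows the same shape: allocate the page array and the MMU table inside a do { ... } while(0) block, break out with a status code on any failure, and fall through to one cleanup point that frees whatever was allocated. Below is a minimal, self-contained sketch of that idiom, not the driver's actual code: the function name is hypothetical, the table fill is elided, and RGA_MALLOC_ERROR is the new define from the rga.h hunk above.

static int rga_mmu_table_example(uint32_t entries)
{
	struct page **pages = NULL;
	uint32_t *MMU_Base = NULL;
	int status;

	do {
		pages = kmalloc(entries * sizeof(struct page *), GFP_KERNEL);
		if (pages == NULL) {
			status = RGA_MALLOC_ERROR;
			break;
		}

		/* check the pointer just allocated, not pages again */
		MMU_Base = kmalloc(entries * sizeof(uint32_t), GFP_KERNEL);
		if (MMU_Base == NULL) {
			status = RGA_MALLOC_ERROR;
			break;
		}

		/* ... fill MMU_Base[0..entries-1] and hand it to the hardware ... */

		kfree(pages);	/* the page array is only temporary scaffolding */
		return 0;
	} while (0);

	/* single exit for every early break above; kfree(NULL) is a no-op */
	kfree(pages);
	kfree(MMU_Base);
	return status;
}

One defect worth flagging while reading the hunks above: after each MMU_Base kmalloc, the patch re-tests pages == NULL instead of MMU_Base == NULL, so a failed table allocation would go undetected.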
diff --git a/drivers/video/rockchip/rga/rga_mmu_info.h b/drivers/video/rockchip/rga/rga_mmu_info.h old mode 100644 new mode 100755 diff --git a/drivers/video/rockchip/rga/rga_reg_info.c b/drivers/video/rockchip/rga/rga_reg_info.c old mode 100644 new mode 100755 index fe6e35531d1d..7b6df14c2b73 --- a/drivers/video/rockchip/rga/rga_reg_info.c +++ b/drivers/video/rockchip/rga/rga_reg_info.c @@ -99,7 +99,9 @@ dst_ctrl_cal(const struct rga_req *msg, TILE_INFO *tile) xmin = MAX(MIN(MIN(MIN(pos[0], pos[2]), pos[4]), pos[6]), msg->clip.xmin); ymax = MIN(MAX(MAX(MAX(pos[1], pos[3]), pos[5]), pos[7]), msg->clip.ymax); - ymin = MAX(MIN(MIN(MIN(pos[1], pos[3]), pos[5]), pos[7]), msg->clip.ymin); + ymin = MAX(MIN(MIN(MIN(pos[1], pos[3]), pos[5]), pos[7]), msg->clip.ymin); + + printk("xmax = %d, xmin = %d, ymin = %d, ymax = %d\n", xmax, xmin, ymin, ymax); } else if(msg->rotate_mode == 1) { @@ -228,6 +230,8 @@ dst_ctrl_cal(const struct rga_req *msg, TILE_INFO *tile) tile->dst_ctrl.x_off = xmin; tile->dst_ctrl.y_off = ymin; + printk("tile->dst_ctrl.w = %x, tile->dst_ctrl.h = %x\n", tile->dst_ctrl.w, tile->dst_ctrl.h); + tile->tile_x_num = (xmax - xmin + 1 + 7)>>3; tile->tile_y_num = (ymax - ymin + 1 + 7)>>3; @@ -931,10 +935,10 @@ RGA_set_bitblt_reg_info(u8 *base, const struct rga_req * msg, TILE_INFO *tile) bRGA_DST_CTR_INFO = (u32 *)(base + RGA_DST_CTR_INFO_OFFSET); /* Matrix reg fill */ - m0 = (s32)(tile->matrix[0]*(1<<14)); - m1 = (s32)(tile->matrix[1]*(1<<14)); - m2 = (s32)(tile->matrix[2]*(1<<14)); - m3 = (s32)(tile->matrix[3]*(1<<14)); + m0 = (s32)(tile->matrix[0] >> 18); + m1 = (s32)(tile->matrix[1] >> 18); + m2 = (s32)(tile->matrix[2] >> 18); + m3 = (s32)(tile->matrix[3] >> 18); *bRGA_SRC_X_PARA = (m0 & 0xffff) | (m2 << 16); *bRGA_SRC_Y_PARA = (m1 & 0xffff) | (m3 << 16); @@ -1451,7 +1455,7 @@ RGA_gen_reg_info(const struct rga_req *msg, unsigned char *base) matrix_cal(msg, &tile); dst_ctrl_cal(msg, &tile); src_tile_info_cal(msg, &tile); - RGA_set_bitblt_reg_info(base, msg, &tile); + RGA_set_bitblt_reg_info(base, msg, &tile); break; case color_palette_mode : RGA_set_src(base, msg); diff --git a/drivers/video/rockchip/rga/rga_reg_info.h b/drivers/video/rockchip/rga/rga_reg_info.h old mode 100644 new mode 100755 index b869f926b230..049de87e65c6 --- a/drivers/video/rockchip/rga/rga_reg_info.h +++ b/drivers/video/rockchip/rga/rga_reg_info.h @@ -451,7 +451,7 @@ #define RGA_PRESCL_CR_MST_OFFSET (RGA_PRESCL_CR_MST-0x100) //repeat #define RGA_FADING_CON_OFFSET (RGA_FADING_CON-0x100) -#define RGA_MMU_TLB_OFFSET (RGA_MMU_CTRL-0x100) +#define RGA_MMU_TLB_OFFSET (RGA_MMU_TBL-0x100) void matrix_cal(const struct rga_req *msg, TILE_INFO *tile); diff --git a/drivers/video/rockchip/rga/rga_rop.h b/drivers/video/rockchip/rga/rga_rop.h old mode 100644 new mode 100755 diff --git a/drivers/video/rockchip/rga/rga_type.h b/drivers/video/rockchip/rga/rga_type.h old mode 100644 new mode 100755
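For reference, the new RGA_GET_RESULT ioctl added by this patch copies the session's num_done counter back to userspace as a plain int. The following userspace sketch shows the intended call sequence; the /dev/rga node name is an assumption, since this diff does not show the name under which the misc device is registered.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define RGA_GET_RESULT 0x501a	/* matches the new define in rga.h */

int main(void)
{
	int num_done = 0;
	int fd = open("/dev/rga", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open rga device");
		return 1;
	}

	/* ... submit RGA_BLIT_ASYNC requests here ... */

	if (ioctl(fd, RGA_GET_RESULT, &num_done) < 0)
		perror("RGA_GET_RESULT");
	else
		printf("rga: %d commands completed in this session\n", num_done);

	close(fd);
	return 0;
}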