From 331866ea8dd3bfed33e291f31dca37597f09f962 Mon Sep 17 00:00:00 2001
From: zsq
Date: Tue, 27 Mar 2012 09:28:28 -0700
Subject: [PATCH] modify for mmu table search

---
 drivers/video/rockchip/rga/RGA_API.c      |  20 +-
 drivers/video/rockchip/rga/rga_drv.c      | 232 ++++++++++++++--------
 drivers/video/rockchip/rga/rga_mmu_info.c | 222 +++++++++++++--------
 drivers/video/rockchip/rga/rga_reg_info.c |  14 +-
 4 files changed, 316 insertions(+), 172 deletions(-)

diff --git a/drivers/video/rockchip/rga/RGA_API.c b/drivers/video/rockchip/rga/RGA_API.c
index af89f973e9e0..337c730c583a 100755
--- a/drivers/video/rockchip/rga/RGA_API.c
+++ b/drivers/video/rockchip/rga/RGA_API.c
@@ -93,8 +93,8 @@ uint32_t RGA_gen_two_pro(struct rga_req *msg, struct rga_req *msg1)
     mp = msg1;
 
     w_ratio = (msg->src.act_w << 16) / msg->dst.act_w;
     h_ratio = (msg->src.act_h << 16) / msg->dst.act_h;
-
-    memcpy(&msg1, &msg, sizeof(struct rga_req));
+
+    memcpy(msg1, msg, sizeof(struct rga_req));
 
     msg->dst.format = msg->src.format;
@@ -142,17 +142,27 @@ uint32_t RGA_gen_two_pro(struct rga_req *msg, struct rga_req *msg1)
             msg->src.act_h = (dah - 1) << 3;
         }
     }
+
+    printk("test_2\n");
+
     msg->dst.act_h = dah;
     msg->dst.vir_h = dah;
 
-    msg->dst.yrgb_addr = (u32)rga_service.pre_scale_buf;
+    //msg->dst.yrgb_addr = (u32)rga_service.pre_scale_buf;
     msg->dst.uv_addr = msg->dst.yrgb_addr + stride * dah;
     msg->dst.v_addr = msg->dst.uv_addr + ((stride * dah) >> 1);
 
     msg->render_mode = pre_scaling_mode;
 
-    memcpy(&msg1->src, &msg->dst, sizeof(rga_img_info_t));
-
+    msg1->src.yrgb_addr = msg->dst.yrgb_addr;
+    msg1->src.uv_addr = msg->dst.uv_addr;
+    msg1->src.v_addr = msg->dst.v_addr;
+
+    msg1->src.act_w = msg->dst.act_w;
+    msg1->src.act_h = msg->dst.act_h;
+    msg1->src.vir_w = msg->dst.vir_w;
+    msg1->src.vir_h = msg->dst.vir_h;
+
     return 0;
 }

diff --git a/drivers/video/rockchip/rga/rga_drv.c b/drivers/video/rockchip/rga/rga_drv.c
index 5279ae34e23d..49c5648cb2e9 100755
--- a/drivers/video/rockchip/rga/rga_drv.c
+++ b/drivers/video/rockchip/rga/rga_drv.c
@@ -49,10 +49,11 @@
 #include "rga_mmu_info.h"
 #include "RGA_API.h"
 
-//#include "bug_320x240_swap0_ABGR8888.h"
+#include "bug_320x240_swap0_ABGR8888.h"
 
-#define RGA_TEST 0
+#define RGA_TEST 0
+#define RGA_TEST_TIME 0
 
 #define PRE_SCALE_BUF_SIZE 2048*1024*4
 
@@ -69,6 +70,9 @@
 #define DRIVER_DESC "RGA Device Driver"
 #define DRIVER_NAME "rga"
 
+ktime_t rga_start;
+ktime_t rga_end;
+
 struct rga_drvdata {
     struct miscdevice miscdev;
@@ -148,6 +152,7 @@ static void rga_soft_reset(void)
         ERR("soft reset timeout.\n");
 }
 
+
 static void rga_dump(void)
 {
     int running;
@@ -386,7 +391,7 @@ static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req)
     reg->session = session;
     INIT_LIST_HEAD(&reg->session_link);
     INIT_LIST_HEAD(&reg->status_link);
-
+
     if (req->mmu_info.mmu_en)
     {
         ret = rga_set_mmu_info(reg, req);
@@ -400,6 +405,12 @@ static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req)
             return NULL;
         }
     }
+
+    #if RGA_TEST_TIME
+    rga_end = ktime_get();
+    rga_end = ktime_sub(rga_end, rga_start);
+    printk("one cmd end time %d\n", (int)ktime_to_us(rga_end));
+    #endif
 
     RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg);
 
@@ -435,10 +446,14 @@ static struct rga_reg * rga_reg_init_2(rga_session *session, struct rga_req *req
         break;
     }
 
-    reg0->session = session;
+    reg0->session = session;
     INIT_LIST_HEAD(&reg0->session_link);
     INIT_LIST_HEAD(&reg0->status_link);
 
+    reg1->session = session;
+    INIT_LIST_HEAD(&reg1->session_link);
+    INIT_LIST_HEAD(&reg1->status_link);
+
     if(req0->mmu_info.mmu_en)
     {
         ret = rga_set_mmu_info(reg0, req0);
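[Note: the RGA_TEST_TIME blocks added above measure command setup with the kernel ktime API. The following is a minimal, self-contained sketch of that pattern; it is not part of the patch, and rga_time_one_cmd() is a placeholder name.]

    #include <linux/ktime.h>
    #include <linux/printk.h>

    /* Hypothetical helper: time one unit of work the same way the
     * RGA_TEST_TIME blocks in this patch do. */
    static void rga_time_one_cmd(void (*work)(void))
    {
        ktime_t start, end;

        start = ktime_get();          /* monotonic start timestamp */
        work();                       /* the command setup being measured */
        end = ktime_sub(ktime_get(), start);

        /* ktime_to_us() returns the elapsed time in microseconds (s64) */
        printk("one cmd end time %d\n", (int)ktime_to_us(end));
    }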
@@ -452,19 +467,27 @@ static struct rga_reg * rga_reg_init_2(rga_session *session, struct rga_req *req
 
     if(req1->mmu_info.mmu_en)
     {
-        ret = rga_set_mmu_info(reg0, req1);
+        ret = rga_set_mmu_info(reg1, req1);
         if(ret < 0) {
             printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);
             break;
         }
     }
 
-    RGA_gen_reg_info(req1, (uint8_t *)reg0->cmd_reg);
+    RGA_gen_reg_info(req1, (uint8_t *)reg1->cmd_reg);
+
+    {
+        uint32_t i;
+        for(i=0; i<28; i++)
+        {
+            printk("reg1->cmd_reg[%d] is %.8x\n", i, reg1->cmd_reg[i]);
+        }
+    }
 
     spin_lock_irqsave(&rga_service.lock, flag);
     list_add_tail(&reg0->status_link, &rga_service.waiting);
-    list_add_tail(&reg1->status_link, &rga_service.waiting);
-    list_add_tail(&reg0->session_link, &session->waiting);
+    list_add_tail(&reg0->session_link, &session->waiting);
+    list_add_tail(&reg1->status_link, &rga_service.waiting);
     list_add_tail(&reg1->session_link, &session->waiting);
     spin_unlock_irqrestore(&rga_service.lock, flag);
 
@@ -524,7 +547,7 @@ static void rga_try_set_reg(uint32_t num)
 
     if (!num)
     {
-        #ifdef RGA_TEST
+        #if RGA_TEST
         printk("rga try set reg cmd num is 0\n");
         #endif
 
@@ -535,29 +558,42 @@ static void rga_try_set_reg(uint32_t num)
     if (!list_empty(&rga_service.waiting))
     {
         do
-        {
-            struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);
+        {
+            struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);
             if((rga_read(RGA_STATUS) & 0x1))
             {
                 /* RGA is busy */
+                printk("no idle is here \n");
+
                 if((atomic_read(&rga_service.cmd_num) <= 0xf) && (atomic_read(&rga_service.int_disable) == 0))
                 {
-                    rga_copy_reg(reg, atomic_read(&rga_service.cmd_num));
+                    uint32_t offset;
+
+                    offset = atomic_read(&rga_service.cmd_num);
+                    rga_copy_reg(reg, offset);
                     rga_reg_from_wait_to_run(reg);
+
+                    dmac_flush_range(&rga_service.cmd_buff[offset*28], &rga_service.cmd_buff[(offset + 1)*28]);
+                    outer_flush_range(virt_to_phys(&rga_service.cmd_buff[offset*28]),
+                                      virt_to_phys(&rga_service.cmd_buff[(offset + 1)*28]));
 
                     rga_write(0x1<<10, RGA_INT);
 
-                    #ifdef RGA_TEST
+                    #if RGA_TEST
                     {
                         uint32_t i;
-                        printk("CMD_REG\n");
-                        for(i=0; i<28; i++)
-                            printk("%.8x\n", rga_service.cmd_buff[i + 28*atomic_read(&rga_service.cmd_num)]);
+                        printk("CMD_REG num is %.8x\n", offset);
+                        for(i=0; i<7; i++)
+                        {
+                            printk("%.8x ", rga_service.cmd_buff[i*4 + 0 + 28*atomic_read(&rga_service.cmd_num)]);
+                            printk("%.8x ", rga_service.cmd_buff[i*4 + 1 + 28*atomic_read(&rga_service.cmd_num)]);
+                            printk("%.8x ", rga_service.cmd_buff[i*4 + 2 + 28*atomic_read(&rga_service.cmd_num)]);
+                            printk("%.8x\n",rga_service.cmd_buff[i*4 + 3 + 28*atomic_read(&rga_service.cmd_num)]);
+                        }
                     }
                     #endif
 
-                    atomic_set(&reg->session->done, 0);
-
+                    atomic_set(&reg->session->done, 0);
                     rga_write((0x1<<3)|(0x1<<1), RGA_CMD_CTRL);
 
                     if(atomic_read(&reg->int_enable))
@@ -566,9 +602,10 @@ static void rga_try_set_reg(uint32_t num)
             }
             else
             {
-                /* RGA is idle */
+                /* RGA is idle */
                 rga_copy_reg(reg, 0);
                 rga_reg_from_wait_to_run(reg);
+
                 dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);
                 outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]),virt_to_phys(&rga_service.cmd_buff[28]));
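[Note: the busy-path change above copies the register set into command-buffer slot "offset" and then flushes both the L1 D-cache (dmac_flush_range) and the outer L2 cache (outer_flush_range) before the RGA_INT/RGA_CMD_CTRL writes, so the engine's DMA reads the new descriptor rather than a stale cache line. A reduced sketch of that ordering, assuming the same 3.0-era ARM cache helpers the patch uses; rga_push_cmd() is a placeholder name.]

    #include <linux/types.h>
    #include <asm/cacheflush.h>      /* dmac_flush_range() on 3.0-era ARM */
    #include <asm/outercache.h>      /* outer_flush_range() */
    #include <asm/memory.h>          /* virt_to_phys() */

    #define RGA_CMD_WORDS 28         /* one command = 28 32-bit registers */

    /* Sketch only: make one prepared command at slot `offset` visible to the engine. */
    static void rga_push_cmd(u32 *cmd_buff, u32 offset)
    {
        u32 *slot = &cmd_buff[offset * RGA_CMD_WORDS];

        /* Write back the L1 D-cache lines covering the slot ... */
        dmac_flush_range(slot, slot + RGA_CMD_WORDS);
        /* ... and the outer (L2) cache, addressed physically. */
        outer_flush_range(virt_to_phys(slot),
                          virt_to_phys(slot + RGA_CMD_WORDS));

        /* Only after the flush is it safe to kick the hardware
         * (the RGA_INT / RGA_CMD_CTRL writes in the hunk above). */
    }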
@@ -583,13 +620,13 @@ static void rga_try_set_reg(uint32_t num)
                 /* CMD buff */
                 rga_write(virt_to_phys(rga_service.cmd_buff) & (~PAGE_MASK), RGA_CMD_ADDR);
 
-                #ifdef RGA_TEST
+                #if RGA_TEST
                 {
-                    uint32_t i;
-                    printk("CMD_REG\n");
-                    for (i=0; i<28; i++)
-                        printk("%.8x\n", rga_service.cmd_buff[i]);
-
+                    uint32_t i, *p;
+                    p = rga_service.cmd_buff;
+                    printk("CMD_REG\n");
+                    for (i=0; i<7; i++)
+                        printk("%.8x %.8x %.8x %.8x\n", p[i*4+0], p[i*4+1], p[i*4+2], p[i*4+3]);
                 }
                 #endif
 
@@ -599,23 +636,19 @@ static void rga_try_set_reg(uint32_t num)
                 /* All CMD finish int */
                 rga_write(0x1<<10, RGA_INT);
 
-                //rga_write(1, RGA_MMU_STA_CTRL);
-
                 /* Start proc */
                 atomic_set(&reg->session->done, 0);
                 rga_write(0x1, RGA_CMD_CTRL);
 
-                //while(1)
-                //    printk("mmu_status is %.8x\n", rga_read(RGA_MMU_STA));
-
-                #ifdef RGA_TEST
+                #if RGA_TEST
                 {
                     uint32_t i;
                     printk("CMD_READ_BACK_REG\n");
-                    for (i=0; i<28; i++)
-                        printk("%.8x\n", rga_read(0x100 + i*4));
+                    for (i=0; i<7; i++)
+                        printk("%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i*16 + 0),
+                               rga_read(0x100 + i*16 + 4), rga_read(0x100 + i*16 + 8), rga_read(0x100 + i*16 + 12));
                 }
-                #endif
+                #endif
             }
             num--;
         }
@@ -669,7 +702,7 @@ static int rga_blit_async(rga_session *session, struct rga_req *req)
     if(ret == -EINVAL) {
         return -EINVAL;
     }
-
+
     reg = rga_reg_init(session, req);
     if(reg == NULL) {
         return -EFAULT;
@@ -721,10 +754,11 @@ static int rga_blit_sync(rga_session *session, struct rga_req *req)
         {
             return -EINVAL;
         }
+        memset(req2, 0, sizeof(struct rga_req));
 
         RGA_gen_two_pro(req, req2);
 
-        reg = rga_reg_init_2(session, req2, req);
+        reg = rga_reg_init_2(session, req, req2);
         if (NULL == reg)
         {
             return -EFAULT;
@@ -743,16 +777,20 @@ static int rga_blit_sync(rga_session *session, struct rga_req *req)
         {
             return -EFAULT;
         }
-
+
+        //printk("rga_reg_int start \n");
         reg = rga_reg_init(session, req);
         if(reg == NULL)
         {
            return -EFAULT;
        }
+        //printk("rga_reg_int end \n");
 
         atomic_set(&reg->int_enable, 1);
         rga_try_set_reg(1);
     }
+
+
     ret_timeout = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);
 
     if (unlikely(ret_timeout< 0))
@@ -778,6 +816,11 @@ static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
     struct rga_req *req;
     int ret = 0;
     rga_session *session = (rga_session *)file->private_data;
+
+    #if RGA_TEST_TIME
+    rga_start = ktime_get();
+    #endif
+
     if (NULL == session)
     {
         printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);
@@ -790,19 +833,23 @@ static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
         printk("%s [%d] get rga_req mem failed\n",__FUNCTION__,__LINE__);
         ret = -EINVAL;
     }
-
-    if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req))))
-    {
-        ERR("copy_from_user failed\n");
-        ret = -EFAULT;
-    }
-
+
     switch (cmd)
     {
         case RGA_BLIT_SYNC:
+            if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req))))
+            {
+                ERR("copy_from_user failed\n");
+                ret = -EFAULT;
+            }
             ret = rga_blit_sync(session, req);
             break;
         case RGA_BLIT_ASYNC:
+            if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req))))
+            {
+                ERR("copy_from_user failed\n");
+                ret = -EFAULT;
+            }
             ret = rga_blit_async(session, req);
             break;
         case RGA_FLUSH:
@@ -819,6 +866,8 @@ static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
     if(req != NULL) {
         kfree(req);
     }
+
+
     return ret;
 }
 
@@ -896,9 +945,7 @@ static irqreturn_t rga_irq(int irq, void *dev_id)
         printk("RGA is not idle!\n");
         rga_soft_reset();
     }
-
-
     spin_lock(&rga_service.lock);
 
     do
     {
@@ -1061,9 +1108,10 @@ static int __devinit rga_drv_probe(struct platform_device *pdev)
         goto err_clock;
     }
 
-
+
-    data->axi_clk = clk_get(&pdev->dev, "aclk_rga");
+    data->axi_clk = clk_get(NULL, "aclk_rga");
+
     if (IS_ERR(data->axi_clk))
     {
         ERR("failed to find rga axi clock source\n");
@@ -1078,7 +1126,6 @@ static int __devinit rga_drv_probe(struct platform_device *pdev)
         ret = -ENOENT;
         goto err_clock;
     }
-
     #endif
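[Note: rga_ioctl() above now performs copy_from_user() inside the RGA_BLIT_SYNC / RGA_BLIT_ASYNC cases instead of unconditionally, so commands that carry no struct rga_req payload no longer touch user memory. A hedged sketch of the resulting shape; RGA_BLIT_* and struct rga_req come from the driver's headers, while rga_ioctl_sketch() is only illustrative.]

    #include <linux/uaccess.h>

    /* Illustrative only: per-command argument copy inside the ioctl switch. */
    static long rga_ioctl_sketch(unsigned int cmd, unsigned long arg,
                                 struct rga_req *req)
    {
        long ret = 0;

        switch (cmd) {
        case RGA_BLIT_SYNC:
        case RGA_BLIT_ASYNC:
            /* only the blit commands carry a struct rga_req payload */
            if (copy_from_user(req, (struct rga_req __user *)arg, sizeof(*req)))
                return -EFAULT;
            /* ... hand req to rga_blit_sync() / rga_blit_async() ... */
            break;
        default:
            break;
        }
        return ret;
    }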
@@ -1188,7 +1235,7 @@ static int rga_drv_remove(struct platform_device *pdev)
     if(data->pd_display){
         clk_put(data->pd_display);
     }
-    #endif
+    #endif
 
     kfree(data);
     return 0;
@@ -1207,7 +1254,7 @@ static struct platform_driver rga_driver = {
 };
 
-//void rga_test_0(void);
+void rga_test_0(void);
 
 
 static int __init rga_init(void)
@@ -1272,12 +1319,15 @@ static void __exit rga_exit(void)
 }
 
-#if 0
-extern uint32_t ABGR8888_320_240_swap0[240][320];
+#if 1
+extern struct fb_info * rk_get_fb(int fb_id);
+EXPORT_SYMBOL(rk_get_fb);
 
-unsigned int src_buf[800*480];
-unsigned int dst_buf[800*480];
-unsigned int mmu_buf[1024];
+extern void rk_direct_fb_show(struct fb_info * fbi);
+EXPORT_SYMBOL(rk_direct_fb_show);
+
+extern uint32_t ABGR8888_320_240_swap0[240][320];
+unsigned int dst_buf[1280*800];
 
 void rga_test_0(void)
 {
@@ -1285,7 +1335,7 @@ void rga_test_0(void)
     rga_session session;
     unsigned int *src, *dst;
-    int i;
+    struct fb_info *fb;
 
     session.pid = current->pid;
     INIT_LIST_HEAD(&session.waiting);
@@ -1298,10 +1348,12 @@ void rga_test_0(void)
     atomic_set(&session.num_done, 0);
     //file->private_data = (void *)session;
 
+    fb = rk_get_fb(0);
+
     memset(&req, 0, sizeof(struct rga_req));
 
     src = ABGR8888_320_240_swap0;
     dst = dst_buf;
-
+
     #if 0
     memset(src_buf, 0x80, 800*480*4);
     memset(dst_buf, 0xcc, 800*480*4);
@@ -1318,43 +1370,59 @@ void rga_test_0(void)
     req.src.vir_w = 320;
     req.src.vir_h = 240;
-    req.src.yrgb_addr = src;
+    req.src.yrgb_addr = (uint32_t)src;
 
-    req.dst.act_w = 320;
-    req.dst.act_h = 240;
+    req.dst.act_w = 100;
+    req.dst.act_h = 80;
 
-    req.dst.vir_w = 800;
-    req.dst.vir_h = 480;
-    req.dst.yrgb_addr = dst;
+    req.dst.vir_w = 1280;
+    req.dst.vir_h = 800;
+    req.dst.x_offset = 200;
+    req.dst.y_offset = 200;
+    req.dst.yrgb_addr = (uint32_t)dst;
 
     req.clip.xmin = 0;
-    req.clip.xmax = 799;
+    req.clip.xmax = 1279;
     req.clip.ymin = 0;
-    req.clip.ymax = 479;
-
-
-
+    req.clip.ymax = 799;
+
     req.render_mode = 0;
-    req.rotate_mode = 0;
+    req.rotate_mode = 1;
+    req.scale_mode = 2;
+
+    req.sina = 0;
+    req.cosa = 0x10000;
 
     req.mmu_info.mmu_flag = 0x21;
     req.mmu_info.mmu_en = 1;
 
     rga_blit_sync(&session, &req);
 
-    #if 0
-    outer_inv_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480]));
-    dmac_inv_range(&dst_buf[0], &dst_buf[800*480]);
+    fb->var.bits_per_pixel = 32;
 
-    for(i=0; i<800*480; i++)
-    {
-        if(src[i] != dst[i])
-        {
-            printk("src != dst %d\n", i);
-            printk("src = %.8x, dst = %.8x \n", src[i], dst[i]);
-        }
-    }
-    #endif
+    fb->var.xres = 1280;
+    fb->var.yres = 800;
+
+    fb->var.red.length = 8;
+    fb->var.red.offset = 0;
+    fb->var.red.msb_right = 0;
+
+    fb->var.green.length = 8;
+    fb->var.green.offset = 8;
+    fb->var.green.msb_right = 0;
+
+    fb->var.blue.length = 8;
+    fb->var.blue.offset = 16;
+    fb->var.blue.msb_right = 0;
+
+    fb->var.transp.length = 8;
+    fb->var.transp.offset = 24;
+    fb->var.transp.msb_right = 0;
+
+    fb->fix.smem_start = virt_to_phys(dst);
+
+    rk_direct_fb_show(fb);
+
 }
 #endif

diff --git a/drivers/video/rockchip/rga/rga_mmu_info.c b/drivers/video/rockchip/rga/rga_mmu_info.c
index 27d3a25d3685..5f802d1bd10e 100755
--- a/drivers/video/rockchip/rga/rga_mmu_info.c
+++ b/drivers/video/rockchip/rga/rga_mmu_info.c
@@ -218,7 +218,7 @@ static int rga_MapUserMemory(struct page **pages,
     int32_t result;
     uint32_t i;
     uint32_t status;
-
+    uint32_t Address;
     status = 0;
 
     do
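[Note: rga_MapUserMemory(), reworked in the hunks that follow, first tries to pin the user pages with get_user_pages() under mmap_sem and only falls back to a manual page-table walk when that fails (typically for VM_PFNMAP mappings). A sketch of the pinning step under the 3.0-era API this driver targets; rga_pin_user_pages() and its parameter names are illustrative.]

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Sketch: pin `page_count` user pages starting at page index `first_page`. */
    static int rga_pin_user_pages(unsigned long first_page, int page_count,
                                  struct page **pages)
    {
        int result;

        down_read(&current->mm->mmap_sem);
        result = get_user_pages(current, current->mm,
                                first_page << PAGE_SHIFT, page_count,
                                1 /* write */, 0 /* force */,
                                pages, NULL);
        up_read(&current->mm->mmap_sem);

        /* may be < page_count, e.g. for VM_PFNMAP mappings */
        return result;
    }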
@@ -234,30 +234,37 @@ static int rga_MapUserMemory(struct page **pages,
                                 NULL
                                 );
         up_read(&current->mm->mmap_sem);
-
+
         if(result <= 0 || result < pageCount)
         {
             struct vm_area_struct *vma;
 
-            vma = find_vma(current->mm, Memory);
-
-            if (vma && (vma->vm_flags & VM_PFNMAP) )
+            for(i=0; i<pageCount; i++)
             {
-                do
-                {
-                    pte_t       * pte;
-                    spinlock_t  * ptl;
-                    unsigned long pfn;
-                    pgd_t * pgd = pgd_offset(current->mm, Memory);
-                    pud_t * pud = pud_offset(pgd, Memory);
-                    if (pud)
+                vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
+
+                if (vma && (vma->vm_flags & VM_PFNMAP) )
+                {
+                    do
                     {
-                        pmd_t * pmd = pmd_offset(pud, Memory);
-                        if (pmd)
+                        pte_t * pte;
+                        spinlock_t * ptl;
+                        unsigned long pfn;
+
+                        pgd_t * pgd = pgd_offset(current->mm, ((Memory + i)<< PAGE_SHIFT));
+                        pud_t * pud = pud_offset(pgd, ((Memory + i) << PAGE_SHIFT));
+                        if (pud)
                         {
-                            pte = pte_offset_map_lock(current->mm, pmd, Memory, &ptl);
-                            if (!pte)
+                            pmd_t * pmd = pmd_offset(pud, ((Memory + i) << PAGE_SHIFT));
+                            if (pmd)
+                            {
+                                pte = pte_offset_map_lock(current->mm, pmd, ((Memory + i)<< PAGE_SHIFT), &ptl);
+                                if (!pte)
+                                {
+                                    break;
+                                }
+                            }
+                            else
                             {
                                 break;
                             }
@@ -266,46 +273,43 @@ static int rga_MapUserMemory(struct page **pages,
                         {
                             break;
                         }
-                    }
-                    else
-                    {
-                        break;
-                    }
 
-                    pfn = pte_pfn(*pte);
-
-                    pte_unmap_unlock(pte, ptl);
+                        pfn = pte_pfn(*pte);
 
-                    /* Free the page table. */
-                    if (pages != NULL)
-                    {
-                        /* Release the pages if any. */
-                        if (result > 0)
+                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
+
+                        pte_unmap_unlock(pte, ptl);
+
+                        /* Free the page table. */
+                        if (pages != NULL)
                         {
-                            for (i = 0; i < result; i++)
+                            /* Release the pages if any. */
+                            if (result > 0)
                             {
-                                if (pages[i] == NULL)
+                                for (i = 0; i < result; i++)
                                 {
-                                    break;
-                                }
+                                    if (pages[i] == NULL)
+                                    {
+                                        break;
+                                    }
 
-                                page_cache_release(pages[i]);
+                                    page_cache_release(pages[i]);
+                                }
                             }
                         }
+
+                        pageTable[i] = Address;
                     }
-
-                    return 0;
+                    while (0);
                 }
-                while (0);
-
-                status = RGA_OUT_OF_RESOURCES;
-                break;
+                else
+                {
+                    status = RGA_OUT_OF_RESOURCES;
+                    break;
+                }
             }
-            else
-            {
-                status = RGA_OUT_OF_RESOURCES;
-                break;
-            }
+
+            return 0;
         }
 
         for (i = 0; i < pageCount; i++)
         {
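[Note: for VM_PFNMAP VMAs get_user_pages() does not return pages, so the reworked loop above resolves each page by walking the page tables directly, now one page at a time ((Memory + i) << PAGE_SHIFT) instead of only the first address. A condensed sketch of that walk, assuming a user address `va` mapped in current->mm; rga_va_to_pa() is a placeholder name and, like the patch, it omits the pgd/pud presence checks a production walker would add.]

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /* Sketch: translate one pfn-mapped user address to a physical address. */
    static int rga_va_to_pa(unsigned long va, unsigned long *pa)
    {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        pgd = pgd_offset(current->mm, va);
        pud = pud_offset(pgd, va);
        if (!pud)
            return -EINVAL;
        pmd = pmd_offset(pud, va);
        if (!pmd)
            return -EINVAL;
        pte = pte_offset_map_lock(current->mm, pmd, va, &ptl);
        if (!pte)
            return -EINVAL;

        /* physical page number from the PTE, plus the offset inside the page */
        *pa = (pte_pfn(*pte) << PAGE_SHIFT) | (va & ~PAGE_MASK);
        pte_unmap_unlock(pte, ptl);
        return 0;
    }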
@@ -323,10 +327,10 @@ static int rga_MapUserMemory(struct page **pages,
     }
 
     /* Fill the page table. */
-    for(i=0; i<pageCount; i++)
+    for(i=0; i<pageCount; i++)
     {
         /* Get the physical address from page struct. */
         pageTable[i] = page_to_phys(pages[i]);
     }
 
     return 0;
@@ ... @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
     DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
-                                  req->dst.format, req->dst.vir_w, req->dst.vir_h,
+                                  req->dst.format, req->dst.vir_w, (req->dst.act_h + req->dst.y_offset),
                                   &DstStart);
 
     if(DstMemSize == 0) {
         return -EINVAL;
     }
@@ -402,35 +406,32 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
         if(pages == NULL) {
-            pr_err("RGA MMU malloc pages mem failed");
+            pr_err("RGA MMU malloc pages mem failed\n");
             status = RGA_MALLOC_ERROR;
             break;
         }
 
         MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
         if(MMU_Base == NULL) {
-            pr_err("RGA MMU malloc MMU_Base point failed");
+            pr_err("RGA MMU malloc MMU_Base point failed\n");
             status = RGA_MALLOC_ERROR;
             break;
         }
 
-        printk("MMU_Base addr is %.8x\n", MMU_Base);
-        printk("CMDStart is %.8x\n",CMDStart);
-
         for(i=0; i<CMDMemSize; i++) {
             MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
         }
 
         if(req->src.yrgb_addr < KERNEL_SPACE_VALID)
         {
+
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
             if (ret < 0) {
-                pr_err("rga map src memory failed");
+                pr_err("rga map src memory failed\n");
                 status = ret;
                 break;
             }
+
         }
         else
         {
@@ -454,17 +455,25 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
                 }
             }
         }
-
-        printk("MMU_Base[1] = %.8x\n", MMU_Base[1]);
 
         if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
         {
+            #if 0
+            ktime_t start, end;
+            start = ktime_get();
+            #endif
             ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
             if (ret < 0) {
-                pr_err("rga map dst memory failed");
+                pr_err("rga map dst memory failed\n");
                 status = ret;
                 break;
             }
+
+            #if 0
+            end = ktime_get();
+            end = ktime_sub(end, start);
+            printk("dst mmu map time = %d\n", (int)ktime_to_us(end));
+            #endif
         }
         else
         {
@@ -479,14 +488,23 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         /* zsq
          * change the buf address in req struct
         */
+        #if 0
+        printk("CMDMemSize is %.8x\n", CMDMemSize);
+        printk("SrcMemSize is %.8x\n", SrcMemSize);
+        printk("DstMemSize is %.8x\n", DstMemSize);
+        printk("CMDStart is %.8x\n", CMDStart);
+        printk("SrcStart is %.8x\n", SrcStart);
+        printk("DstStart is %.8x\n", DstStart);
+        #endif
+
         req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
 
         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
         req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
-        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
+        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
 
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
-
+
         /*record the malloc buf for the cmd end to release*/
         reg->MMU_base = MMU_Base;
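[Note: once the page table is filled, the hunks above program mmu_info.base_addr with the physical address of the table shifted right by 2 (the hardware takes it in 4-byte-entry units) and rewrite each buffer address as (page-table slot << PAGE_SHIFT) | offset-within-page, so the addresses the engine sees index the freshly built table rather than real memory; this is the "lie to MMU" the driver's own comment mentions. A worked sketch of that split, assuming the command pages occupy slots [0, CMDMemSize); rga_fake_addr() is a placeholder name.]

    #include <linux/types.h>
    #include <asm/page.h>

    /* Sketch: rebase a user virtual address so it refers to entry
     * `first_slot` of the RGA's private page table. */
    static u32 rga_fake_addr(u32 user_addr, u32 first_slot)
    {
        u32 page_offset = user_addr & ~PAGE_MASK;   /* offset inside the page  */
        u32 table_index = first_slot << PAGE_SHIFT; /* slot, expressed as addr */

        return table_index | page_offset;
    }

    /* usage, mirroring the hunk above:
     *     req->src.yrgb_addr = rga_fake_addr(req->src.yrgb_addr, CMDMemSize);
     */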
failed"); + pr_err("rga map src memory failed\n"); status = ret; break; } @@ -634,6 +652,10 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req * /*record the malloc buf for the cmd end to release*/ reg->MMU_base = MMU_Base; + /* flush data to DDR */ + dmac_flush_range(MMU_Base, (MMU_Base + AllSize)); + outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize)); + /* Free the page table */ if (pages != NULL) { kfree(pages); @@ -734,6 +756,10 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req /*record the malloc buf for the cmd end to release*/ reg->MMU_base = MMU_Base; + /* flush data to DDR */ + dmac_flush_range(MMU_Base, (MMU_Base + AllSize)); + outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize)); + /* Free the page table */ if (pages != NULL) kfree(pages); @@ -829,6 +855,10 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_ /*record the malloc buf for the cmd end to release*/ reg->MMU_base = MMU_Base; + /* flush data to DDR */ + dmac_flush_range(MMU_Base, (MMU_Base + AllSize)); + outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize)); + /* Free the page table */ if (pages != NULL) { kfree(pages); @@ -888,14 +918,14 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed"); + pr_err("RGA MMU malloc pages mem failed\n"); status = RGA_MALLOC_ERROR; break; } MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); if(pages == NULL) { - pr_err("RGA MMU malloc MMU_Base point failed"); + pr_err("RGA MMU malloc MMU_Base point failed\n"); status = RGA_MALLOC_ERROR; break; } @@ -909,7 +939,7 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize); if (ret < 0) { - pr_err("rga map src memory failed"); + pr_err("rga map src memory failed\n"); status = ret; break; } @@ -930,7 +960,7 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize); if (ret < 0) { - pr_err("rga map dst memory failed"); + pr_err("rga map dst memory failed\n"); status = ret; break; } @@ -960,6 +990,10 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r /*record the malloc buf for the cmd end to release*/ reg->MMU_base = MMU_Base; + /* flush data to DDR */ + dmac_flush_range(MMU_Base, (MMU_Base + AllSize)); + outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize)); + /* Free the page table */ if (pages != NULL) { kfree(pages); @@ -1018,11 +1052,20 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req) } AllSize = SrcMemSize + DstMemSize + CMDMemSize; + + + #if 0 + printk("AllSize = %d\n", AllSize); + printk("SrcSize = %d\n", SrcMemSize); + printk("CMDSize = %d\n", CMDMemSize); + printk("DstSize = %d\n", DstMemSize); + printk("DstStart = %d\n", DstStart); + #endif pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL); if(pages == NULL) { - pr_err("RGA MMU malloc pages mem failed"); + pr_err("RGA MMU malloc pages mem failed\n"); status = RGA_MALLOC_ERROR; break; } @@ -1033,7 +1076,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, 
@@ -1033,7 +1076,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
          */
         MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
         if(pages == NULL) {
-            pr_err("RGA MMU malloc MMU_Base point failed");
+            pr_err("RGA MMU malloc MMU_Base point failed\n");
             status = RGA_MALLOC_ERROR;
             break;
         }
@@ -1048,7 +1091,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         {
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
             if (ret < 0) {
-                pr_err("rga map src memory failed");
+                pr_err("rga map src memory failed\n");
                 status = ret;
                 break;
             }
@@ -1090,7 +1133,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
             ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
             if (ret < 0) {
-                pr_err("rga map dst memory failed");
+                pr_err("rga map dst memory failed\n");
                 status = ret;
                 break;
             }
@@ -1100,6 +1143,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
          * change the buf address in req struct
          * for the reason of lie to MMU
          */
+
         req->mmu_info.base_addr = virt_to_phys(MMU_Base)>>2;
 
         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
@@ -1107,10 +1151,14 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
 
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
-
+
         /*record the malloc buf for the cmd end to release*/
         reg->MMU_base = MMU_Base;
 
+        /* flush data to DDR */
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
+
         /* Free the page table */
         if (pages != NULL)
         {
@@ -1133,7 +1181,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
 
 static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
 {
-    int SrcMemSize, DstMemSize, CMDMemSize;
+    int SrcMemSize, CMDMemSize;
     uint32_t SrcStart, CMDStart;
     struct page **pages = NULL;
     uint32_t i;
@@ -1157,18 +1205,18 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg
             return -EINVAL;
         }
 
-        AllSize = SrcMemSize + DstMemSize + CMDMemSize;
+        AllSize = SrcMemSize + CMDMemSize;
 
         pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
         if(pages == NULL) {
-            pr_err("RGA MMU malloc pages mem failed");
+            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
         }
 
         MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
         if(pages == NULL) {
-            pr_err("RGA MMU malloc MMU_Base point failed");
+            pr_err("RGA MMU malloc MMU_Base point failed\n");
             status = RGA_MALLOC_ERROR;
             break;
         }
@@ -1181,7 +1229,7 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg
         {
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
             if (ret < 0) {
-                pr_err("rga map src memory failed");
+                pr_err("rga map src memory failed\n");
                 return -EINVAL;
             }
         }
@@ -1206,6 +1254,10 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg
         /*record the malloc buf for the cmd end to release*/
         reg->MMU_base = MMU_Base;
 
+        /* flush data to DDR */
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
+
         if (pages != NULL) {
             /* Free the page table */
             kfree(pages);
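[Note: the repeated "flush data to DDR" additions matter because MMU_Base is written by the CPU but read by the RGA's own table walker over the bus; without writing the dirty lines out of L1 and the outer L2 cache, the walker can fetch stale entries. The core of the pattern, reduced to a sketch under the same ARM cache helpers the patch already uses; rga_flush_table() is a placeholder name.]

    #include <linux/types.h>
    #include <asm/cacheflush.h>
    #include <asm/outercache.h>
    #include <asm/memory.h>

    /* Sketch: make a CPU-built page table visible to the RGA's table walker. */
    static void rga_flush_table(uint32_t *table, size_t entries)
    {
        dmac_flush_range(table, table + entries);              /* L1 D-cache */
        outer_flush_range(virt_to_phys(table),
                          virt_to_phys(table + entries));      /* outer/L2  */
    }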
@@ -1253,14 +1305,14 @@ static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_
         pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
         if(pages == NULL) {
-            pr_err("RGA MMU malloc pages mem failed");
+            pr_err("RGA MMU malloc pages mem failed\n");
             status = RGA_MALLOC_ERROR;
             break;
         }
 
         MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
         if(pages == NULL) {
-            pr_err("RGA MMU malloc MMU_Base point failed");
+            pr_err("RGA MMU malloc MMU_Base point failed\n");
             status = RGA_MALLOC_ERROR;
             break;
         }
@@ -1273,7 +1325,7 @@ static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_
         {
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
             if (ret < 0) {
-                pr_err("rga map src memory failed");
+                pr_err("rga map src memory failed\n");
                 status = ret;
                 break;
             }
@@ -1299,6 +1351,10 @@ static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_
         /*record the malloc buf for the cmd end to release*/
         reg->MMU_base = MMU_Base;
 
+        /* flush data to DDR */
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
+
         if (pages != NULL) {
             /* Free the page table */
             kfree(pages);
diff --git a/drivers/video/rockchip/rga/rga_reg_info.c b/drivers/video/rockchip/rga/rga_reg_info.c
index 15a298dbc313..f7b19cde0318 100755
--- a/drivers/video/rockchip/rga/rga_reg_info.c
+++ b/drivers/video/rockchip/rga/rga_reg_info.c
@@ -1003,6 +1003,11 @@ RGA_set_bitblt_reg_info(u8 *base, const struct rga_req * msg, TILE_INFO *tile)
         xp = CLIP(xp, msg->src.x_offset, msg->src.x_offset + msg->src.act_w - 1);
         yp = CLIP(yp, msg->src.y_offset, msg->src.y_offset + msg->src.act_h - 1);
     }
+
+    printk("xoffset = %.8x\n", msg->src.x_offset);
+    printk("yoffset = %.8x\n", msg->src.y_offset);
+    printk("xp = %.8x\n", xp);
+    printk("yp = %.8x\n", yp);
 
     switch(msg->src.format)
     {
@@ -1047,6 +1052,8 @@ RGA_set_bitblt_reg_info(u8 *base, const struct rga_req * msg, TILE_INFO *tile)
             break;
     }
 
+    printk("y_addr is %.8x\n", y_addr);
+
     *bRGA_SRC_Y_MST = y_addr;
     *bRGA_SRC_CB_MST = u_addr;
     *bRGA_SRC_CR_MST = v_addr;
@@ -1290,8 +1297,11 @@ RGA_set_pre_scale_reg_info(u8 *base, const struct rga_req *msg)
     dst_width = msg->dst.act_w;
     dst_height = msg->dst.act_h;
 
-    h_ratio = (src_width )<<16 / dst_width;
-    v_ratio = (src_height)<<16 / dst_height;
+    printk("src_act_w = %.8x, src_act_h =%.8x dst_act_w = %.8x, dst_act_h = %.8x\n",
+            msg->src.act_w, msg->src.act_h, msg->dst.act_w, msg->dst.act_h);
+
+    h_ratio = (src_width <<16) / dst_width;
+    v_ratio = (src_height<<16) / dst_height;
 
     if (h_ratio <= (1<<16))
         h_ratio = 0;
-- 
2.34.1
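[Note: the last hunk fixes an operator-precedence bug. In C, division binds tighter than the shift operator, so "(src_width )<<16 / dst_width" parses as "src_width << (16 / dst_width)" rather than a 16.16 fixed-point ratio. A tiny standalone check of the corrected form, using illustrative values:]

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t src_width = 320, dst_width = 100;

        uint32_t wrong = src_width << 16 / dst_width;   /* == src_width << 0 == 320 */
        uint32_t right = (src_width << 16) / dst_width; /* 16.16 ratio, about 3.2x  */

        printf("wrong=%u right=%u (%.2f)\n",
               (unsigned)wrong, (unsigned)right, right / 65536.0);
        return 0;
    }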