From 5b971e824349f5e9a857ef39ec935da16cd29bf5 Mon Sep 17 00:00:00 2001
From: zsq
Date: Tue, 10 Apr 2012 22:37:03 -0800
Subject: [PATCH] fix shutdown rga crash bug

Stop mapping the RGA command buffer through the RGA MMU:
rga_try_set_reg() now writes the physical address of cmd_buff straight
to RGA_CMD_ADDR and clears RGA_MMU_CTRL, and the CMD-buffer entries are
dropped from every page table built in rga_mmu_info.c.  Also shrink
cmd_buff from 28*16 to 28*8 words, increment total_running/task_running
when a command is started, gate the bus-error print in the ISR behind
RGA_INFO_BUS_ERROR, and have rga_blit() run rga_check_param() before
RGA_gen_two_pro().

---
 drivers/video/rockchip/rga/rga.h          |   2 +-
 drivers/video/rockchip/rga/rga_drv.c      |  89 +++++-----
 drivers/video/rockchip/rga/rga_mmu_info.c | 161 ++++++++--------
 3 files changed, 109 insertions(+), 143 deletions(-)

diff --git a/drivers/video/rockchip/rga/rga.h b/drivers/video/rockchip/rga/rga.h
index 1d0a9bb0fcab..ba573bbbdd32 100755
--- a/drivers/video/rockchip/rga/rga.h
+++ b/drivers/video/rockchip/rga/rga.h
@@ -369,7 +369,7 @@ typedef struct rga_service_info {
     atomic_t total_running;
     struct rga_reg *reg;
-    uint32_t cmd_buff[28*16];/* cmd_buff for rga */
+    uint32_t cmd_buff[28*8];/* cmd_buff for rga */
     uint32_t *pre_scale_buf;
     atomic_t int_disable; /* 0 int enable 1 int disable */
     atomic_t cmd_num;
diff --git a/drivers/video/rockchip/rga/rga_drv.c b/drivers/video/rockchip/rga/rga_drv.c
index c439f784187a..1cfc565bac01 100755
--- a/drivers/video/rockchip/rga/rga_drv.c
+++ b/drivers/video/rockchip/rga/rga_drv.c
@@ -52,6 +52,8 @@
 #define RGA_TEST 0
 #define RGA_TEST_TIME 0
 #define RGA_TEST_FLUSH_TIME 0
+#define RGA_INFO_BUS_ERROR 0
+
 #define PRE_SCALE_BUF_SIZE 2048*1024*4
@@ -224,8 +226,7 @@ static void rga_power_on(void)
 static void rga_power_off(void)
 {
     int total_running;
-
-
+
     if(!rga_service.enable)
         return;
@@ -238,7 +239,7 @@ static void rga_power_off(void)
         pr_alert("power off when %d task running!!\n", total_running);
         mdelay(50);
         pr_alert("delay 50 ms for running task\n");
-        rga_dump();
+        //rga_dump();
     }
     clk_disable(aclk_rga);
@@ -540,23 +541,28 @@ static void rga_try_set_reg(uint32_t num)
             /* RGA is idle */
             reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);
             rga_soft_reset();
-            //rga_del_running_list();
             rga_copy_reg(reg, 0);
             rga_reg_from_wait_to_run(reg);
             dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);
             outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]),virt_to_phys(&rga_service.cmd_buff[28]));
-            /*
-             * if cmd buf must use mmu
-             * it should be configured before cmd start
-             */
-            rga_write((2<<4)|0x1, RGA_MMU_CTRL);
-
-            rga_write(virt_to_phys(reg->MMU_base)>>2, RGA_MMU_TBL);
-
+            rga_write(0, RGA_MMU_CTRL);
+
             /* CMD buff */
-            rga_write(virt_to_phys(rga_service.cmd_buff) & (~PAGE_MASK), RGA_CMD_ADDR);
+            rga_write(virt_to_phys(rga_service.cmd_buff), RGA_CMD_ADDR);
+
+            #if RGA_TEST
+            {
+                printk("cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));
+                uint32_t i;
+                uint32_t *p;
+                p = rga_service.cmd_buff;
+                printk("CMD_REG\n");
+                for (i=0; i<7; i++)
+                    printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);
+            }
+            #endif
 
             /* master mode */
             rga_write(0x1<<2, RGA_SYS_CTRL);
@@ -565,9 +571,11 @@ static void rga_try_set_reg(uint32_t num)
             rga_write((0x1<<10)|(0x1<<8), RGA_INT);
 
             /* Start proc */
-            atomic_set(&reg->session->done, 0);
+            atomic_set(&reg->session->done, 0);
             rga_write(0x1, RGA_CMD_CTRL);
-            //rga_write(0x1<<1, RGA_SYS_CTRL);
+
+            atomic_add(1, &rga_service.total_running);
+            atomic_add(1, &reg->session->task_running);
 
             #if RGA_TEST
             {
@@ -583,9 +591,7 @@ static void rga_try_set_reg(uint32_t num)
         } while(0);
     }
-    spin_unlock_irqrestore(&rga_service.lock, flag);
-
-
+    spin_unlock_irqrestore(&rga_service.lock, flag);
 }
@@ -662,6 +668,12 @@ static int rga_blit(rga_session *session, struct rga_req *req)
             if(NULL == req2) {
                 return -EFAULT;
             }
+
+            ret = rga_check_param(req);
+            if(ret == -EINVAL) {
+                printk("req 0 argument is inval\n");
+                break;
+            }
 
             ret = RGA_gen_two_pro(req, req2);
             if(ret == -EINVAL) {
@@ -691,6 +703,7 @@ static int rga_blit(rga_session *session, struct rga_req *req)
     /* check value if legal */
     ret = rga_check_param(req);
     if(ret == -EINVAL) {
+        printk("req argument is inval\n");
         break;
     }
@@ -701,7 +714,6 @@ static int rga_blit(rga_session *session, struct rga_req *req)
         num = 1;
     }
 
-    //rga_power_on();
     atomic_set(&reg->int_enable, 1);
     rga_try_set_reg(num);
@@ -898,17 +910,19 @@ static irqreturn_t rga_irq(int irq, void *dev_id)
 #if RGA_TEST
     printk("rga_irq is valid\n");
 #endif
+
+    while(((rga_read(RGA_STATUS) & 0x1) != 0) && (i<10))// idle
+    {
+        mdelay(1);
+        i++;
+    }
+
+#if RGA_INFO_BUS_ERROR
     if(rga_read(RGA_INT) & 0x1)
    {
         printk("bus Error interrupt is occur\n");
     }
-
-    while(((rga_read(RGA_STATUS) & 0x1) != 0) && (i<10))// idle
-    {
-        mdelay(1);
-        i++;
-    }
+#endif
 
     /*clear INT */
     rga_write(rga_read(RGA_INT) | (0x1<<6) | (0x1<<7) | (0x1<<4), RGA_INT);
@@ -1120,6 +1134,8 @@ static struct platform_driver rga_driver = {
 
 void rga_test_0(void);
+void rga_test_1(void);
+
 
 static int __init rga_init(void)
@@ -1159,6 +1175,7 @@ static int __init rga_init(void)
     }
 
     //rga_test_0();
+    //rga_test_1();
 
     INFO("Module initialized.\n");
@@ -1202,7 +1219,7 @@ extern void rk_direct_fb_show(struct fb_info * fbi);
 EXPORT_SYMBOL(rk_direct_fb_show);
 
 unsigned int src_buf[1920*1080];
-unsigned int dst_buf[1280*800];
+unsigned int dst_buf[1920*1080];
 
 void rga_test_0(void)
 {
@@ -1247,21 +1264,21 @@ void rga_test_0(void)
     req.src.vir_w = 1920;
     req.src.vir_h = 1080;
-    req.src.yrgb_addr = (uint32_t)src_buf;
+    req.src.yrgb_addr = (uint32_t)virt_to_phys(src_buf);
     req.src.uv_addr = req.src.yrgb_addr + 1920;
     //req.src.v_addr = (uint32_t)V4200_320_240_swap0;
-    req.src.format = RK_FORMAT_YCbCr_420_SP;
+    req.src.format = RK_FORMAT_RGB_565;
 
-    req.dst.act_w = 1280;
-    req.dst.act_h = 736;
+    req.dst.act_w = 1024;
+    req.dst.act_h = 768;
 
     req.dst.vir_w = 1280;
-    req.dst.vir_h = 736;
+    req.dst.vir_h = 800;
     req.dst.x_offset = 0;
     req.dst.y_offset = 0;
-    req.dst.yrgb_addr = (uint32_t)dst;
+    req.dst.yrgb_addr = (uint32_t)virt_to_phys(dst);
 
-    //req.dst.format = RK_FORMAT_RGB_565;
+    req.dst.format = RK_FORMAT_RGB_565;
 
     req.clip.xmin = 0;
     req.clip.xmax = 1279;
@@ -1272,7 +1289,7 @@ void rga_test_0(void)
     //req.fg_color = 0x80ffffff;
 
     req.rotate_mode = 1;
-    req.scale_mode = 0;
+    req.scale_mode = 1;
 
     req.alpha_rop_flag = 0;
     req.alpha_rop_mode = 0x1;
@@ -1280,8 +1297,8 @@ void rga_test_0(void)
     req.sina = 0;
     req.cosa = 65536;
 
-    req.mmu_info.mmu_flag = 0x21;
-    req.mmu_info.mmu_en = 1;
+    req.mmu_info.mmu_flag = 0x0;
+    req.mmu_info.mmu_en = 0;
 
     rga_blit_sync(&session, &req);
 
diff --git a/drivers/video/rockchip/rga/rga_mmu_info.c b/drivers/video/rockchip/rga/rga_mmu_info.c
index eb28209bf953..c9e676397705 100755
--- a/drivers/video/rockchip/rga/rga_mmu_info.c
+++ b/drivers/video/rockchip/rga/rga_mmu_info.c
@@ -222,6 +222,7 @@ static int rga_MapUserMemory(struct page **pages,
     uint32_t Address;
     uint32_t t_mem;
     status = 0;
+    Address = 0;
 
     do
     {
@@ -349,8 +350,8 @@ static int rga_MapUserMemory(struct page **pages,
 
 static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
 {
-    int SrcMemSize, DstMemSize, CMDMemSize;
-    uint32_t SrcStart, DstStart, CMDStart;
+    int SrcMemSize, DstMemSize;
+    uint32_t SrcStart, DstStart;
     uint32_t i;
     uint32_t AllSize;
     uint32_t *MMU_Base, *MMU_p;
@@ -379,16 +380,9 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
     if(DstMemSize == 0) {
         return -EINVAL;
     }
-
-    CMDMemSize = 0;
-    /* cal cmd buf mmu info */
-    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
-    if(CMDMemSize == 0) {
-        return -EINVAL;
-    }
-
+
     /* Cal out the needed mem size */
-    AllSize = SrcMemSize + DstMemSize + CMDMemSize;
+    AllSize = SrcMemSize + DstMemSize;
 
     pages = (struct page **)kmalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);
     if(pages == NULL) {
@@ -404,13 +398,9 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         break;
     }
 
-    for(i=0; i<CMDMemSize; i++) {
-        MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
-    }
-
     if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
     {
-        ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
+        ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
         if (ret < 0) {
             pr_err("rga map src memory failed\n");
             status = ret;
@@ -419,7 +409,7 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
     }
     else
     {
-        MMU_p = MMU_Base + CMDMemSize;
+        MMU_p = MMU_Base;
 
         if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
         {
@@ -446,7 +436,7 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         ktime_t start, end;
         start = ktime_get();
 #endif
-        ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
+        ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
         if (ret < 0) {
             pr_err("rga map dst memory failed\n");
             status = ret;
@@ -461,7 +451,7 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
     }
     else
     {
-        MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
+        MMU_p = MMU_Base + SrcMemSize;
 
         for(i=0; i<DstMemSize; i++)
 
     uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
     v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
 
-    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
-    req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);
-    req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);
+    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
+    req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
+    req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
 
-    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
+    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
 
     /*record the malloc buf for the cmd end to release*/
     reg->MMU_base = MMU_Base;
@@ -660,8 +650,8 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
 
 static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
 {
-    int DstMemSize, CMDMemSize;
-    uint32_t DstStart, CMDStart;
+    int DstMemSize;
+    uint32_t DstStart;
     struct page **pages = NULL;
     uint32_t i;
     uint32_t AllSize;
@@ -680,34 +670,25 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req
         return -EINVAL;
     }
 
-    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
-    if(CMDMemSize == 0) {
-        return -EINVAL;
-    }
-
-    AllSize = DstMemSize + CMDMemSize;
+    AllSize = DstMemSize;
 
-    pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
+    pages = (struct page **)kmalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);
     if(pages == NULL) {
         pr_err("RGA MMU malloc pages mem failed\n");
         status = RGA_MALLOC_ERROR;
         break;
     }
 
-    MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
+    MMU_Base = (uint32_t *)kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
     if(pages == NULL) {
         pr_err("RGA MMU malloc MMU_Base point failed\n");
         status = RGA_MALLOC_ERROR;
         break;
     }
 
-    for(i=0; i<CMDMemSize; i++) {
-        MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
-    }
-
     if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
     {
-        ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);
+        ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
         if (ret < 0) {
             pr_err("rga map dst memory failed\n");
             status = ret;
@@ -716,7 +697,7 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req
     }
     else
     {
-        MMU_p = MMU_Base + CMDMemSize;
+        MMU_p = MMU_Base;
 
         for(i=0; i<DstMemSize; i++)
 
     req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
 
-    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);
+    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
 
     /*record the malloc buf for the cmd end to release*/
     reg->MMU_base = MMU_Base;
@@ -759,8 +740,8 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req
 
 static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
 {
-    int DstMemSize, CMDMemSize;
-    uint32_t DstStart, CMDStart;
+    int DstMemSize;
+    uint32_t DstStart;
     struct page **pages = NULL;
     uint32_t i;
     uint32_t AllSize;
@@ -779,12 +760,7 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
         return -EINVAL;
     }
 
-    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
-    if(CMDMemSize == 0) {
-        return -EINVAL;
-    }
-
-    AllSize = DstMemSize + CMDMemSize;
+    AllSize = DstMemSize;
 
     pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
     if(pages == NULL) {
@@ -800,13 +776,9 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
         break;
     }
 
-    for(i=0; i<CMDMemSize; i++) {
-        MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
-    }
-
     if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
     {
-        ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);
+        ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
         if (ret < 0) {
             pr_err("rga map dst memory failed\n");
             status = ret;
@@ -815,7 +787,7 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
     }
     else
     {
-        MMU_p = MMU_Base + CMDMemSize;
+        MMU_p = MMU_Base;
 
         for(i=0; i<DstMemSize; i++)
 
     req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
 
-    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);
+    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
 
     /*record the malloc buf for the cmd end to release*/
 
@@ -859,8 +831,8 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
 
 static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
 {
-    int SrcMemSize, DstMemSize, CMDMemSize;
-    uint32_t SrcStart, DstStart, CMDStart;
+    int SrcMemSize, DstMemSize;
+    uint32_t SrcStart, DstStart;
     struct page **pages = NULL;
     uint32_t i;
     uint32_t AllSize;
@@ -888,35 +860,25 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
         return -EINVAL;
     }
 
-    /* cal cmd buf mmu info */
-    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
-    if(CMDMemSize == 0) {
-        return -EINVAL;
-    }
-
-    AllSize = SrcMemSize + DstMemSize + CMDMemSize;
+    AllSize = SrcMemSize + DstMemSize;
 
-    pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
+    pages = (struct page **)kmalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
     if(pages == NULL) {
         pr_err("RGA MMU malloc pages mem failed\n");
         status = RGA_MALLOC_ERROR;
         break;
     }
 
-    MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
+    MMU_Base = (uint32_t *)kmalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);
     if(pages == NULL) {
         pr_err("RGA MMU malloc MMU_Base point failed\n");
         status = RGA_MALLOC_ERROR;
         break;
     }
 
-    for(i=0; i<CMDMemSize; i++) {
-        MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
-    }
-
     if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
     {
-        ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
+        ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
         if (ret < 0) {
             pr_err("rga map src memory failed\n");
@@ -926,7 +888,7 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
     }
     else
     {
-        MMU_p = MMU_Base + CMDMemSize;
+        MMU_p = MMU_Base;
 
         for(i=0; i<SrcMemSize; i++)
 
     if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
     {
-        ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
+        ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
         if (ret < 0) {
             pr_err("rga map dst memory failed\n");
@@ -947,7 +909,7 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
     }
     else
     {
-        MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
+        MMU_p = MMU_Base + SrcMemSize;
 
         for(i=0; i<DstMemSize; i++)
 
     uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
     v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
 
-    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
-    req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);
-    req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);
+    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
+    req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
+    req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
 
     uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
     v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
 
-    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
-    req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + uv_size) << PAGE_SHIFT);
-    req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + v_size) << PAGE_SHIFT);
+    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
+    req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
+    req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);
 
     /*record the malloc buf for the cmd end to release*/
 
@@ -1005,8 +967,8 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
 
 static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
 {
-    int SrcMemSize, DstMemSize, CMDMemSize;
-    uint32_t SrcStart, DstStart, CMDStart;
+    int SrcMemSize, DstMemSize;
+    uint32_t SrcStart, DstStart;
     struct page **pages = NULL;
     uint32_t i;
     uint32_t AllSize;
@@ -1035,13 +997,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         return -EINVAL;
     }
 
-    /* cal cmd buf mmu info */
-    CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
-    if(CMDMemSize == 0) {
-        return -EINVAL;
-    }
-
-    AllSize = SrcMemSize + DstMemSize + CMDMemSize;
+    AllSize = SrcMemSize + DstMemSize;
 
     pages = (struct page **)kmalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);
     if(pages == NULL)
@@ -1062,15 +1018,10 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         break;
     }
 
-    for(i=0; i<CMDMemSize; i++) {
-        MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
-    }
-
     if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
     {
-        ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
+        ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
         if (ret < 0) {
             pr_err("rga map src memory failed\n");
             status = ret;
@@ -1079,7 +1030,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
     }
     else
     {
-        MMU_p = MMU_Base + CMDMemSize;
+        MMU_p = MMU_Base;
 
         for(i=0; i<SrcMemSize; i++)
 
     if (req->dst.yrgb_addr >= KERNEL_SPACE_VALID)
     {
         /* kernel space */
-        MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
+        MMU_p = MMU_Base + SrcMemSize;
         if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
         {
@@ -1111,7 +1062,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
     else
     {
         /* user space */
-        ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
+        ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
         if (ret < 0) {
             pr_err("rga map dst memory failed\n");
@@ -1130,16 +1081,16 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
     uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
     v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
 
-    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
-    req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);
-    req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);
+    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
+    req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
+    req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
 
     uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
     v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
 
-    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
-    req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + uv_size) << PAGE_SHIFT);
-    req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + v_size) << PAGE_SHIFT);
+    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((SrcMemSize) << PAGE_SHIFT);
+    req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
+    req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);
 
     /*record the malloc buf for the cmd end to release*/
     reg->MMU_base = MMU_Base;
@@ -1377,7 +1328,6 @@ int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
             ret = rga_mmu_info_color_palette_mode(reg, req);
             break;
         case color_fill_mode :
-            //printk("color_fill_mode is enable\n");
             ret = rga_mmu_info_color_fill_mode(reg, req);
             break;
         case line_point_drawing_mode :
@@ -1387,7 +1337,6 @@ int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
             ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
             break;
        case pre_scaling_mode :
-            //printk("pre_scaleing_mode is enable\n");
            ret = rga_mmu_info_pre_scale_mode(reg, req);
            break;
        case update_palette_table_mode :
-- 
2.34.1