#define RGA_TEST 0\r
#define RGA_TEST_TIME 0\r
#define RGA_TEST_FLUSH_TIME 0\r
+#define RGA_INFO_BUS_ERROR 0\r
+\r
\r
\r
#define PRE_SCALE_BUF_SIZE (2048*1024*4)\r
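+/* PRE_SCALE_BUF_SIZE above is 2048*1024*4 bytes, i.e. an 8 MiB pre-scale buffer */\r
\r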
static void rga_power_off(void)\r
{\r
int total_running;\r
- \r
- \r
+ \r
if(!rga_service.enable)\r
return;\r
\r
pr_alert("power off while %d tasks still running!\n", total_running);\r
mdelay(50);\r
pr_alert("delayed 50 ms for running tasks\n");\r
- rga_dump();\r
+ //rga_dump();\r
}\r
\r
clk_disable(aclk_rga);\r
/* RGA is idle */\r
reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link); \r
rga_soft_reset();\r
- //rga_del_running_list();\r
rga_copy_reg(reg, 0); \r
rga_reg_from_wait_to_run(reg);\r
\r
dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);\r
outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]), virt_to_phys(&rga_service.cmd_buff[28]));\r
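+/* the two flushes above push the 28-word command buffer out of the inner and outer CPU caches so the RGA engine fetches coherent data from DRAM */\r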
\r
- /* \r
- * if cmd buf must use mmu\r
- * it should be configured before cmd start \r
- */\r
- rga_write((2<<4)|0x1, RGA_MMU_CTRL); \r
- \r
- rga_write(virt_to_phys(reg->MMU_base)>>2, RGA_MMU_TBL);\r
- \r
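+ /* command fetch now bypasses the RGA MMU, so RGA_CMD_ADDR below is given the buffer's full physical address rather than its in-page offset */\r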
+ rga_write(0, RGA_MMU_CTRL); \r
+ \r
/* CMD buff */\r
- rga_write(virt_to_phys(rga_service.cmd_buff) & (~PAGE_MASK), RGA_CMD_ADDR); \r
+ rga_write(virt_to_phys(rga_service.cmd_buff), RGA_CMD_ADDR);\r
+ \r
+ #if RGA_TEST\r
+ {\r
+ /* dump the 28 command words for debugging */\r
+ uint32_t i;\r
+ uint32_t *p;\r
+ p = rga_service.cmd_buff;\r
+ printk("cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));\r
+ printk("CMD_REG\n");\r
+ for (i = 0; i < 7; i++)\r
+ printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1 + i*4], p[2 + i*4], p[3 + i*4]);\r
+ }\r
+ #endif\r
\r
/* master mode */\r
rga_write(0x1<<2, RGA_SYS_CTRL);\r
rga_write((0x1<<10)|(0x1<<8), RGA_INT);\r
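+/* unmask the RGA interrupt sources; bits 8 and 10 are presumably the error and all-commands-finished enables (exact meanings per the RGA TRM) */\r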
\r
/* Start proc */\r
- atomic_set(&reg->session->done, 0);\r
+ atomic_set(&reg->session->done, 0);\r
rga_write(0x1, RGA_CMD_CTRL);\r
- //rga_write(0x1<<1, RGA_SYS_CTRL);\r
+\r
+ atomic_add(1, &rga_service.total_running);\r
+ atomic_add(1, &reg->session->task_running);\r
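+ /* account the new job; rga_power_off() consults these counters before gating the RGA clocks */\r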
\r
#if RGA_TEST\r
{\r
}\r
while(0);\r
}\r
- spin_unlock_irqrestore(&rga_service.lock, flag);\r
-\r
- \r
+ spin_unlock_irqrestore(&rga_service.lock, flag); \r
}\r
\r
\r
if(NULL == req2) {\r
return -EFAULT; \r
}\r
+\r
+ ret = rga_check_param(req);\r
+ if(ret == -EINVAL) {\r
+ printk("req 0 argument is invalid\n");\r
+ break;\r
+ }\r
\r
ret = RGA_gen_two_pro(req, req2); \r
if(ret == -EINVAL) {\r
/* check whether the request parameters are legal */\r
ret = rga_check_param(req);\r
if(ret == -EINVAL) {\r
+ printk("req argument is invalid\n");\r
break;\r
}\r
\r
num = 1; \r
} \r
\r
- //rga_power_on();\r
atomic_set(&reg->int_enable, 1);\r
rga_try_set_reg(num);\r
\r
#if RGA_TEST\r
printk("rga_irq is valid\n");\r
#endif\r
+ \r
+ while(((rga_read(RGA_STATUS) & 0x1) != 0) && (i < 10)) /* loop until the engine reports idle */\r
+ { \r
+ mdelay(1);\r
+ i++;\r
+ }\r
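+ /* bounded wait: at most 10 x 1 ms before giving up and clearing the INT flags anyway */\r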
\r
+ #if RGA_INFO_BUS_ERROR\r
if(rga_read(RGA_INT) & 0x1)\r
{\r
printk("bus error interrupt occurred\n");\r
}\r
- \r
- while(((rga_read(RGA_STATUS) & 0x1) != 0) && (i<10))// idle\r
- { \r
- mdelay(1);\r
- i++;\r
- } \r
+ #endif\r
\r
/* clear INT */\r
rga_write(rga_read(RGA_INT) | (0x1<<6) | (0x1<<7) | (0x1<<4), RGA_INT);\r
\r
\r
void rga_test_0(void);\r
+void rga_test_1(void);\r
+\r
\r
\r
static int __init rga_init(void)\r
}\r
\r
//rga_test_0();\r
+ //rga_test_1();\r
\r
INFO("Module initialized.\n"); \r
\r
EXPORT_SYMBOL(rk_direct_fb_show);\r
\r
unsigned int src_buf[1920*1080];\r
-unsigned int dst_buf[1280*800];\r
+unsigned int dst_buf[1920*1080];\r
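+/* each of these static test buffers is 1920*1080*4 bytes, roughly 8 MB apiece */\r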
\r
void rga_test_0(void)\r
{\r
\r
req.src.vir_w = 1920;\r
req.src.vir_h = 1080;\r
- req.src.yrgb_addr = (uint32_t)src_buf;\r
+ req.src.yrgb_addr = (uint32_t)virt_to_phys(src_buf);\r
req.src.uv_addr = req.src.yrgb_addr + 1920;\r
//req.src.v_addr = (uint32_t)V4200_320_240_swap0;\r
- req.src.format = RK_FORMAT_YCbCr_420_SP;\r
+ req.src.format = RK_FORMAT_RGB_565;\r
\r
- req.dst.act_w = 1280;\r
- req.dst.act_h = 736;\r
+ req.dst.act_w = 1024;\r
+ req.dst.act_h = 768;\r
\r
req.dst.vir_w = 1280;\r
- req.dst.vir_h = 736;\r
+ req.dst.vir_h = 800;\r
req.dst.x_offset = 0;\r
req.dst.y_offset = 0;\r
- req.dst.yrgb_addr = (uint32_t)dst;\r
+ req.dst.yrgb_addr = (uint32_t)virt_to_phys(dst);\r
\r
- //req.dst.format = RK_FORMAT_RGB_565;\r
+ req.dst.format = RK_FORMAT_RGB_565;\r
\r
req.clip.xmin = 0;\r
req.clip.xmax = 1279;\r
//req.fg_color = 0x80ffffff;\r
\r
req.rotate_mode = 1;\r
- req.scale_mode = 0;\r
+ req.scale_mode = 1;\r
\r
req.alpha_rop_flag = 0;\r
req.alpha_rop_mode = 0x1;\r
req.sina = 0;\r
req.cosa = 65536;\r
\r
- req.mmu_info.mmu_flag = 0x21;\r
- req.mmu_info.mmu_en = 1;\r
+ req.mmu_info.mmu_flag = 0x0;\r
+ req.mmu_info.mmu_en = 0;\r
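+ /* with the RGA MMU disabled, src/dst must be physical addresses, hence the virt_to_phys() calls above */\r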
\r
rga_blit_sync(&session, &req);\r
\r
uint32_t Address;\r
uint32_t t_mem;\r
status = 0;\r
+ Address = 0;\r
\r
do\r
{ \r
\r
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)\r
{ \r
- int SrcMemSize, DstMemSize, CMDMemSize;\r
- uint32_t SrcStart, DstStart, CMDStart; \r
+ int SrcMemSize, DstMemSize;\r
+ uint32_t SrcStart, DstStart; \r
uint32_t i;\r
uint32_t AllSize;\r
uint32_t *MMU_Base, *MMU_p;\r
if(DstMemSize == 0) {\r
return -EINVAL; \r
}\r
- \r
- CMDMemSize = 0;\r
- /* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
- \r
+ \r
/* Calculate the needed memory size */\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ AllSize = SrcMemSize + DstMemSize;\r
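+ /* the command buffer is no longer mapped through the RGA MMU, so only the src and dst pages are counted */\r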
\r
pages = (struct page **)kmalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
break; \r
}\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT)); \r
- }\r
-\r
if(req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
{ \r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
if (ret < 0) {\r
pr_err("rga map src memory failed\n");\r
status = ret;\r
}\r
else\r
{\r
- MMU_p = MMU_Base + CMDMemSize;\r
+ MMU_p = MMU_Base;\r
\r
if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)\r
{\r
ktime_t start, end;\r
start = ktime_get();\r
#endif\r
- ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
+ ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
if (ret < 0) {\r
pr_err("rga map dst memory failed\n");\r
status = ret;\r
}\r
else\r
{\r
- MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
+ MMU_p = MMU_Base + SrcMemSize;\r
\r
for(i=0; i<DstMemSize; i++)\r
{\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
\r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);\r
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);\r
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
+ req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);\r
+ req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);\r
\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);\r
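+ /* the addresses handed to the hardware become MMU-table indices: page index << PAGE_SHIFT plus the original in-page offset, with src pages at index 0 and dst pages following at SrcMemSize */\r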
\r
/* record the allocated buffer so it can be released when the command completes */\r
reg->MMU_base = MMU_Base;\r
\r
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int DstMemSize, CMDMemSize;\r
- uint32_t DstStart, CMDStart;\r
+ int DstMemSize;\r
+ uint32_t DstStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
return -EINVAL; \r
}\r
\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
-\r
- AllSize = DstMemSize + CMDMemSize;\r
+ AllSize = DstMemSize;\r
\r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ pages = (struct page **)kmalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
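+ /* one spare entry beyond AllSize is allocated here and for MMU_Base below; presumably a guard slot for the MMU table (assumption) */\r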
if(pages == NULL) {\r
pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA_MALLOC_ERROR; \r
break;\r
}\r
\r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+ MMU_Base = (uint32_t *)kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {\r
pr_err("RGA MMU malloc MMU_Base pointer failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart+i)<<PAGE_SHIFT));\r
- }\r
-\r
if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) \r
{\r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);\r
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
if (ret < 0) {\r
pr_err("rga map dst memory failed\n");\r
status = ret;\r
}\r
else\r
{\r
- MMU_p = MMU_Base + CMDMemSize;\r
+ MMU_p = MMU_Base;\r
\r
for(i=0; i<DstMemSize; i++)\r
{\r
*/\r
\r
req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2); \r
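+/* the table base is programmed shifted right by 2, i.e. apparently in 4-byte (word) units */\r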
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
\r
/* record the allocated buffer so it can be released when the command completes */\r
reg->MMU_base = MMU_Base;\r
\r
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int DstMemSize, CMDMemSize;\r
- uint32_t DstStart, CMDStart;\r
+ int DstMemSize;\r
+ uint32_t DstStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
return -EINVAL; \r
}\r
\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
-\r
- AllSize = DstMemSize + CMDMemSize;\r
+ AllSize = DstMemSize;\r
\r
pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
break;\r
}\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart+i)<<PAGE_SHIFT));\r
- }\r
-\r
if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);\r
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
if (ret < 0) {\r
pr_err("rga map dst memory failed\n");\r
status = ret;\r
}\r
else\r
{\r
- MMU_p = MMU_Base + CMDMemSize;\r
+ MMU_p = MMU_Base;\r
\r
for(i=0; i<DstMemSize; i++)\r
{\r
* because the buffer addresses handed to the hardware are remapped through the RGA MMU table\r
*/\r
req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2); \r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
\r
\r
/* record the allocated buffer so it can be released when the command completes */\r
\r
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int SrcMemSize, DstMemSize, CMDMemSize;\r
- uint32_t SrcStart, DstStart, CMDStart;\r
+ int SrcMemSize, DstMemSize;\r
+ uint32_t SrcStart, DstStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
return -EINVAL; \r
}\r
\r
- /* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
-\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ AllSize = SrcMemSize + DstMemSize;\r
\r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ pages = (struct page **)kmalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
\r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+ MMU_Base = (uint32_t *)kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {\r
pr_err("RGA MMU malloc MMU_Base pointer failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<< PAGE_SHIFT));\r
- }\r
-\r
if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
if (ret < 0) \r
{\r
pr_err("rga map src memory failed\n");\r
}\r
else\r
{\r
- MMU_p = MMU_Base + CMDMemSize;\r
+ MMU_p = MMU_Base;\r
\r
for(i=0; i<SrcMemSize; i++)\r
{\r
\r
if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
- ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
+ ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
if (ret < 0) \r
{\r
pr_err("rga map dst memory failed\n");\r
}\r
else\r
{\r
- MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
+ MMU_p = MMU_Base + SrcMemSize;\r
\r
for(i=0; i<DstMemSize; i++)\r
{\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
\r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);\r
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);\r
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
+ req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);\r
+ req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);\r
\r
uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
- req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + uv_size) << PAGE_SHIFT);\r
- req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + v_size) << PAGE_SHIFT);\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);\r
+ req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);\r
+ req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);\r
\r
\r
/* record the allocated buffer so it can be released when the command completes */\r
\r
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int SrcMemSize, DstMemSize, CMDMemSize;\r
- uint32_t SrcStart, DstStart, CMDStart;\r
+ int SrcMemSize, DstMemSize;\r
+ uint32_t SrcStart, DstStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
uint32_t AllSize;\r
return -EINVAL; \r
}\r
\r
- /* cal cmd buf mmu info */\r
- CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL; \r
- }\r
-\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ AllSize = SrcMemSize + DstMemSize;\r
\r
pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) \r
break; \r
}\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
- }\r
-\r
-\r
/* map src pages */\r
if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
- ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
if (ret < 0) {\r
pr_err("rga map src memory failed\n");\r
status = ret;\r
}\r
else\r
{\r
- MMU_p = MMU_Base + CMDMemSize;\r
+ MMU_p = MMU_Base;\r
\r
for(i=0; i<SrcMemSize; i++)\r
{\r
if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID) \r
{ \r
/* kernel space */\r
- MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
+ MMU_p = MMU_Base + SrcMemSize;\r
\r
if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)\r
{\r
else \r
{\r
/* user space */\r
- ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
+ ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
if (ret < 0) \r
{\r
pr_err("rga map dst memory failed\n");\r
uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
\r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);\r
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);\r
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
+ req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);\r
+ req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);\r
\r
uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
- req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + uv_size) << PAGE_SHIFT);\r
- req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + v_size) << PAGE_SHIFT);\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((SrcMemSize) << PAGE_SHIFT);\r
+ req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);\r
+ req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);\r
\r
/* record the allocated buffer so it can be released when the command completes */\r
reg->MMU_base = MMU_Base;\r
ret = rga_mmu_info_color_palette_mode(reg, req);\r
break;\r
case color_fill_mode :\r
- //printk("color_fill_mode is enable\n");\r
ret = rga_mmu_info_color_fill_mode(reg, req);\r
break;\r
case line_point_drawing_mode :\r
ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);\r
break;\r
case pre_scaling_mode :\r
- //printk("pre_scaleing_mode is enable\n");\r
ret = rga_mmu_info_pre_scale_mode(reg, req);\r
break;\r
case update_palette_table_mode :\r