\r
#if RGA_TEST
{
- printk("cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));
+ //printk(KERN_DEBUG "cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));
uint32_t i;
uint32_t *p;
p = rga_service.cmd_buff;
- printk("CMD_REG\n");
+ printk(KERN_DEBUG "CMD_REG\n");
for (i=0; i<7; i++)
printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);
}
#endif
#if RGA_TEST
{
uint32_t i;
- printk("CMD_READ_BACK_REG\n");
+ printk(KERN_DEBUG "CMD_READ_BACK_REG\n");
for (i=0; i<7; i++)
- printk("%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i*16 + 0),
+ printk(KERN_DEBUG "%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i*16 + 0),
rga_read(0x100 + i*16 + 4), rga_read(0x100 + i*16 + 8), rga_read(0x100 + i*16 + 12));
}
#endif
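The KERN_DEBUG additions in the two RGA_TEST blocks above are worth keeping consistent: printk() without an explicit level falls back to the default message loglevel (normally KERN_WARNING), so these register dumps would reach the console of any build with RGA_TEST enabled. A minimal sketch of the intended pattern, assuming a readl()-style accessor on a mapped register base (the helper name is hypothetical, not from this driver):

#include <linux/kernel.h>
#include <linux/io.h>

/* Hypothetical helper: emit the 7x4 read-back registers at KERN_DEBUG,
 * so they only reach the console when the loglevel is raised, e.g.
 * "dmesg -n 8" or loglevel=8 on the kernel command line. */
static void rga_dump_readback_regs(void __iomem *base)
{
	uint32_t i;

	printk(KERN_DEBUG "CMD_READ_BACK_REG\n");
	for (i = 0; i < 7; i++)
		printk(KERN_DEBUG "%.8x %.8x %.8x %.8x\n",
		       readl(base + 0x100 + i*16 + 0),
		       readl(base + 0x100 + i*16 + 4),
		       readl(base + 0x100 + i*16 + 8),
		       readl(base + 0x100 + i*16 + 12));
}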
\r
if(list_empty(&reg->session->waiting))
{
atomic_set(&reg->session->done, 1);
wake_up_interruptible_sync(&reg->session->wait);
}

atomic_add(num, &rga_service.total_running);
spin_unlock_irqrestore(&rga_service.lock, flag);

- rga_try_set_reg(num);
+ rga_try_set_reg(1);

return 0;
}
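For context on the done/wake pair above: the ioctl path that queued the request sleeps on session->wait until this code marks the session done, and the rga_try_set_reg(num) -> rga_try_set_reg(1) change then appears to feed the hardware one follow-up command list at a time rather than num of them. A minimal sketch of the waiting side, inferred from the fields used above; the rga_session layout and the 500 ms timeout are assumptions, not taken from these hunks:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int rga_wait_session_done(rga_session *session)
{
	long ret;

	/* Sleeps until the code above runs atomic_set(&session->done, 1)
	 * and wake_up_interruptible_sync(&session->wait), or times out. */
	ret = wait_event_interruptible_timeout(session->wait,
					       atomic_read(&session->done),
					       msecs_to_jiffies(500));
	if (ret <= 0)
		return ret ? (int)ret : -ETIMEDOUT;

	atomic_set(&session->done, 0);	/* re-arm for the next request */
	return 0;
}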
/* Cal out the needed mem size */
AllSize = SrcMemSize + DstMemSize;

- pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);
+ pages = kmalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);
if(pages == NULL) {
pr_err("RGA MMU malloc pages mem failed\n");
status = RGA_MALLOC_ERROR;
break;
}

- MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
+ MMU_Base = kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
if(MMU_Base == NULL) {
pr_err("RGA MMU malloc MMU_Base point failed\n");
status = RGA_MALLOC_ERROR;
reg->MMU_base = MMU_Base;

/* flush data to DDR */
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
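The +1 in the flush bounds tracks the allocation: the table is allocated with AllSize + 1 entries, and because MMU_Base is a uint32_t *, MMU_Base + AllSize stops one entry short, so the final entry could still sit dirty in the L1 or outer (L2) cache when the RGA's MMU fetches the table from DDR. A sketch of the invariant, naming the entry count once so the allocation and both flush calls cannot drift apart (same kernel calls as in the hunk above):

size_t entries = AllSize + 1;	/* must equal the allocation above */

/* dmac_flush_range() cleans+invalidates the L1 range; outer_flush_range()
 * handles the outer (L2) cache. Both treat the end bound as exclusive,
 * so "+ entries" covers every table entry. */
dmac_flush_range(MMU_Base, MMU_Base + entries);
outer_flush_range(virt_to_phys(MMU_Base),
		  virt_to_phys(MMU_Base + entries));

Note the kzalloc() -> kmalloc() switch above is only safe on the assumption that every entry, including the final guard entry, is written before the table is handed to the hardware; otherwise the dropped zeroing leaves stale data in the table.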
\r
status = 0;

reg->MMU_base = MMU_Base;

/* flush data to DDR */
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));

/* Free the page table */
if (pages != NULL) {
reg->MMU_base = MMU_Base;

/* flush data to DDR */
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));

/* Free the page table */
if (pages != NULL)

AllSize = DstMemSize;

- pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
+ pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
if(pages == NULL) {
pr_err("RGA MMU malloc pages mem failed\n");
status = RGA_MALLOC_ERROR;
break;
}
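The sizing change above brings this call site in line with the earlier ones: every table consistently reserves one entry beyond the mapped range, presumably a guard or prefetch slot for the RGA MMU (an assumption, not something these hunks state). A small sketch, with hypothetical rga_mmu_table_* helpers (needs linux/slab.h and asm/cacheflush.h), of keeping the +1 in a single place so allocation and cache maintenance can never disagree on the size:

/* Hypothetical helpers, not from this driver. */
#define RGA_MMU_TABLE_ENTRIES(n)	((n) + 1)	/* +1 guard entry */

static uint32_t *rga_mmu_table_alloc(uint32_t mapped)
{
	return kzalloc(RGA_MMU_TABLE_ENTRIES(mapped) * sizeof(uint32_t),
		       GFP_KERNEL);
}

static void rga_mmu_table_flush(uint32_t *table, uint32_t mapped)
{
	uint32_t n = RGA_MMU_TABLE_ENTRIES(mapped);

	dmac_flush_range(table, table + n);
	outer_flush_range(virt_to_phys(table), virt_to_phys(table + n));
}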
\r
- MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
+ MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
- if(pages == NULL) {
+ if(MMU_Base == NULL) {
pr_err("RGA MMU malloc MMU_Base point failed\n");
status = RGA_MALLOC_ERROR;
reg->MMU_base = MMU_Base;

/* flush data to DDR */
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));

/* Free the page table */
if (pages != NULL) {
reg->MMU_base = MMU_Base;

/* flush data to DDR */
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));

/* Free the page table */
if (pages != NULL) {
reg->MMU_base = MMU_Base;

/* flush data to DDR */
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));

/* Free the page table */
if (pages != NULL)