rga: flush the MMU page table (one extra guard entry) to fix the RGA master read bug
author	zsq <zsq@rock-chips.com>
Fri, 11 May 2012 02:29:43 +0000 (10:29 +0800)
committer	zsq <zsq@rock-chips.com>
Fri, 11 May 2012 02:29:43 +0000 (10:29 +0800)
drivers/video/rockchip/rga/rga_drv.c
drivers/video/rockchip/rga/rga_mmu_info.c

index e79a5dc8f6a78f84b4b311c1a8d2677440258c1b..5c3fcef3b141773d90d386f4ec51aa21b8eb9d59 100755 (executable)
@@ -595,11 +595,11 @@ static void rga_try_set_reg(uint32_t num)
                \r
                 #if RGA_TEST\r
                 {\r
-                    printk("cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));\r
+                    //printk(KERN_DEBUG "cmd_addr = %.8x\n", rga_read(RGA_CMD_ADDR));\r
                     uint32_t i;\r
                     uint32_t *p;\r
                     p = rga_service.cmd_buff;                    \r
-                    printk("CMD_REG\n");\r
+                    printk(KERN_DEBUG "CMD_REG\n");\r
                     for (i=0; i<7; i++)                    \r
                         printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);                   \r
                 }\r
@@ -618,9 +618,9 @@ static void rga_try_set_reg(uint32_t num)
                 #if RGA_TEST\r
                 {\r
                     uint32_t i;\r
-                    printk("CMD_READ_BACK_REG\n");\r
+                    printk(KERN_DEBUG "CMD_READ_BACK_REG\n");\r
                     for (i=0; i<7; i++)                    \r
-                        printk("%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i*16 + 0), \r
+                        printk(KERN_DEBUG "%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i*16 + 0), \r
                                rga_read(0x100 + i*16 + 4), rga_read(0x100 + i*16 + 8), rga_read(0x100 + i*16 + 12));                    \r
                 }\r
                 #endif\r
@@ -672,7 +672,7 @@ static void rga_del_running_list(void)
         \r
         if(list_empty(&reg->session->waiting))\r
         {\r
-            atomic_set(&reg->session->done, 1);\r
+            atomic_set(&reg->session->done, 1);            \r
             wake_up_interruptible_sync(&reg->session->wait);\r
         }\r
         \r
@@ -843,7 +843,7 @@ static int rga_blit(rga_session *session, struct rga_req *req)
         atomic_add(num, &rga_service.total_running);\r
         spin_unlock_irqrestore(&rga_service.lock, flag);\r
         \r
-        rga_try_set_reg(num);\r
+        rga_try_set_reg(1);\r
         \r
         return 0;         \r
     }\r
index 468897df6351c68a3f153908456213e3dee87346..677e6d8d0c5ba63651f93d19caae7f7981feb17f 100755 (executable)
@@ -392,14 +392,14 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         /* Cal out the needed mem size */\r
         AllSize = SrcMemSize + DstMemSize;\r
                            \r
-        pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);\r
+        pages = kmalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) {\r
             pr_err("RGA MMU malloc pages mem failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;                \r
         }\r
         \r
-        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
+        MMU_Base = kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
         if(MMU_Base == NULL) {\r
             pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;\r
@@ -488,8 +488,8 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         reg->MMU_base = MMU_Base;\r
         \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         status = 0;\r
                \r
@@ -632,8 +632,8 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         /* Free the page table */\r
         if (pages != NULL) {            \r
@@ -728,8 +728,8 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         /* Free the page table */\r
         if (pages != NULL)             \r
@@ -773,14 +773,14 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
 \r
         AllSize = DstMemSize;\r
                    \r
-        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) {\r
             pr_err("RGA MMU malloc pages mem failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;\r
         }\r
         \r
-        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
         if(pages == NULL) {\r
             pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;\r
@@ -818,8 +818,8 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         /* Free the page table */\r
         if (pages != NULL) {            \r
@@ -955,8 +955,8 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         /* Free the page table */\r
         if (pages != NULL) {        \r
@@ -1111,8 +1111,8 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
 \r
         /* Free the page table */\r
         if (pages != NULL) \r