fix rga copy read bug
author: zsq <zsq@rock-chips.com>
Sun, 1 Apr 2012 10:03:03 +0000 (02:03 -0800)
committer: zsq <zsq@rock-chips.com>
Sun, 1 Apr 2012 10:03:03 +0000 (02:03 -0800)
drivers/video/rockchip/rga/rga.h
drivers/video/rockchip/rga/rga_drv.c
drivers/video/rockchip/rga/rga_mmu_info.c

index 19807abce3bc9280a1038743709579a213ea1bdb..e102c7014d63a7088ac4c4e0bd86ea36eced0795 100755 (executable)
@@ -374,7 +374,6 @@ typedef struct rga_service_info {
     atomic_t            cmd_num;\r
     atomic_t            src_format_swt;\r
     int                 last_prc_src_format;\r
-    bool                           enabled;\r
 } rga_service_info;\r
 \r
 \r
index cdf4c34808671585c7e5b0ef1313d33beadfe610..2d5995722783e03480470eba2e6ec39014bbb2c5 100755 (executable)
@@ -224,7 +224,7 @@ static void rga_power_off(void)
 {\r
     int total_running;\r
     \r
-    //printk("rga_power_off\n");\r
+    printk("rga_power_off\n");\r
        if(!drvdata->enable)\r
                return;\r
 \r
@@ -378,7 +378,7 @@ static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req)
     reg->session = session;\r
        INIT_LIST_HEAD(&reg->session_link);\r
        INIT_LIST_HEAD(&reg->status_link);\r
-    \r
+            \r
     if (req->mmu_info.mmu_en)\r
     {\r
         ret = rga_set_mmu_info(reg, req);\r
@@ -392,9 +392,18 @@ static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req)
             return NULL; \r
         }\r
     }\r
-        \r
+\r
+    #if RGA_TEST_TIME\r
+    rga_start = ktime_get();\r
+    #endif        \r
     RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg);\r
 \r
+    #if RGA_TEST_TIME\r
+    rga_end = ktime_get();\r
+    rga_end = ktime_sub(rga_end, rga_start);\r
+    printk("one cmd end time %d\n", (int)ktime_to_us(rga_end));\r
+    #endif\r
+\r
     spin_lock_irqsave(&rga_service.lock, flag);\r
        list_add_tail(&reg->status_link, &rga_service.waiting);\r
        list_add_tail(&reg->session_link, &session->waiting);\r
@@ -533,50 +542,9 @@ static void rga_try_set_reg(uint32_t num)
         do\r
         {            \r
             struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);\r
-            \r
-            //if(((reg->cmd_reg[0] & 0xf0) >= 3) && ((reg->cmd_reg[0] & 0xf0) <= 7) && rga_service.last_prc_src_format == 0)    \r
-                \r
             offset = atomic_read(&rga_service.cmd_num);\r
             if((rga_read(RGA_STATUS) & 0x1)) \r
-            {   \r
-                #if 0\r
-                #if RGA_TEST\r
-                /* RGA is busy */\r
-                printk(" rga try set reg while rga is working \n");\r
-                #endif\r
-                \r
-                if((atomic_read(&rga_service.cmd_num) <= 0xf) && (atomic_read(&rga_service.int_disable) == 0)) \r
-                {                                        \r
-                    rga_copy_reg(reg, offset);                \r
-                    rga_reg_from_wait_to_run(reg);\r
-\r
-                    dmac_flush_range(&rga_service.cmd_buff[offset*28], &rga_service.cmd_buff[(offset + 1)*28]);\r
-                    outer_flush_range(virt_to_phys(&rga_service.cmd_buff[offset*28]),\r
-                                       virt_to_phys(&rga_service.cmd_buff[(offset + 1)*28]));\r
-                    \r
-                    rga_write(0x1<<10, RGA_INT);\r
-\r
-                    #if RGA_TEST\r
-                    {\r
-                        uint32_t i;\r
-                        printk("CMD_REG num is %.8x\n", offset);\r
-                        for(i=0; i<7; i++)\r
-                        {\r
-                            printk("%.8x ", rga_service.cmd_buff[i*4 + 0 + 28*atomic_read(&rga_service.cmd_num)]);\r
-                            printk("%.8x ", rga_service.cmd_buff[i*4 + 1 + 28*atomic_read(&rga_service.cmd_num)]);\r
-                            printk("%.8x ", rga_service.cmd_buff[i*4 + 2 + 28*atomic_read(&rga_service.cmd_num)]);\r
-                            printk("%.8x\n",rga_service.cmd_buff[i*4 + 3 + 28*atomic_read(&rga_service.cmd_num)]);\r
-                        }\r
-                    }\r
-                    #endif\r
-                    \r
-                    atomic_set(&reg->session->done, 0);                    \r
-                    rga_write((0x1<<3)|(0x1<<1), RGA_CMD_CTRL);\r
-                    \r
-                    if(atomic_read(&reg->int_enable))\r
-                        atomic_set(&rga_service.int_disable, 1);\r
-                }\r
-                #endif\r
+            {                   \r
                 break;\r
             }\r
             else \r
@@ -614,16 +582,11 @@ static void rga_try_set_reg(uint32_t num)
                                                               \r
                 /* All CMD finish int */\r
                 rga_write(0x1<<10, RGA_INT);\r
-                \r
-                #if RGA_TEST_TIME\r
-                rga_start = ktime_get();\r
-                #endif\r
-                \r
+                                                \r
                 /* Start proc */\r
                 atomic_set(&reg->session->done, 0);\r
                 rga_write(0x1, RGA_CMD_CTRL);                \r
 \r
-\r
                 #if RGA_TEST\r
                 {\r
                     uint32_t i;\r
@@ -642,9 +605,9 @@ static void rga_try_set_reg(uint32_t num)
 }\r
 \r
 \r
+#if RGA_TEST  \r
 static void print_info(struct rga_req *req)\r
-{\r
-    #if RGA_TEST    \r
+{      \r
     printk("src.yrgb_addr = %.8x, src.uv_addr = %.8x, src.v_addr = %.8x\n", \r
             req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr);\r
     printk("src : act_w = %d, act_h = %d, vir_w = %d, vir_h = %d\n", \r
@@ -658,9 +621,9 @@ static void print_info(struct rga_req *req)
         req->dst.act_w, req->dst.act_h, req->dst.vir_w, req->dst.vir_h);\r
 \r
     printk("clip.xmin = %d, clip.xmax = %d. clip.ymin = %d, clip.ymax = %d\n", \r
-        req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax);\r
-    #endif\r
+        req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax);   \r
 }\r
+#endif\r
 \r
 \r
 static int rga_blit_async(rga_session *session, struct rga_req *req)\r
@@ -678,16 +641,12 @@ static int rga_blit_async(rga_session *session, struct rga_req *req)
     printk("*** rga_blit_async proc ***\n");\r
     print_info(req);\r
     #endif\r
-            \r
+                  \r
     saw = req->src.act_w;\r
     sah = req->src.act_h;\r
     daw = req->dst.act_w;\r
     dah = req->dst.act_h;\r
 \r
-    /* special case proc */\r
-    if(req->src.act_w == 360 && req->src.act_h == 64 && req->rotate_mode == 0)\r
-        req->rotate_mode = 1;\r
-\r
     do\r
     {\r
         if((req->render_mode == bitblt_mode) && (((saw>>1) >= daw) || ((sah>>1) >= dah))) \r
@@ -734,8 +693,8 @@ static int rga_blit_async(rga_session *session, struct rga_req *req)
         //rga_power_on();\r
         atomic_set(&reg->int_enable, 1);        \r
         rga_try_set_reg(num);\r
-\r
-        return 0; \r
+        \r
+        return 0;         \r
     }\r
     while(0);\r
 \r
@@ -766,12 +725,7 @@ static int rga_blit_sync(rga_session *session, struct rga_req *req)
     printk("*** rga_blit_sync proc ***\n");\r
     print_info(req);\r
     #endif\r
-\r
-    /* special case proc*/\r
-    if(req->src.act_w == 360 && req->src.act_h == 64 && req->rotate_mode == 0)\r
-        req->rotate_mode = 1;\r
-        \r
-\r
+       \r
     do\r
     {\r
         if((req->render_mode == bitblt_mode) && (((saw>>1) >= daw) || ((sah>>1) >= dah))) \r
@@ -897,7 +851,7 @@ static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
     if(req != NULL) {\r
         kfree(req);\r
     }    \r
-    \r
+        \r
        return ret;\r
 }\r
 \r
@@ -959,8 +913,6 @@ static irqreturn_t rga_irq(int irq,  void *dev_id)
     uint32_t num = 0;\r
     struct list_head *next;\r
     int int_enable = 0;\r
-\r
-    //DBG("rga_irq %d \n", irq);\r
     \r
     #if RGA_TEST\r
     printk("rga_irq is valid\n");\r
@@ -1009,15 +961,16 @@ static irqreturn_t rga_irq(int irq,  void *dev_id)
     next = &rga_service.waiting;\r
    \r
     /* add cmd to cmd buf */\r
+    /*\r
     while((!list_empty(next)) && ((int_enable) == 0) && (num <= 0xf))\r
     {        \r
         num += 1;\r
         reg = list_entry(next->next, struct rga_reg, status_link);\r
         int_enable = atomic_read(&reg->int_enable);        \r
         next = next->next;\r
-    }   \r
-\r
-    rga_try_set_reg(num);\r
+    } \r
+    */\r
+    rga_try_set_reg(1);\r
                        \r
        return IRQ_HANDLED;\r
 }\r
@@ -1046,8 +999,6 @@ static void rga_shutdown(struct platform_device *pdev)
     pr_cont("done\n");\r
 }\r
 \r
-\r
-\r
 struct file_operations rga_fops = {\r
        .owner          = THIS_MODULE,\r
        .open           = rga_open,\r
@@ -1078,7 +1029,7 @@ static int __devinit rga_drv_probe(struct platform_device *pdev)
     atomic_set(&rga_service.total_running, 0);\r
     atomic_set(&rga_service.src_format_swt, 0);\r
     rga_service.last_prc_src_format = 1; /* default is yuv first*/\r
-       rga_service.enabled     = false;    \r
+    data->enable = 0;\r
           \r
        if(NULL == data)\r
        {\r
index aa4c373d28606ab8c9d3ff19540d89fe71c362e6..e598fb041e2181bcabff61038f630c88869ce1d5 100755 (executable)
@@ -243,9 +243,9 @@ static int rga_MapUserMemory(struct page **pages,
 \r
             for(i=0; i<pageCount; i++)\r
             {\r
-                t_mem = Memory + i;\r
+                t_mem = (Memory + i) << PAGE_SHIFT;\r
                 \r
-                vma = find_vma(current->mm, (t_mem) << PAGE_SHIFT);\r
+                vma = find_vma(current->mm, t_mem);\r
 \r
                 if (vma && (vma->vm_flags & VM_PFNMAP) )\r
                 {\r
@@ -255,14 +255,14 @@ static int rga_MapUserMemory(struct page **pages,
                         spinlock_t  * ptl;\r
                         unsigned long pfn;                                                                        \r
 \r
-                        pgd_t * pgd = pgd_offset(current->mm, ((t_mem)<< PAGE_SHIFT));\r
-                        pud_t * pud = pud_offset(pgd, ((t_mem) << PAGE_SHIFT));\r
+                        pgd_t * pgd = pgd_offset(current->mm, t_mem);\r
+                        pud_t * pud = pud_offset(pgd, t_mem);\r
                         if (pud)\r
                         {\r
-                            pmd_t * pmd = pmd_offset(pud, ((t_mem) << PAGE_SHIFT));\r
+                            pmd_t * pmd = pmd_offset(pud, t_mem);\r
                             if (pmd)\r
                             {\r
-                                pte = pte_offset_map_lock(current->mm, pmd, ((t_mem)<< PAGE_SHIFT), &ptl);\r
+                                pte = pte_offset_map_lock(current->mm, pmd, t_mem, &ptl);\r
                                 if (!pte)\r
                                 {\r
                                     break;\r
@@ -279,7 +279,7 @@ static int rga_MapUserMemory(struct page **pages,
                         }\r
 \r
                         pfn = pte_pfn(*pte);\r
-                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((t_mem) << PAGE_SHIFT)) & ~PAGE_MASK));                        \r
+                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)t_mem) & ~PAGE_MASK));                        \r
                         pte_unmap_unlock(pte, ptl);\r
                         \r
                         #if 0\r
@@ -381,17 +381,12 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
     struct page **pages = NULL;\r
 \r
     MMU_Base = NULL;\r
-\r
-    if(req->src.act_w == 360 && req->src.act_h == 64)\r
-        mmu_flag = 1;\r
-    else\r
-        mmu_flag = 0;\r
-\r
+    \r
     do\r
     {               \r
         /* cal src buf mmu info */                     \r
         SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
-                                        req->src.format, req->src.vir_w, req->src.vir_h,\r
+                                        req->src.format, req->src.vir_w, (req->src.act_h + req->src.y_offset),\r
                                         &SrcStart);\r
         if(SrcMemSize == 0) {\r
             return -EINVAL;                \r
@@ -400,12 +395,14 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
 \r
         /* cal dst buf mmu info */    \r
         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
-                                        req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+                                        req->dst.format, req->dst.vir_w, (req->dst.act_h + req->dst.y_offset),\r
                                         &DstStart);        \r
         if(DstMemSize == 0) {\r
             return -EINVAL; \r
         }\r
 \r
+        //DstMemSize += 1;\r
+\r
         /* cal cmd buf mmu info */\r
         CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
         if(CMDMemSize == 0) {\r
@@ -414,15 +411,15 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         \r
         /* Cal out the needed mem size */\r
         AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
-                   \r
-        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+                           \r
+        pages = (struct page **)kmalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) {\r
             pr_err("RGA MMU malloc pages mem failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;                \r
         }\r
         \r
-        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);      \r
+        MMU_Base = (uint32_t *)kmalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);      \r
         if(MMU_Base == NULL) {\r
             pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;\r
@@ -457,16 +454,16 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
                 }                \r
             }\r
             else\r
-            {                           \r
+            {                      \r
                 for(i=0; i<SrcMemSize; i++)\r
                 {\r
                     MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
-                }\r
+                }                \r
             }            \r
         }\r
         \r
         if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
-        {\r
+        {     \r
             #if 0\r
             ktime_t start, end;\r
             start = ktime_get();\r
@@ -491,7 +488,7 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
             for(i=0; i<DstMemSize; i++)\r
             {\r
                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
-            }       \r
+            }                   \r
         }\r
 \r
         /* zsq \r
@@ -511,13 +508,13 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
                 \r
         /*record the malloc buf for the cmd end to release*/\r
         reg->MMU_base = MMU_Base;\r
-\r
+        \r
         /* flush data to DDR */\r
         dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
         outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
-        \r
-        status = 0;\r
 \r
+        status = 0;\r
+               \r
         /* Free the page table */        \r
         if (pages != NULL) {\r
             kfree(pages);\r
@@ -1068,7 +1065,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
 \r
         AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
                    \r
-        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+        pages = (struct page **)kmalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) \r
         {\r
             pr_err("RGA MMU malloc pages mem failed\n");\r
@@ -1080,7 +1077,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
          * Allocate MMU Index mem\r
          * This mem release in run_to_done fun \r
          */\r
-        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+        MMU_Base = (uint32_t *)kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
         if(pages == NULL) {\r
             pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;            \r
@@ -1228,7 +1225,7 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg
             break;    \r
         }\r
         \r
-        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+        MMU_Base = (uint32_t *)kmalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);\r
         if(pages == NULL) {\r
             pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;\r