modify for mmu table search
author zsq <zsq@rock-chips.com>
Tue, 27 Mar 2012 16:28:28 +0000 (09:28 -0700)
committer zsq <zsq@rock-chips.com>
Tue, 27 Mar 2012 16:28:28 +0000 (09:28 -0700)
drivers/video/rockchip/rga/RGA_API.c
drivers/video/rockchip/rga/rga_drv.c
drivers/video/rockchip/rga/rga_mmu_info.c
drivers/video/rockchip/rga/rga_reg_info.c

index af89f973e9e071dccc5617af08b7800a981c21f8..337c730c583aaaf0e35536aa195c33894e8a232d 100755 (executable)
@@ -93,8 +93,8 @@ uint32_t RGA_gen_two_pro(struct rga_req *msg, struct rga_req *msg1)
     mp = msg1;\r
     w_ratio = (msg->src.act_w << 16) / msg->dst.act_w;\r
     h_ratio = (msg->src.act_h << 16) / msg->dst.act_h;\r
-\r
-    memcpy(&msg1, &msg, sizeof(struct rga_req));\r
+   \r
+    memcpy(msg1, msg, sizeof(struct rga_req));\r
 \r
     msg->dst.format = msg->src.format;\r
 \r
@@ -142,17 +142,27 @@ uint32_t RGA_gen_two_pro(struct rga_req *msg, struct rga_req *msg1)
             msg->src.act_h = (dah - 1) << 3;                                                    \r
         }\r
     }\r
+\r
+    printk("test_2\n");\r
+    \r
     msg->dst.act_h = dah;\r
     msg->dst.vir_h = dah;\r
             \r
-    msg->dst.yrgb_addr = (u32)rga_service.pre_scale_buf;\r
+    //msg->dst.yrgb_addr = (u32)rga_service.pre_scale_buf;\r
     msg->dst.uv_addr = msg->dst.yrgb_addr + stride * dah;\r
     msg->dst.v_addr = msg->dst.uv_addr + ((stride * dah) >> 1);\r
 \r
     msg->render_mode = pre_scaling_mode;\r
 \r
-    memcpy(&msg1->src, &msg->dst, sizeof(rga_img_info_t));\r
-        \r
+    msg1->src.yrgb_addr = msg->dst.yrgb_addr;\r
+    msg1->src.uv_addr = msg->dst.uv_addr;\r
+    msg1->src.v_addr = msg->dst.v_addr;\r
+\r
+    msg1->src.act_w = msg->dst.act_w;\r
+    msg1->src.act_h = msg->dst.act_h;\r
+    msg1->src.vir_w = msg->dst.vir_w;\r
+    msg1->src.vir_h = msg->dst.vir_h;\r
+            \r
     return 0;\r
 }\r
 \r
index 5279ae34e23d4208c6bf9200c2b8c146f821e178..49c5648cb2e90e3ad61b26479907622ba8c79a06 100755 (executable)
 #include "rga_mmu_info.h"\r
 #include "RGA_API.h"\r
 \r
-//#include "bug_320x240_swap0_ABGR8888.h"\r
+#include "bug_320x240_swap0_ABGR8888.h"\r
 \r
 \r
-#define RGA_TEST 0\r
+#define RGA_TEST 0\r\r
+#define RGA_TEST_TIME 0\r
 \r
 #define PRE_SCALE_BUF_SIZE  2048*1024*4\r
 \r
@@ -69,6 +70,9 @@
 #define DRIVER_DESC            "RGA Device Driver"\r
 #define DRIVER_NAME            "rga"\r
 \r
+ktime_t rga_start;\r
+ktime_t rga_end;\r
+\r
 \r
 struct rga_drvdata {\r
        struct miscdevice miscdev;\r
@@ -148,6 +152,7 @@ static void rga_soft_reset(void)
                ERR("soft reset timeout.\n");\r
 }\r
 \r
+\r
 static void rga_dump(void)\r
 {\r
        int running;\r
@@ -386,7 +391,7 @@ static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req)
     reg->session = session;\r
        INIT_LIST_HEAD(&reg->session_link);\r
        INIT_LIST_HEAD(&reg->status_link);\r
-\r
+    \r
     if (req->mmu_info.mmu_en)\r
     {\r
         ret = rga_set_mmu_info(reg, req);\r
@@ -400,6 +405,12 @@ static struct rga_reg * rga_reg_init(rga_session *session, struct rga_req *req)
             return NULL; \r
         }\r
     }\r
+\r
+    #if RGA_TEST_TIME\r
+    rga_end = ktime_get();\r
+    rga_end = ktime_sub(rga_end, rga_start);\r
+    printk("one cmd end time %d\n", (int)ktime_to_us(rga_end));\r
+    #endif\r
     \r
     RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg);\r
 \r
@@ -435,10 +446,14 @@ static struct rga_reg * rga_reg_init_2(rga_session *session, struct rga_req *req
             break;\r
        }\r
 \r
-        reg0->session = session;\r
+        reg0->session = session;        \r
        INIT_LIST_HEAD(&reg0->session_link);\r
        INIT_LIST_HEAD(&reg0->status_link);\r
 \r
+        reg1->session = session;\r
+        INIT_LIST_HEAD(&reg1->session_link);\r
+       INIT_LIST_HEAD(&reg1->status_link);\r
+\r
         if(req0->mmu_info.mmu_en)\r
         {\r
             ret = rga_set_mmu_info(reg0, req0);\r
@@ -452,19 +467,27 @@ static struct rga_reg * rga_reg_init_2(rga_session *session, struct rga_req *req
 \r
         if(req1->mmu_info.mmu_en)\r
         {\r
-            ret = rga_set_mmu_info(reg0, req1);\r
+            ret = rga_set_mmu_info(reg1, req1);\r
             if(ret < 0) {\r
                 printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);\r
                 break;        \r
             }\r
         }\r
         \r
-        RGA_gen_reg_info(req1, (uint8_t *)reg0->cmd_reg);\r
+        RGA_gen_reg_info(req1, (uint8_t *)reg1->cmd_reg);\r
+\r
+        {\r
+            uint32_t i;\r
+            for(i=0; i<28; i++)\r
+            {\r
+                printk("reg1->cmd_reg[%d] is %.8x\n", i, reg1->cmd_reg[i]);\r
+            }\r
+        }\r
 \r
         spin_lock_irqsave(&rga_service.lock, flag);\r
        list_add_tail(&reg0->status_link, &rga_service.waiting);\r
-        list_add_tail(&reg1->status_link, &rga_service.waiting);\r
-       list_add_tail(&reg0->session_link, &session->waiting);    \r
+        list_add_tail(&reg0->session_link, &session->waiting);\r
+        list_add_tail(&reg1->status_link, &rga_service.waiting);           \r
        list_add_tail(&reg1->session_link, &session->waiting);\r
        spin_unlock_irqrestore(&rga_service.lock, flag);\r
 \r
@@ -524,7 +547,7 @@ static void rga_try_set_reg(uint32_t num)
 \r
     if (!num)\r
     {\r
-        #ifdef RGA_TEST        \r
+        #if RGA_TEST        \r
         printk("rga try set reg cmd num is 0\n");\r
         #endif\r
         \r
@@ -535,29 +558,42 @@ static void rga_try_set_reg(uint32_t num)
        if (!list_empty(&rga_service.waiting)) \r
     {\r
         do\r
-        {\r
-            struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);            \r
+        {            \r
+            struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);\r
             if((rga_read(RGA_STATUS) & 0x1)) \r
             {            \r
                 /* RGA is busy */\r
+                printk("no idel is here \n");\r
+                \r
                 if((atomic_read(&rga_service.cmd_num) <= 0xf) && (atomic_read(&rga_service.int_disable) == 0)) \r
                 {\r
-                    rga_copy_reg(reg, atomic_read(&rga_service.cmd_num));                \r
+                    uint32_t offset;\r
+\r
+                    offset = atomic_read(&rga_service.cmd_num);\r
+                    rga_copy_reg(reg, offset);                \r
                     rga_reg_from_wait_to_run(reg);\r
+\r
+                    dmac_flush_range(&rga_service.cmd_buff[offset*28], &rga_service.cmd_buff[(offset + 1)*28]);\r
+                    outer_flush_range(virt_to_phys(&rga_service.cmd_buff[offset*28]),\r
+                                       virt_to_phys(&rga_service.cmd_buff[(offset + 1)*28]));\r
                     \r
                     rga_write(0x1<<10, RGA_INT);\r
 \r
-                    #ifdef RGA_TEST\r
+                    #if RGA_TEST\r
                     {\r
                         uint32_t i;\r
-                        printk("CMD_REG\n");\r
-                        for(i=0; i<28; i++)                        \r
-                            printk("%.8x\n", rga_service.cmd_buff[i + 28*atomic_read(&rga_service.cmd_num)]);                                                                                    \r
+                        printk("CMD_REG num is %.8x\n", offset);\r
+                        for(i=0; i<7; i++)\r
+                        {\r
+                            printk("%.8x ", rga_service.cmd_buff[i*4 + 0 + 28*atomic_read(&rga_service.cmd_num)]);\r
+                            printk("%.8x ", rga_service.cmd_buff[i*4 + 1 + 28*atomic_read(&rga_service.cmd_num)]);\r
+                            printk("%.8x ", rga_service.cmd_buff[i*4 + 2 + 28*atomic_read(&rga_service.cmd_num)]);\r
+                            printk("%.8x\n",rga_service.cmd_buff[i*4 + 3 + 28*atomic_read(&rga_service.cmd_num)]);\r
+                        }\r
                     }\r
                     #endif\r
                     \r
-                    atomic_set(&reg->session->done, 0);\r
-                    \r
+                    atomic_set(&reg->session->done, 0);                    \r
                     rga_write((0x1<<3)|(0x1<<1), RGA_CMD_CTRL);\r
                     \r
                     if(atomic_read(&reg->int_enable))\r
@@ -566,9 +602,10 @@ static void rga_try_set_reg(uint32_t num)
             }\r
             else \r
             {  \r
-                /* RGA is idle */\r
+                /* RGA is idle */                \r
                 rga_copy_reg(reg, 0);            \r
                 rga_reg_from_wait_to_run(reg);\r
+                \r
                 dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);\r
                 outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]),virt_to_phys(&rga_service.cmd_buff[28]));\r
 \r
@@ -583,13 +620,13 @@ static void rga_try_set_reg(uint32_t num)
                 /* CMD buff */\r
                 rga_write(virt_to_phys(rga_service.cmd_buff) & (~PAGE_MASK), RGA_CMD_ADDR);               \r
 \r
-                #ifdef RGA_TEST\r
+                #if RGA_TEST\r
                 {\r
-                    uint32_t i;\r
-                    printk("CMD_REG\n");\r
-                    for (i=0; i<28; i++)                    \r
-                        printk("%.8x\n", rga_service.cmd_buff[i]);                        \r
-                                                        \r
+                    uint32_t i, *p;\r
+                    p = rga_service.cmd_buff;\r
+                    printk("CMD_REG\n");                    \r
+                    for (i=0; i<7; i++)                    \r
+                        printk("%.8x %.8x %.8x %.8x\n", p[i*4+0], p[i*4+1], p[i*4+2], p[i*4+3]);\r
                 }\r
                 #endif\r
 \r
@@ -599,23 +636,19 @@ static void rga_try_set_reg(uint32_t num)
                 /* All CMD finish int */\r
                 rga_write(0x1<<10, RGA_INT);\r
                 \r
-                //rga_write(1, RGA_MMU_STA_CTRL);\r
-\r
                 /* Start proc */\r
                 atomic_set(&reg->session->done, 0);\r
                 rga_write(0x1, RGA_CMD_CTRL);                \r
 \r
-                //while(1)\r
-                //    printk("mmu_status is %.8x\n", rga_read(RGA_MMU_STA));\r
-\r
-                #ifdef RGA_TEST\r
+                #if RGA_TEST\r
                 {\r
                     uint32_t i;\r
                     printk("CMD_READ_BACK_REG\n");\r
-                    for (i=0; i<28; i++)                        \r
-                        printk("%.8x\n", rga_read(0x100 + i*4));                                                                                \r
+                    for (i=0; i<7; i++)                    \r
+                        printk("%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i*16 + 0), \r
+                               rga_read(0x100 + i*16 + 4), rga_read(0x100 + i*16 + 8), rga_read(0x100 + i*16 + 12));                    \r
                 }\r
-                #endif                \r
+                #endif\r
             }\r
             num--;\r
         }\r
@@ -669,7 +702,7 @@ static int rga_blit_async(rga_session *session, struct rga_req *req)
                if(ret == -EINVAL) {\r
                 return -EINVAL;\r
                }\r
-            \r
+\r
             reg = rga_reg_init(session, req);\r
             if(reg == NULL) {\r
                 return -EFAULT;\r
@@ -721,10 +754,11 @@ static int rga_blit_sync(rga_session *session, struct rga_req *req)
         {\r
             return -EINVAL;            \r
         }\r
+        memset(req2, 0, sizeof(struct rga_req));\r
         \r
         RGA_gen_two_pro(req, req2);\r
 \r
-        reg = rga_reg_init_2(session, req2, req);\r
+        reg = rga_reg_init_2(session, req, req2);\r
         if (NULL == reg) \r
         {\r
             return -EFAULT;\r
@@ -743,16 +777,20 @@ static int rga_blit_sync(rga_session *session, struct rga_req *req)
         {\r
                return -EFAULT;\r
        }\r
-        \r
+\r
+        //printk("rga_reg_int start \n");        \r
         reg = rga_reg_init(session, req);\r
         if(reg == NULL) \r
         {            \r
             return -EFAULT;\r
         }\r
+        //printk("rga_reg_int end \n");\r
         \r
         atomic_set(&reg->int_enable, 1);        \r
         rga_try_set_reg(1);\r
     }    \r
+\r
+    \r
     \r
     ret_timeout = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);\r
     if (unlikely(ret_timeout< 0)) \r
@@ -778,6 +816,11 @@ static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
     struct rga_req *req;\r
        int ret = 0;\r
     rga_session *session = (rga_session *)file->private_data;\r
+\r
+    #if RGA_TEST_TIME\r
+    rga_start = ktime_get();\r
+    #endif\r
+\r
        if (NULL == session) \r
     {\r
         printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);\r
@@ -790,19 +833,23 @@ static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
         printk("%s [%d] get rga_req mem failed\n",__FUNCTION__,__LINE__);\r
         ret = -EINVAL;\r
     }\r
-        \r
-    if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req)))) \r
-    {\r
-               ERR("copy_from_user failed\n");\r
-               ret = -EFAULT;\r
-       }\r
-    \r
+       \r
        switch (cmd)\r
        {\r
                case RGA_BLIT_SYNC:\r
+               if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req)))) \r
+            {\r
+                       ERR("copy_from_user failed\n");\r
+                       ret = -EFAULT;\r
+               }\r
             ret = rga_blit_sync(session, req);\r
             break;\r
                case RGA_BLIT_ASYNC:\r
+               if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req)))) \r
+            {\r
+                       ERR("copy_from_user failed\n");\r
+                       ret = -EFAULT;\r
+               }\r
                        ret = rga_blit_async(session, req);            \r
                        break;\r
                case RGA_FLUSH:\r
@@ -819,6 +866,8 @@ static long rga_ioctl(struct file *file, uint32_t cmd, unsigned long arg)
     if(req != NULL) {\r
         kfree(req);\r
     }    \r
+\r
+    \r
        return ret;\r
 }\r
 \r
@@ -896,9 +945,7 @@ static irqreturn_t rga_irq(int irq,  void *dev_id)
                printk("RGA is not idle!\n");\r
                rga_soft_reset();\r
        }\r
-\r
     \r
-\r
     spin_lock(&rga_service.lock);\r
     do\r
     {\r
@@ -1061,9 +1108,10 @@ static int __devinit rga_drv_probe(struct platform_device *pdev)
                goto err_clock;\r
        }\r
 \r
-   \r
+    \r
        \r
-       data->axi_clk = clk_get(&pdev->dev, "aclk_rga");\r
+       data->axi_clk = clk_get(NULL, "aclk_rga");\r
+    \r
        if (IS_ERR(data->axi_clk))\r
        {\r
                ERR("failed to find rga axi clock source\n");\r
@@ -1078,7 +1126,6 @@ static int __devinit rga_drv_probe(struct platform_device *pdev)
                ret = -ENOENT;\r
                goto err_clock;\r
        }\r
-\r
     #endif\r
     \r
     \r
@@ -1188,7 +1235,7 @@ static int rga_drv_remove(struct platform_device *pdev)
        if(data->pd_display){\r
                clk_put(data->pd_display);\r
        }\r
-    #endif\r
+    #endif    \r
 \r
     kfree(data);\r
     return 0;\r
@@ -1207,7 +1254,7 @@ static struct platform_driver rga_driver = {
 };\r
 \r
 \r
-//void rga_test_0(void);\r
+void rga_test_0(void);\r
 \r
 \r
 static int __init rga_init(void)\r
@@ -1272,12 +1319,15 @@ static void __exit rga_exit(void)
 }\r
 \r
 \r
-#if 0\r
-extern uint32_t ABGR8888_320_240_swap0[240][320];\r
+#if 1\r
+extern struct fb_info * rk_get_fb(int fb_id);\r
+EXPORT_SYMBOL(rk_get_fb);\r
 \r
-unsigned int src_buf[800*480];\r
-unsigned int dst_buf[800*480];\r
-unsigned int mmu_buf[1024];\r
+extern void rk_direct_fb_show(struct fb_info * fbi);\r
+EXPORT_SYMBOL(rk_direct_fb_show);\r
+\r
+extern uint32_t ABGR8888_320_240_swap0[240][320];\r
+unsigned int dst_buf[1280*800];\r
 \r
 void rga_test_0(void)\r
 {\r
@@ -1285,7 +1335,7 @@ void rga_test_0(void)
     rga_session session;\r
     unsigned int *src, *dst;\r
 \r
-    int i;\r
+    struct fb_info *fb;\r
 \r
     session.pid        = current->pid;\r
        INIT_LIST_HEAD(&session.waiting);\r
@@ -1298,10 +1348,12 @@ void rga_test_0(void)
     atomic_set(&session.num_done, 0);\r
        //file->private_data = (void *)session;\r
 \r
+    fb = rk_get_fb(0);\r
+\r
     memset(&req, 0, sizeof(struct rga_req));\r
     src = ABGR8888_320_240_swap0;\r
     dst = dst_buf;\r
-\r
+        \r
     #if 0\r
     memset(src_buf, 0x80, 800*480*4);\r
     memset(dst_buf, 0xcc, 800*480*4);\r
@@ -1318,43 +1370,59 @@ void rga_test_0(void)
 \r
     req.src.vir_w = 320;\r
     req.src.vir_h = 240;\r
-    req.src.yrgb_addr = src;\r
+    req.src.yrgb_addr = (uint32_t)src;\r
 \r
-    req.dst.act_w = 320;\r
-    req.dst.act_h = 240;\r
+    req.dst.act_w = 100;\r
+    req.dst.act_h = 80;\r
 \r
-    req.dst.vir_w = 800;\r
-    req.dst.vir_h = 480;\r
-    req.dst.yrgb_addr = dst;\r
+    req.dst.vir_w = 1280;\r
+    req.dst.vir_h = 800;\r
+    req.dst.x_offset = 200;\r
+    req.dst.y_offset = 200;\r
+    req.dst.yrgb_addr = (uint32_t)dst;\r
 \r
     req.clip.xmin = 0;\r
-    req.clip.xmax = 799;\r
+    req.clip.xmax = 1279;\r
     req.clip.ymin = 0;\r
-    req.clip.ymax = 479;\r
-    \r
-    \r
-\r
+    req.clip.ymax = 799;\r
+        \r
     req.render_mode = 0;\r
-    req.rotate_mode = 0;\r
+    req.rotate_mode = 1;\r
+    req.scale_mode = 2;\r
+\r
+    req.sina = 0;\r
+    req.cosa = 0x10000;\r
 \r
     req.mmu_info.mmu_flag = 0x21;\r
     req.mmu_info.mmu_en = 1;\r
 \r
     rga_blit_sync(&session, &req);\r
 \r
-    #if 0\r
-    outer_inv_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480])); \r
-    dmac_inv_range(&dst_buf[0], &dst_buf[800*480]);\r
+    fb->var.bits_per_pixel = 32;\r
 \r
-    for(i=0; i<800*480; i++)\r
-    {        \r
-        if(src[i] != dst[i])\r
-        {\r
-            printk("src != dst %d\n", i);\r
-            printk("src = %.8x, dst = %.8x \n", src[i], dst[i]);\r
-        }\r
-    }\r
-    #endif\r
+    fb->var.xres = 1280;\r
+    fb->var.yres = 800;\r
+    \r
+    fb->var.red.length = 8;\r
+    fb->var.red.offset = 0;\r
+    fb->var.red.msb_right = 0;\r
+    \r
+    fb->var.green.length = 8;\r
+    fb->var.green.offset = 8;\r
+    fb->var.green.msb_right = 0;\r
+    \r
+    fb->var.blue.length = 8;\r
+    fb->var.blue.offset = 16;\r
+    fb->var.blue.msb_right = 0;\r
+    \r
+    fb->var.transp.length = 8;\r
+    fb->var.transp.offset = 24;\r
+    fb->var.transp.msb_right = 0;\r
+\r
+    fb->fix.smem_start = virt_to_phys(dst);\r
+\r
+    rk_direct_fb_show(fb);   \r
+    \r
 }\r
 \r
 #endif\r
index 27d3a25d3685e876f83e0c5802cbd8371ebefa17..5f802d1bd10e0f5d973e55ea7d2d270abf32f566 100755 (executable)
@@ -218,7 +218,7 @@ static int rga_MapUserMemory(struct page **pages,
     int32_t result;\r
     uint32_t i;\r
     uint32_t status;\r
-\r
+    uint32_t Address;\r
     status = 0;\r
 \r
     do\r
@@ -234,30 +234,37 @@ static int rga_MapUserMemory(struct page **pages,
                 NULL\r
                 );\r
         up_read(&current->mm->mmap_sem);\r
-        \r
+\r
         if(result <= 0 || result < pageCount) \r
         {\r
             struct vm_area_struct *vma;\r
 \r
-            vma = find_vma(current->mm, Memory);\r
-\r
-            if (vma && (vma->vm_flags & VM_PFNMAP) )\r
+            for(i=0; i<pageCount; i++)\r
             {\r
-                do\r
-                {\r
-                    pte_t       * pte;\r
-                    spinlock_t  * ptl;\r
-                    unsigned long pfn;\r
+                vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);\r
 \r
-                    pgd_t * pgd = pgd_offset(current->mm, Memory);\r
-                    pud_t * pud = pud_offset(pgd, Memory);\r
-                    if (pud)\r
+                if (vma && (vma->vm_flags & VM_PFNMAP) )\r
+                {\r
+                    do\r
                     {\r
-                        pmd_t * pmd = pmd_offset(pud, Memory);\r
-                        if (pmd)\r
+                        pte_t       * pte;\r
+                        spinlock_t  * ptl;\r
+                        unsigned long pfn;\r
+\r
+                        pgd_t * pgd = pgd_offset(current->mm, ((Memory + i)<< PAGE_SHIFT));\r
+                        pud_t * pud = pud_offset(pgd, ((Memory + i) << PAGE_SHIFT));\r
+                        if (pud)\r
                         {\r
-                            pte = pte_offset_map_lock(current->mm, pmd, Memory, &ptl);\r
-                            if (!pte)\r
+                            pmd_t * pmd = pmd_offset(pud, ((Memory + i) << PAGE_SHIFT));\r
+                            if (pmd)\r
+                            {\r
+                                pte = pte_offset_map_lock(current->mm, pmd, ((Memory + i)<< PAGE_SHIFT), &ptl);\r
+                                if (!pte)\r
+                                {\r
+                                    break;\r
+                                }\r
+                            }\r
+                            else\r
                             {\r
                                 break;\r
                             }\r
@@ -266,46 +273,43 @@ static int rga_MapUserMemory(struct page **pages,
                         {\r
                             break;\r
                         }\r
-                    }\r
-                    else\r
-                    {\r
-                        break;\r
-                    }\r
 \r
-                    pfn      = pte_pfn(*pte);\r
-                    \r
-                    pte_unmap_unlock(pte, ptl);\r
+                        pfn      = pte_pfn(*pte);\r
 \r
-                    /* Free the page table. */\r
-                    if (pages != NULL)\r
-                    {\r
-                        /* Release the pages if any. */\r
-                        if (result > 0)\r
+                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));\r
+                        \r
+                        pte_unmap_unlock(pte, ptl);\r
+\r
+                        /* Free the page table. */\r
+                        if (pages != NULL)\r
                         {\r
-                            for (i = 0; i < result; i++)\r
+                            /* Release the pages if any. */\r
+                            if (result > 0)\r
                             {\r
-                                if (pages[i] == NULL)\r
+                                for (i = 0; i < result; i++)\r
                                 {\r
-                                    break;\r
-                                }\r
+                                    if (pages[i] == NULL)\r
+                                    {\r
+                                        break;\r
+                                    }\r
 \r
-                                page_cache_release(pages[i]);\r
+                                    page_cache_release(pages[i]);\r
+                                }\r
                             }\r
                         }\r
+                        \r
+                        pageTable[i] = Address;\r
                     }\r
-\r
-                    return 0;\r
+                    while (0);\r
                 }\r
-                while (0);\r
-\r
-                status = RGA_OUT_OF_RESOURCES;\r
-                break;\r
+                else\r
+                {\r
+                    status = RGA_OUT_OF_RESOURCES;\r
+                    break;\r
+                }     \r
             }\r
-            else\r
-            {\r
-                status = RGA_OUT_OF_RESOURCES;\r
-                break;\r
-            }                \r
+\r
+            return 0;\r
         }\r
 \r
         for (i = 0; i < pageCount; i++)\r
@@ -323,10 +327,10 @@ static int rga_MapUserMemory(struct page **pages,
         }\r
 \r
         /* Fill the page table. */\r
-        for(i=0; i<pageCount; i++) {\r
-\r
+        for(i=0; i<pageCount; i++) \r
+        {\r
             /* Get the physical address from page struct. */\r
-            pageTable[i * (PAGE_SIZE/4096)] = page_to_phys(pages[i]);\r
+            pageTable[i] = page_to_phys(pages[i]);\r
         }    \r
     }\r
     while(0);\r
@@ -384,7 +388,7 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
 \r
         /* cal dst buf mmu info */    \r
         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
-                                        req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+                                        req->dst.format, req->dst.vir_w, (req->dst.act_h + req->dst.y_offset),\r
                                         &DstStart);\r
         if(DstMemSize == 0) {\r
             return -EINVAL; \r
@@ -402,35 +406,32 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
                    \r
         pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) {\r
-            pr_err("RGA MMU malloc pages mem failed");\r
+            pr_err("RGA MMU malloc pages mem failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;                \r
         }\r
         \r
         MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
         if(MMU_Base == NULL) {\r
-            pr_err("RGA MMU malloc MMU_Base point failed");\r
+            pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;                \r
         }\r
 \r
-        printk("MMU_Base addr is %.8x\n", MMU_Base);\r
-        printk("CMDStart is %.8x\n",CMDStart);\r
-\r
         for(i=0; i<CMDMemSize; i++) {\r
             MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));            \r
         }\r
 \r
-        printk("MMU_Base[0] = %.8x\n", MMU_Base[0]);\r
-\r
         if(req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
         {\r
+            \r
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
             if (ret < 0) {\r
-                pr_err("rga map src memory failed");\r
+                pr_err("rga map src memory failed\n");\r
                 status = ret;\r
                 break;\r
             }\r
+            \r
         }\r
         else\r
         {\r
@@ -454,17 +455,25 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
                 }\r
             }            \r
         }\r
-\r
-        printk("MMU_Base[1] = %.8x\n", MMU_Base[1]);\r
         \r
         if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
         {\r
+            #if 0\r
+            ktime_t start, end;\r
+            start = ktime_get();\r
+            #endif\r
             ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
             if (ret < 0) {\r
-                pr_err("rga map dst memory failed");\r
+                pr_err("rga map dst memory failed\n");\r
                 status = ret;\r
                 break;\r
             }\r
+\r
+            #if 0\r
+            end = ktime_get();\r
+            end = ktime_sub(end, start);\r
+            printk("dst mmu map time = %d\n", (int)ktime_to_us(end));\r
+            #endif\r
         }\r
         else\r
         {\r
@@ -479,14 +488,23 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         /* zsq \r
          * change the buf address in req struct     \r
          */\r
+        #if 0 \r
+        printk("CMDMemSize is %.8x\n", CMDMemSize);\r
+        printk("SrcMemSize is %.8x\n", SrcMemSize);\r
+        printk("DstMemSize is %.8x\n", DstMemSize);\r
+        printk("CMDStart is %.8x\n", CMDStart);\r
+        printk("SrcStart is %.8x\n", SrcStart);\r
+        printk("DstStart is %.8x\n", DstStart);\r
+        #endif\r
+        \r
         req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);        \r
 \r
         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
         req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
-        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);        \r
 \r
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
-        \r
+                \r
         /*record the malloc buf for the cmd end to release*/\r
         reg->MMU_base = MMU_Base;\r
 \r
@@ -563,13 +581,13 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
                    \r
         pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) {\r
-            pr_err("RGA MMU malloc pages mem failed");\r
+            pr_err("RGA MMU malloc pages mem failed\n");\r
             return -EINVAL;                \r
         }\r
 \r
         MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
         if(MMU_Base == NULL) {\r
-            pr_err("RGA MMU malloc MMU_Base point failed");\r
+            pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             break;            \r
         }\r
 \r
@@ -585,7 +603,7 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
             if (ret < 0) \r
             {\r
-                pr_err("rga map src memory failed");\r
+                pr_err("rga map src memory failed\n");\r
                 status = ret;\r
                 break;            \r
             }\r
@@ -634,6 +652,10 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
         /*record the malloc buf for the cmd end to release*/\r
         reg->MMU_base = MMU_Base;\r
 \r
+        /* flush data to DDR */\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
         /* Free the page table */\r
         if (pages != NULL) {            \r
             kfree(pages);\r
@@ -734,6 +756,10 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req
         /*record the malloc buf for the cmd end to release*/\r
         reg->MMU_base = MMU_Base;\r
 \r
+        /* flush data to DDR */\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
         /* Free the page table */\r
         if (pages != NULL)             \r
             kfree(pages);\r
@@ -829,6 +855,10 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
         /*record the malloc buf for the cmd end to release*/\r
         reg->MMU_base = MMU_Base;\r
 \r
+        /* flush data to DDR */\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
         /* Free the page table */\r
         if (pages != NULL) {            \r
             kfree(pages);\r
@@ -888,14 +918,14 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
                    \r
         pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) {\r
-            pr_err("RGA MMU malloc pages mem failed");\r
+            pr_err("RGA MMU malloc pages mem failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;    \r
         }\r
         \r
         MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
         if(pages == NULL) {\r
-            pr_err("RGA MMU malloc MMU_Base point failed");\r
+            pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;   \r
         }\r
@@ -909,7 +939,7 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
             if (ret < 0) \r
             {\r
-                pr_err("rga map src memory failed");\r
+                pr_err("rga map src memory failed\n");\r
                 status = ret;\r
                 break;\r
             }\r
@@ -930,7 +960,7 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
             ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
             if (ret < 0) \r
             {\r
-                pr_err("rga map dst memory failed");\r
+                pr_err("rga map dst memory failed\n");\r
                 status = ret;\r
                 break;\r
             }\r
@@ -960,6 +990,10 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
         /*record the malloc buf for the cmd end to release*/\r
         reg->MMU_base = MMU_Base;\r
 \r
+        /* flush data to DDR */\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
         /* Free the page table */\r
         if (pages != NULL) {        \r
             kfree(pages);\r
@@ -1018,11 +1052,20 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         }\r
 \r
         AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+\r
+        \r
+        #if 0\r
+        printk("AllSize = %d\n", AllSize);\r
+        printk("SrcSize = %d\n", SrcMemSize);\r
+        printk("CMDSize = %d\n", CMDMemSize);\r
+        printk("DstSize = %d\n", DstMemSize);\r
+        printk("DstStart = %d\n", DstStart);\r
+        #endif\r
                    \r
         pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) \r
         {\r
-            pr_err("RGA MMU malloc pages mem failed");\r
+            pr_err("RGA MMU malloc pages mem failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;                \r
         }\r
@@ -1033,7 +1076,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
          */\r
         MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
         if(pages == NULL) {\r
-            pr_err("RGA MMU malloc MMU_Base point failed");\r
+            pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;            \r
             break;                \r
         }\r
@@ -1048,7 +1091,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         {\r
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
             if (ret < 0) {\r
-                pr_err("rga map src memory failed");\r
+                pr_err("rga map src memory failed\n");\r
                 status = ret;\r
                 break;\r
             }\r
@@ -1090,7 +1133,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
             ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
             if (ret < 0) \r
             {\r
-                pr_err("rga map dst memory failed");\r
+                pr_err("rga map dst memory failed\n");\r
                 status = ret;\r
                 break;\r
             }        \r
@@ -1100,6 +1143,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
          * change the buf address in req struct\r
          * for the reason of lie to MMU \r
          */\r
+        \r
         req->mmu_info.base_addr = virt_to_phys(MMU_Base)>>2;\r
 \r
         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
@@ -1107,10 +1151,14 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
 \r
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
-        \r
+\r
         /*record the malloc buf for the cmd end to release*/\r
         reg->MMU_base = MMU_Base;\r
 \r
+        /* flush data to DDR */\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
         /* Free the page table */\r
         if (pages != NULL) \r
         {            \r
@@ -1133,7 +1181,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
 \r
 static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)\r
 {\r
-    int SrcMemSize, DstMemSize, CMDMemSize;\r
+    int SrcMemSize, CMDMemSize;\r
     uint32_t SrcStart, CMDStart;\r
     struct page **pages = NULL;\r
     uint32_t i;\r
@@ -1157,18 +1205,18 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg
             return -EINVAL; \r
         }\r
 \r
-        AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+        AllSize = SrcMemSize + CMDMemSize;\r
                    \r
         pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) {\r
-            pr_err("RGA MMU malloc pages mem failed");\r
+            pr_err("RGA MMU malloc pages mem failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;    \r
         }\r
         \r
         MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
         if(pages == NULL) {\r
-            pr_err("RGA MMU malloc MMU_Base point failed");\r
+            pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;                \r
         }\r
@@ -1181,7 +1229,7 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg
         {\r
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
             if (ret < 0) {\r
-                pr_err("rga map src memory failed");\r
+                pr_err("rga map src memory failed\n");\r
                 return -EINVAL;\r
             }\r
         }\r
@@ -1206,6 +1254,10 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg
         /*record the malloc buf for the cmd end to release*/\r
         reg->MMU_base = MMU_Base;\r
 \r
+        /* flush data to DDR */\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
         if (pages != NULL) {\r
             /* Free the page table */\r
             kfree(pages);\r
@@ -1253,14 +1305,14 @@ static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_
                    \r
         pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
         if(pages == NULL) {\r
-            pr_err("RGA MMU malloc pages mem failed");\r
+            pr_err("RGA MMU malloc pages mem failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;                \r
         }\r
         \r
         MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
         if(pages == NULL) {\r
-            pr_err("RGA MMU malloc MMU_Base point failed");\r
+            pr_err("RGA MMU malloc MMU_Base point failed\n");\r
             status = RGA_MALLOC_ERROR;\r
             break;                \r
         }\r
@@ -1273,7 +1325,7 @@ static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_
         {\r
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
             if (ret < 0) {\r
-                pr_err("rga map src memory failed");\r
+                pr_err("rga map src memory failed\n");\r
                 status = ret;\r
                 break;\r
             }\r
@@ -1299,6 +1351,10 @@ static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_
         /*record the malloc buf for the cmd end to release*/\r
         reg->MMU_base = MMU_Base;\r
 \r
+        /* flush data to DDR */\r
+        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
         if (pages != NULL) {\r
             /* Free the page table */\r
             kfree(pages);\r
index 15a298dbc313fdb47de00b77994867dfabdeb796..f7b19cde031898d9ad00c14de3c048c228ae214e 100755 (executable)
@@ -1003,6 +1003,11 @@ RGA_set_bitblt_reg_info(u8 *base, const struct rga_req * msg, TILE_INFO *tile)
             xp = CLIP(xp, msg->src.x_offset, msg->src.x_offset + msg->src.act_w - 1);\r
             yp = CLIP(yp, msg->src.y_offset, msg->src.y_offset + msg->src.act_h - 1);\r
         }\r
+\r
+        printk("xoffset = %.8x\n", msg->src.x_offset);\r
+        printk("yoffset = %.8x\n", msg->src.y_offset);\r
+        printk("xp = %.8x\n", xp);\r
+        printk("yp = %.8x\n", yp);\r
         \r
         switch(msg->src.format)\r
         {        \r
@@ -1047,6 +1052,8 @@ RGA_set_bitblt_reg_info(u8 *base, const struct rga_req * msg, TILE_INFO *tile)
                 break;\r
         }\r
 \r
+        printk("y_addr is %.8x\n", y_addr);\r
+\r
         *bRGA_SRC_Y_MST = y_addr;\r
         *bRGA_SRC_CB_MST = u_addr;\r
         *bRGA_SRC_CR_MST = v_addr;\r
@@ -1290,8 +1297,11 @@ RGA_set_pre_scale_reg_info(u8 *base, const struct rga_req *msg)
    dst_width = msg->dst.act_w;\r
    dst_height = msg->dst.act_h;\r
 \r
-   h_ratio = (src_width )<<16 / dst_width;\r
-   v_ratio = (src_height)<<16 / dst_height;\r
+   printk("src_act_w = %.8x, src_act_h =%.8x dst_act_w = %.8x, dst_act_h = %.8x\n", \r
+    msg->src.act_w, msg->src.act_h, msg->dst.act_w, msg->dst.act_h);\r
+\r
+   h_ratio = (src_width <<16) / dst_width;\r
+   v_ratio = (src_height<<16) / dst_height;\r
 \r
    if (h_ratio <= (1<<16))    \r
        h_ratio = 0;\r