rockchip/rga: add rga data cache flush
author  Zikim,Wei <wzq@rock-chips.com>
Wed, 28 Sep 2016 13:35:27 +0000 (21:35 +0800)
committer  Huang, Tao <huangtao@rock-chips.com>
Fri, 14 Oct 2016 09:11:35 +0000 (17:11 +0800)
If the buffer was allocated from ION, user space can
flush it through the ION APIs, but if the buffer was
allocated by other means, such as malloc(), there is
no easy way for user space to flush the data cache.
So have RGA flush the data cache itself to keep the
caches coherent.

Change-Id: I5fcfc3e00c6a8f6b12ed66043b37b0c7c840e7ee
Signed-off-by: Zikim,Wei <wzq@rock-chips.com>
drivers/video/rockchip/rga2/rga2_mmu_info.c

index 21c9a8695d554a3af52eb1273e594650d87960e6..f1e04599ab15e960db2d00d2638dc90c94f5fd00 100644 (file)
@@ -35,6 +35,16 @@ extern struct rga2_mmu_buf_t rga2_mmu_buf;
 #define V7_VATOPA_GET_NS(X)            ((X>>9) & 1)\r
 #define V7_VATOPA_GET_SS(X)            ((X>>1) & 1)\r
 \r
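+/*
+ * Flush the CPU data caches for [pstart, pend) so that buffer contents
+ * reach DRAM before the RGA engine accesses them.  On ARM the outer
+ * (L2) cache must be flushed as well; on ARM64, __dma_flush_range()
+ * cleans and invalidates by VA to the point of coherency.
+ */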
+static void rga_dma_flush_range(void *pstart, void *pend)
+{
+#ifdef CONFIG_ARM
+       dmac_flush_range(pstart, pend);
+       outer_flush_range(virt_to_phys(pstart), virt_to_phys(pend));
+#elif defined(CONFIG_ARM64)
+       __dma_flush_range(pstart, pend);
+#endif
+}
+
 #if 0\r
 static unsigned int armv7_va_to_pa(unsigned int v_addr)\r
 {\r
@@ -197,8 +207,6 @@ static int rga2_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, uns
             end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));\r
             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
             pageCount = end - start;\r
-            //printk("yrgb_addr = %.8x\n", yrgb_addr);\r
-            //printk("uv_addr = %.8x\n", uv_addr);\r
             break;\r
         case RGA2_FORMAT_YCbCr_420_P :\r
         case RGA2_FORMAT_YCrCb_420_P :\r
@@ -244,126 +252,92 @@ static int rga2_buf_size_cal(unsigned long yrgb_addr, unsigned long uv_addr, uns
     return pageCount;\r
 }\r
 \r
-static int rga2_MapUserMemory(struct page **pages,\r
-                                            uint32_t *pageTable,\r
-                                            unsigned long Memory,\r
-                                            uint32_t pageCount)\r
-{\r
-    int32_t result;\r
-    uint32_t i;\r
-    uint32_t status;\r
-    unsigned long Address;\r
-\r
-    status = 0;\r
-    Address = 0;\r
-\r
-    do\r
-    {\r
-        down_read(&current->mm->mmap_sem);\r
-        result = get_user_pages(current,\r
-                current->mm,\r
-                Memory << PAGE_SHIFT,\r
-                pageCount,\r
-                1,\r
-                0,\r
-                pages,\r
-                NULL\r
-                );\r
-        up_read(&current->mm->mmap_sem);\r
-\r
-        if(result <= 0 || result < pageCount)\r
-        {\r
-            struct vm_area_struct *vma;\r
-\r
-            if (result>0) {\r
-                           down_read(&current->mm->mmap_sem);\r
-                           for (i = 0; i < result; i++)\r
-                                   put_page(pages[i]);\r
-                           up_read(&current->mm->mmap_sem);\r
-                   }\r
-\r
-            for(i=0; i<pageCount; i++)\r
-            {\r
-                vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);\r
-\r
-                if (vma)//&& (vma->vm_flags & VM_PFNMAP) )\r
-                {\r
-                    do\r
-                    {\r
-                        pte_t       * pte;\r
-                        spinlock_t  * ptl;\r
-                        unsigned long pfn;\r
-                        pgd_t * pgd;\r
-                        pud_t * pud;\r
-\r
-                        pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);\r
-\r
-                        if(pgd_val(*pgd) == 0)\r
-                        {\r
-                            //printk("rga pgd value is zero \n");\r
-                            break;\r
-                        }\r
-\r
-                        pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);\r
-                        if (pud)\r
-                        {\r
-                            pmd_t * pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);\r
-                            if (pmd)\r
-                            {\r
-                                pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);\r
-                                if (!pte)\r
-                                {\r
-                                    pte_unmap_unlock(pte, ptl);\r
-                                    break;\r
-                                }\r
-                            }\r
-                            else\r
-                            {\r
-                                break;\r
-                            }\r
-                        }\r
-                        else\r
-                        {\r
-                            break;\r
-                        }\r
-\r
-                        pfn = pte_pfn(*pte);\r
-                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));\r
-                        pte_unmap_unlock(pte, ptl);\r
-                    }\r
-                    while (0);\r
-\r
-                    pageTable[i] = (uint32_t)Address;\r
-                }\r
-                else\r
-                {\r
-                    status = RGA2_OUT_OF_RESOURCES;\r
-                    break;\r
-                }\r
-            }\r
-\r
-            return status;\r
-        }\r
-\r
-        /* Fill the page table. */\r
-        for(i=0; i<pageCount; i++)\r
-        {\r
-            /* Get the physical address from page struct. */\r
-            pageTable[i] = page_to_phys(pages[i]);\r
-        }\r
-\r
-        down_read(&current->mm->mmap_sem);\r
-               for (i = 0; i < result; i++)\r
-                       put_page(pages[i]);\r
-               up_read(&current->mm->mmap_sem);\r
-\r
-        return 0;\r
-    }\r
-    while(0);\r
-\r
-    return status;\r
-}\r
-\r
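+/*
+ * Pin the user pages backing pageCount pages starting at page index
+ * Memory, fill pageTable with their physical addresses and flush each
+ * page out of the CPU caches.  writeFlag requests write access to the
+ * pages (set for buffers the RGA engine will write).
+ */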
+static int rga2_MapUserMemory(struct page **pages, uint32_t *pageTable,
+                             unsigned long Memory, uint32_t pageCount,
+                             int writeFlag)
+{
+       struct vm_area_struct *vma;
+       int32_t result;
+       uint32_t i;
+       uint32_t status;
+       unsigned long Address;
+       unsigned long pfn;
+       void *virt;
+       spinlock_t *ptl;
+       pte_t *pte;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+
+       status = 0;
+       Address = 0;
+       down_read(&current->mm->mmap_sem);
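+       /*
+        * Linux 4.6 removed the task/mm arguments from get_user_pages();
+        * pinning pages of an explicitly given mm now goes through
+        * get_user_pages_remote().
+        */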
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+       result = get_user_pages(current, current->mm, Memory << PAGE_SHIFT,
+                               pageCount, writeFlag, 0, pages, NULL);
+#else
+       result = get_user_pages_remote(current, current->mm,
+                                      Memory << PAGE_SHIFT,
+                                      pageCount, writeFlag, 0, pages, NULL);
+#endif
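+       /*
+        * result is signed; keep the result > 0 test so a negative error
+        * code is not wrapped by the unsigned comparison with pageCount.
+        */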
+       if (result > 0 && result >= pageCount) {
+               /* Fill the page table. */
+               for (i = 0; i < pageCount; i++) {
+                       /* Get the physical address from page struct. */
+                       pageTable[i] = page_to_phys(pages[i]);
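+                       /*
+                        * The flush goes through the page's kernel
+                        * linear-map alias; the user mapping may hold
+                        * dirty cache lines (e.g. a malloc() buffer).
+                        */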
+                       virt = phys_to_virt(pageTable[i]);
+                       rga_dma_flush_range(virt, virt + PAGE_SIZE);
+               }
+               for (i = 0; i < result; i++)
+                       put_page(pages[i]);
+               up_read(&current->mm->mmap_sem);
+               return 0;
+       }
+       if (result > 0) {
+               for (i = 0; i < result; i++)
+                       put_page(pages[i]);
+       }
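+       /*
+        * get_user_pages() could not pin every page (e.g. VM_PFNMAP
+        * mappings); fall back to resolving the physical addresses with
+        * a manual page-table walk.
+        */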
+       for (i = 0; i < pageCount; i++) {
+               vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
+               if (!vma) {
+                       status = RGA2_OUT_OF_RESOURCES;
+                       break;
+               }
+               pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
+               if (pgd_val(*pgd) == 0) {
+                       status = RGA2_OUT_OF_RESOURCES;
+                       break;
+               }
+               pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
+               if (!pud) {
+                       status = RGA2_OUT_OF_RESOURCES;
+                       break;
+               }
+               pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
+               if (!pmd) {
+                       status = RGA2_OUT_OF_RESOURCES;
+                       break;
+               }
+               pte = pte_offset_map_lock(current->mm, pmd,
+                                         (Memory + i) << PAGE_SHIFT,
+                                         &ptl);
+               if (!pte) {
+                       pte_unmap_unlock(pte, ptl);
+                       status = RGA2_OUT_OF_RESOURCES;
+                       break;
+               }
+               pfn = pte_pfn(*pte);
+               Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i)
+                          << PAGE_SHIFT)) & ~PAGE_MASK));
+               pte_unmap_unlock(pte, ptl);
+               pageTable[i] = (uint32_t)Address;
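+               /* flush this page through its kernel linear-map alias */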
+               virt = phys_to_virt(pageTable[i]);
+               rga_dma_flush_range(virt, virt + PAGE_SIZE);
+       }
+       up_read(&current->mm->mmap_sem);
+       return status;
+}
+
 static int rga2_MapION(struct sg_table *sg,\r
                                uint32_t *Memory,\r
                                int32_t  pageCount)\r
@@ -401,167 +375,164 @@ static int rga2_MapION(struct sg_table *sg,
 }\r
 \r
 \r
-static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)\r
-{\r
-    int Src0MemSize, DstMemSize, Src1MemSize;\r
-    unsigned long Src0Start, Src1Start, DstStart;\r
-    uint32_t AllSize;\r
-    uint32_t *MMU_Base, *MMU_Base_phys;\r
-    int ret;\r
-    int status;\r
-    uint32_t uv_size, v_size;\r
-\r
-    struct page **pages = NULL;\r
-\r
-    MMU_Base = NULL;\r
-\r
-    Src0MemSize = 0;\r
-    Src1MemSize = 0;\r
-    DstMemSize  = 0;\r
-\r
-    do {\r
-        /* cal src0 buf mmu info */\r
-        if(req->mmu_info.src0_mmu_flag & 1) {\r
-            Src0MemSize = rga2_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
-                                        req->src.format, req->src.vir_w,\r
-                                        (req->src.vir_h),\r
-                                        &Src0Start);\r
-            if (Src0MemSize == 0) {\r
-                return -EINVAL;\r
-            }\r
-        }\r
-\r
-        /* cal src1 buf mmu info */\r
-        if(req->mmu_info.src1_mmu_flag & 1) {\r
-            Src1MemSize = rga2_buf_size_cal(req->src1.yrgb_addr, req->src1.uv_addr, req->src1.v_addr,\r
-                                        req->src1.format, req->src1.vir_w,\r
-                                        (req->src1.vir_h),\r
-                                        &Src1Start);\r
-            Src0MemSize = (Src0MemSize + 3) & (~3);\r
-            if (Src1MemSize == 0) {\r
-                return -EINVAL;\r
-            }\r
-        }\r
-\r
-\r
-        /* cal dst buf mmu info */\r
-        if(req->mmu_info.dst_mmu_flag & 1) {\r
-            DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
-                                            req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
-                                            &DstStart);\r
-            if(DstMemSize == 0) {\r
-                return -EINVAL;\r
-            }\r
-        }\r
-\r
-        /* Cal out the needed mem size */\r
-        Src0MemSize = (Src0MemSize+15)&(~15);\r
-        Src1MemSize = (Src1MemSize+15)&(~15);\r
-        DstMemSize  = (DstMemSize+15)&(~15);\r
-        AllSize = Src0MemSize + Src1MemSize + DstMemSize;\r
-\r
-        if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {\r
-            pr_err("RGA2 Get MMU mem failed\n");\r
-            status = RGA2_MALLOC_ERROR;\r
-            break;\r
-        }\r
-\r
-        pages = rga2_mmu_buf.pages;\r
-\r
-        mutex_lock(&rga2_service.lock);\r
-        MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));\r
-        MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));\r
-        mutex_unlock(&rga2_service.lock);\r
-        if(Src0MemSize) {\r
-            if (req->sg_src0) {\r
-                ret = rga2_MapION(req->sg_src0, &MMU_Base[0], Src0MemSize);\r
-            }\r
-            else {\r
-                ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], Src0Start, Src0MemSize);\r
-            }\r
-\r
-            if (ret < 0) {\r
-                pr_err("rga2 map src0 memory failed\n");\r
-                status = ret;\r
-                break;\r
-            }\r
-\r
-            /* change the buf address in req struct */\r
-            req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));\r
-            uv_size = (req->src.uv_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;\r
-            v_size = (req->src.v_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;\r
-\r
-            req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
-            req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);\r
-            req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);\r
-        }\r
-\r
-        if(Src1MemSize) {\r
-            if (req->sg_src1) {\r
-                ret = rga2_MapION(req->sg_src1, MMU_Base + Src0MemSize, Src1MemSize);\r
-            }\r
-            else {\r
-                ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize, Src1Start, Src1MemSize);\r
-            }\r
-\r
-            if (ret < 0) {\r
-                pr_err("rga2 map src1 memory failed\n");\r
-                status = ret;\r
-                break;\r
-            }\r
-\r
-            /* change the buf address in req struct */\r
-            req->mmu_info.src1_base_addr = ((unsigned long)(MMU_Base_phys + Src0MemSize));\r
-            req->src1.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
-        }\r
-\r
-        if(DstMemSize) {\r
-            if (req->sg_dst) {\r
-                ret = rga2_MapION(req->sg_dst, MMU_Base + Src0MemSize + Src1MemSize, DstMemSize);\r
-            }\r
-            else {\r
-                ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize + Src1MemSize, DstStart, DstMemSize);\r
-            }\r
-            if (ret < 0) {\r
-                pr_err("rga2 map dst memory failed\n");\r
-                status = ret;\r
-                break;\r
-            }\r
-\r
-            /* change the buf address in req struct */\r
-            req->mmu_info.dst_base_addr  = ((unsigned long)(MMU_Base_phys + Src0MemSize + Src1MemSize));\r
-            req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
-            uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
-            v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
-            req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((uv_size) << PAGE_SHIFT);\r
-            req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((v_size) << PAGE_SHIFT);\r
-\r
-           if (((req->alpha_rop_flag & 1) == 1) && (req->bitblt_mode == 0)) {\r
-               req->mmu_info.src1_base_addr = req->mmu_info.dst_base_addr;\r
-               req->mmu_info.src1_mmu_flag  = req->mmu_info.dst_mmu_flag;\r
-           }\r
-        }\r
-\r
-        /* flush data to DDR */\r
-        #ifdef CONFIG_ARM\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
-        #elif defined(CONFIG_ARM64)\r
-        __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        #endif\r
-\r
-        rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
-        reg->MMU_len = AllSize;\r
-\r
-        status = 0;\r
-\r
-        return status;\r
-    }\r
-    while(0);\r
-\r
-    return status;\r
-}\r
-\r
+static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)
+{
+       int Src0MemSize, DstMemSize, Src1MemSize;
+       unsigned long Src0Start, Src1Start, DstStart;
+       unsigned long Src0PageCount, Src1PageCount, DstPageCount;
+       uint32_t AllSize;
+       uint32_t *MMU_Base, *MMU_Base_phys;
+       int ret;
+       int status;
+       uint32_t uv_size, v_size;
+       struct page **pages = NULL;
+       MMU_Base = NULL;
+       Src0MemSize = 0;
+       Src1MemSize = 0;
+       DstMemSize  = 0;
+       Src0PageCount = 0;
+       Src1PageCount = 0;
+       DstPageCount = 0;
+
+       /* cal src0 buf mmu info */
+       if (req->mmu_info.src0_mmu_flag & 1) {
+               Src0PageCount = rga2_buf_size_cal(req->src.yrgb_addr,
+                                                 req->src.uv_addr,
+                                                 req->src.v_addr,
+                                                 req->src.format,
+                                                 req->src.vir_w,
+                                                 (req->src.vir_h),
+                                                 &Src0Start);
+               if (Src0PageCount == 0)
+                       return -EINVAL;
+       }
+       /* cal src1 buf mmu info */
+       if (req->mmu_info.src1_mmu_flag & 1) {
+               Src1PageCount = rga2_buf_size_cal(req->src1.yrgb_addr,
+                                                 req->src1.uv_addr,
+                                                 req->src1.v_addr,
+                                                 req->src1.format,
+                                                 req->src1.vir_w,
+                                                 (req->src1.vir_h),
+                                                 &Src1Start);
+               if (Src1PageCount == 0)
+                       return -EINVAL;
+               Src1PageCount = (Src1PageCount + 3) & (~3);
+       }
+       /* cal dst buf mmu info */
+       if (req->mmu_info.dst_mmu_flag & 1) {
+               DstPageCount = rga2_buf_size_cal(req->dst.yrgb_addr,
+                                                req->dst.uv_addr,
+                                                req->dst.v_addr,
+                                                req->dst.format,
+                                                req->dst.vir_w,
+                                                req->dst.vir_h,
+                                                &DstStart);
+               if (DstPageCount == 0)
+                       return -EINVAL;
+       }
+       /* Cal out the needed mem size */
+       Src0MemSize = (Src0PageCount + 15) & (~15);
+       Src1MemSize = (Src1PageCount + 15) & (~15);
+       DstMemSize  = (DstPageCount + 15) & (~15);
+       AllSize = Src0MemSize + Src1MemSize + DstMemSize;
+
+       if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
+               pr_err("RGA2 Get MMU mem failed\n");
+               status = RGA2_MALLOC_ERROR;
+               goto out;
+       }
+       pages = rga2_mmu_buf.pages;
+       mutex_lock(&rga2_service.lock);
+       MMU_Base = rga2_mmu_buf.buf_virtual +
+                  (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
+       MMU_Base_phys = rga2_mmu_buf.buf +
+                       (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
+       mutex_unlock(&rga2_service.lock);
+       if (Src0MemSize) {
+               if (req->sg_src0)
+                       ret = rga2_MapION(req->sg_src0,
+                                         &MMU_Base[0], Src0MemSize);
+               else
+                       ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
+                                                Src0Start, Src0PageCount, 0);
+
+               if (ret < 0) {
+                       pr_err("rga2 map src0 memory failed\n");
+                       status = ret;
+                       goto out;
+               }
+               /* change the buf address in req struct */
+               req->mmu_info.src0_base_addr = (((unsigned long)MMU_Base_phys));
+               uv_size = (req->src.uv_addr
+                          - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
+               v_size = (req->src.v_addr
+                         - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
+
+               req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
+               req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) |
+                                                       (uv_size << PAGE_SHIFT);
+               req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) |
+                                                       (v_size << PAGE_SHIFT);
+       }
+       if (Src1MemSize) {
+               if (req->sg_src1)
+                       ret = rga2_MapION(req->sg_src1,
+                                       MMU_Base + Src0MemSize, Src1MemSize);
+               else
+                       ret = rga2_MapUserMemory(&pages[0],
+                                                MMU_Base + Src0MemSize,
+                                                Src1Start, Src1PageCount, 0);
+               if (ret < 0) {
+                       pr_err("rga2 map src1 memory failed\n");
+                       status = ret;
+                       goto out;
+               }
+               /* change the buf address in req struct */
+               req->mmu_info.src1_base_addr = ((unsigned long)(MMU_Base_phys
+                                               + Src0MemSize));
+               req->src1.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
+       }
+       if (DstMemSize) {
+               if (req->sg_dst)
+                       ret = rga2_MapION(req->sg_dst, MMU_Base + Src0MemSize
+                                         + Src1MemSize, DstMemSize);
+               else
+                       ret = rga2_MapUserMemory(&pages[0], MMU_Base
+                                                + Src0MemSize + Src1MemSize,
+                                                DstStart, DstPageCount, 1);
+               if (ret < 0) {
+                       pr_err("rga2 map dst memory failed\n");
+                       status = ret;
+                       goto out;
+               }
+               /* change the buf address in req struct */
+               req->mmu_info.dst_base_addr  = ((unsigned long)(MMU_Base_phys
+                                       + Src0MemSize + Src1MemSize));
+               req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
+               uv_size = (req->dst.uv_addr
+                          - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
+               v_size = (req->dst.v_addr
+                         - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
+               req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) |
+                                                  ((uv_size) << PAGE_SHIFT);
+               req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) |
+                       ((v_size) << PAGE_SHIFT);
+
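+               /*
+                * In alpha/ROP mode the blend reads the destination, so
+                * src1 reuses the destination translation table.
+                */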
+               if (((req->alpha_rop_flag & 1) == 1) && (req->bitblt_mode == 0)) {
+                       req->mmu_info.src1_base_addr = req->mmu_info.dst_base_addr;
+                       req->mmu_info.src1_mmu_flag  = req->mmu_info.dst_mmu_flag;
+               }
+       }
+       /* flush data to DDR */
+       rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
+       rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
+       reg->MMU_len = AllSize;
+       status = 0;
+out:
+       return status;
+}
+
 static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)\r
 {\r
     int SrcMemSize, DstMemSize;\r
@@ -625,7 +596,8 @@ static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_re
         mutex_unlock(&rga2_service.lock);\r
 \r
         if(SrcMemSize) {\r
-            ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
+               ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
+                                        SrcStart, SrcMemSize, 0);
             if (ret < 0) {\r
                 pr_err("rga2 map src0 memory failed\n");\r
                 status = ret;\r
@@ -638,7 +610,8 @@ static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_re
         }\r
 \r
         if(DstMemSize) {\r
-            ret = rga2_MapUserMemory(&pages[0], MMU_Base + SrcMemSize, DstStart, DstMemSize);\r
+               ret = rga2_MapUserMemory(&pages[0], MMU_Base + SrcMemSize,
+                                        DstStart, DstMemSize, 1);
             if (ret < 0) {\r
                 pr_err("rga2 map dst memory failed\n");\r
                 status = ret;\r
@@ -651,13 +624,7 @@ static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_re
         }\r
 \r
         /* flush data to DDR */\r
-        #ifdef CONFIG_ARM\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
-        #elif defined(CONFIG_ARM64)\r
-        __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        #endif\r
-\r
+        rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
         rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
         reg->MMU_len = AllSize;\r
 \r
@@ -711,7 +678,8 @@ static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *
                 ret = rga2_MapION(req->sg_dst, &MMU_Base[0], DstMemSize);\r
             }\r
             else {\r
-                ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
+                   ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
+                                            DstStart, DstMemSize, 1);
             }\r
             if (ret < 0) {\r
                 pr_err("rga2 map dst memory failed\n");\r
@@ -725,13 +693,7 @@ static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *
         }\r
 \r
         /* flush data to DDR */\r
-        #ifdef CONFIG_ARM\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
-        #elif defined(CONFIG_ARM64)\r
-        __dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
-        #endif\r
-\r
+        rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
         rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
 \r
         return 0;\r
@@ -777,7 +739,8 @@ static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct
         pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
 \r
         if(SrcMemSize) {\r
-            ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
+               ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0],
+                                        SrcStart, SrcMemSize, 0);
             if (ret < 0) {\r
                 pr_err("rga2 map palette memory failed\n");\r
                 status = ret;\r
@@ -790,13 +753,7 @@ static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct
         }\r
 \r
         /* flush data to DDR */\r
-        #ifdef CONFIG_ARM\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));\r
-        #elif defined(CONFIG_ARM64)\r
-        __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        #endif\r
-\r
+        rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
         rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
         reg->MMU_len = AllSize;\r
 \r
@@ -844,7 +801,9 @@ static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rg
 \r
         if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
         {\r
-            ret = rga2_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+               ret = rga2_MapUserMemory(&pages[CMDMemSize],
+                                        &MMU_Base[CMDMemSize],
+                                        SrcStart, SrcMemSize, 1);
             if (ret < 0) {\r
                 pr_err("rga map src memory failed\n");\r
                 status = ret;\r
@@ -873,13 +832,7 @@ static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rg
         reg->MMU_base = MMU_Base;\r
 \r
         /* flush data to DDR */\r
-        #ifdef CONFIG_ARM\r
-        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
-        #elif defined(CONFIG_ARM64)\r
-        __dma_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-        #endif\r
-\r
+        rga_dma_flush_range(MMU_Base, (MMU_Base + AllSize));
         return 0;\r
 \r
     }\r