From 527d26699ddf48021b439a6eb6804ed853a34362 Mon Sep 17 00:00:00 2001
From: zsq
Date: Mon, 3 Jun 2013 08:54:41 +0800
Subject: [PATCH] fix rot 90 dst loss data bug

---
 drivers/video/rockchip/rga/rga_mmu_info.c | 472 +++++++++++-----------
 1 file changed, 236 insertions(+), 236 deletions(-)

diff --git a/drivers/video/rockchip/rga/rga_mmu_info.c b/drivers/video/rockchip/rga/rga_mmu_info.c
index e5b338f0abc3..f79eb64e8cdc 100755
--- a/drivers/video/rockchip/rga/rga_mmu_info.c
+++ b/drivers/video/rockchip/rga/rga_mmu_info.c
@@ -36,7 +36,7 @@ extern rga_service_info rga_service;
 static unsigned int armv7_va_to_pa(unsigned int v_addr)
 {
     unsigned int p_addr;
-    __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
+    __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
                        "isb\n"
                        "dsb\n"
                        "mrc p15, 0, %0, c7, c4, 0\n"
@@ -51,7 +51,7 @@ static unsigned int armv7_va_to_pa(unsigned int v_addr)
 }
 #endif
 
-static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
+static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
 {
     uint32_t start, end;
     uint32_t pageCount;
@@ -60,11 +60,11 @@ static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
     start = Mem >> PAGE_SHIFT;
     pageCount = end - start;
     *StartAddr = start;
-    return pageCount;
+    return pageCount;
 }
 
-static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
-                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr )
+static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
+                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr )
 {
     uint32_t size_yrgb = 0;
     uint32_t size_uv = 0;
@@ -72,7 +72,7 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
     uint32_t stride = 0;
     uint32_t start, end;
     uint32_t pageCount;
-
+
     switch(format)
     {
         case RK_FORMAT_RGBA_8888 :
@@ -80,7 +80,7 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
             size_yrgb = stride*h;
             end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
             start = yrgb_addr >> PAGE_SHIFT;
-            pageCount = end - start;
+            pageCount = end - start;
             break;
         case RK_FORMAT_RGBX_8888 :
             stride = (w * 4 + 3) & (~3);
@@ -103,28 +103,28 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
             pageCount = end - start;
             break;
         case RK_FORMAT_RGB_565 :
-            stride = (w*2 + 3) & (~3);
+            stride = (w*2 + 3) & (~3);
             size_yrgb = stride * h;
             end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
             start = yrgb_addr >> PAGE_SHIFT;
             pageCount = end - start;
             break;
         case RK_FORMAT_RGBA_5551 :
-            stride = (w*2 + 3) & (~3);
+            stride = (w*2 + 3) & (~3);
             size_yrgb = stride * h;
             end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
             start = yrgb_addr >> PAGE_SHIFT;
             pageCount = end - start;
             break;
         case RK_FORMAT_RGBA_4444 :
-            stride = (w*2 + 3) & (~3);
+            stride = (w*2 + 3) & (~3);
             size_yrgb = stride * h;
             end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
             start = yrgb_addr >> PAGE_SHIFT;
             pageCount = end - start;
             break;
         case RK_FORMAT_BGR_888 :
-            stride = (w*3 + 3) & (~3);
+            stride = (w*3 + 3) & (~3);
             size_yrgb = stride * h;
             end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
             start = yrgb_addr >> PAGE_SHIFT;
@@ -137,10 +137,10 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
             size_yrgb = stride * h;
             size_uv = stride * h;
             start = MIN(yrgb_addr, uv_addr);
-            start >>= PAGE_SHIFT;
+            start >>= PAGE_SHIFT;
             end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-            pageCount = end - start;
+            pageCount = end - start;
             break;
         case RK_FORMAT_YCbCr_422_P :
             stride = (w + 3) & (~3);
@@ -148,17 +148,17 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
             size_uv = ((stride >> 1) * h);
             size_v = ((stride >> 1) * h);
             start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
-            start = start >> PAGE_SHIFT;
+            start = start >> PAGE_SHIFT;
             end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-            pageCount = end - start;
+            pageCount = end - start;
             break;
         case RK_FORMAT_YCbCr_420_SP :
             stride = (w + 3) & (~3);
             size_yrgb = stride * h;
-            size_uv = (stride * (h >> 1));
+            size_uv = (stride * (h >> 1));
             start = MIN(yrgb_addr, uv_addr);
-            start >>= PAGE_SHIFT;
+            start >>= PAGE_SHIFT;
             end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
             pageCount = end - start;
@@ -169,10 +169,10 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
             size_uv = ((stride >> 1) * (h >> 1));
             size_v = ((stride >> 1) * (h >> 1));
             start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
-            start >>= PAGE_SHIFT;
+            start >>= PAGE_SHIFT;
             end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-            pageCount = end - start;
+            pageCount = end - start;
             break;
 
         case RK_FORMAT_YCrCb_422_SP :
@@ -180,7 +180,7 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
             size_yrgb = stride * h;
             size_uv = stride * h;
             start = MIN(yrgb_addr, uv_addr);
-            start >>= PAGE_SHIFT;
+            start >>= PAGE_SHIFT;
             end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
             pageCount = end - start;
@@ -191,18 +191,18 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
             size_uv = ((stride >> 1) * h);
             size_v = ((stride >> 1) * h);
             start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
-            start >>= PAGE_SHIFT;
+            start >>= PAGE_SHIFT;
             end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
             pageCount = end - start;
             break;
-
+
         case RK_FORMAT_YCrCb_420_SP :
             stride = (w + 3) & (~3);
             size_yrgb = stride * h;
-            size_uv = (stride * (h >> 1));
+            size_uv = (stride * (h >> 1));
             start = MIN(yrgb_addr, uv_addr);
-            start >>= PAGE_SHIFT;
+            start >>= PAGE_SHIFT;
             end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
             pageCount = end - start;
@@ -213,13 +213,13 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
             size_uv = ((stride >> 1) * (h >> 1));
             size_v = ((stride >> 1) * (h >> 1));
             start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
-            start >>= PAGE_SHIFT;
+            start >>= PAGE_SHIFT;
             end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
             end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
             pageCount = end - start;
             break;
-        #if 0
-        case RK_FORMAT_BPP1 :
+        #if 0
+        case RK_FORMAT_BPP1 :
             break;
         case RK_FORMAT_BPP2 :
             break;
@@ -227,7 +227,7 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
             break;
         case RK_FORMAT_BPP8 :
             break;
-        #endif
+        #endif
         default :
             pageCount = 0;
             start = 0;
@@ -235,12 +235,12 @@ static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_add
     }
 
     *StartAddr = start;
-    return pageCount;
+    return pageCount;
 }
 
-static int rga_MapUserMemory(struct page **pages,
-                             uint32_t *pageTable,
-                             uint32_t Memory,
+static int rga_MapUserMemory(struct page **pages,
+                             uint32_t *pageTable,
+                             uint32_t Memory,
                              uint32_t pageCount)
 {
     int32_t result;
@@ -248,12 +248,12 @@ static int rga_MapUserMemory(struct page **pages,
     uint32_t status;
     uint32_t Address;
     //uint32_t temp;
-
+
     status = 0;
     Address = 0;
-
+
     do
-    {
+    {
         down_read(&current->mm->mmap_sem);
         result = get_user_pages(current,
                 current->mm,
@@ -266,13 +266,13 @@ static int rga_MapUserMemory(struct page **pages,
                 );
         up_read(&current->mm->mmap_sem);
 
-        #if 0
-        if(result <= 0 || result < pageCount)
+        #if 0
+        if(result <= 0 || result < pageCount)
         {
             status = 0;
 
             for(i=0; i<pageCount; i++)
             {
                 vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
 
                 if (vma)//&& (vma->vm_flags & VM_PFNMAP) )
@@ -302,10 +302,10 @@ static int rga_MapUserMemory(struct page **pages,
                     {
                         pte_t * pte;
                         spinlock_t * ptl;
-                        unsigned long pfn;
+                        unsigned long pfn;
                         pgd_t * pgd;
                         pud_t * pud;
-
+
                         pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
 
                         if(pgd_val(*pgd) == 0)
@@ -313,7 +313,7 @@ static int rga_MapUserMemory(struct page **pages,
                             //printk("rga pgd value is zero \n");
                             break;
                         }
-
+
                         pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
                         if (pud)
                         {
@@ -338,30 +338,30 @@ static int rga_MapUserMemory(struct page **pages,
                         }
 
                         pfn = pte_pfn(*pte);
-                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
-                        pte_unmap_unlock(pte, ptl);
+                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
+                        pte_unmap_unlock(pte, ptl);
                     }
                     while (0);
-
+
                     #else
-
+
                     do
                     {
                         pte_t * pte;
                         spinlock_t * ptl;
-                        unsigned long pfn;
+                        unsigned long pfn;
                         pgd_t * pgd;
                         pud_t * pud;
                         pmd_t * pmd;
-
-                        pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
+
+                        pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
                         pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
                         pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
                         pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
-
+
                         pfn = pte_pfn(*pte);
-                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
-                        pte_unmap_unlock(pte, ptl);
+                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
+                        pte_unmap_unlock(pte, ptl);
                     }
                     while (0);
 
                     #endif
@@ -372,15 +372,15 @@ static int rga_MapUserMemory(struct page **pages,
                 {
                     status = RGA_OUT_OF_RESOURCES;
                     break;
-                }
+                }
             }
-
+
             return status;
         }
         #endif
 
         /* Fill the page table. */
-        for(i=0; i<pageCount; i++)
+        for(i=0; i<pageCount; i++)
         {
             pageTable[i] = page_to_phys(pages[i]);
         }
@@ -421,73 +421,73 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
     do
     {
         /* cal src buf mmu info */
         SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                       req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,
                                       &SrcStart);
         if(SrcMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
-
-        /* cal dst buf mmu info */
+
+        /* cal dst buf mmu info */
         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
-                                      req->dst.format, req->dst.vir_w, req->dst.act_h + req->dst.y_offset,
-                                      &DstStart);
+                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
+                                      &DstStart);
         if(DstMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
-
+
         /* Cal out the needed mem size */
         AllSize = SrcMemSize + DstMemSize;
-
+
         pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc pages mem failed\n");
             status = RGA_MALLOC_ERROR;
-            break;
+            break;
         }
-
+
         MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
         if(MMU_Base == NULL) {
             pr_err("RGA MMU malloc MMU_Base point failed\n");
             status = RGA_MALLOC_ERROR;
-            break;
+            break;
         }
 
         if(req->src.yrgb_addr < KERNEL_SPACE_VALID)
-        {
+        {
             ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
             if (ret < 0) {
                 pr_err("rga map src memory failed\n");
                 status = ret;
                 break;
-            }
+            }
         }
         else
         {
             MMU_p = MMU_Base;
-
+
             if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
             {
                 /* Down scale ratio over 2, Last prc */
                 /* MMU table copy from pre scale table */
-
+
                 for(i=0; i<SrcMemSize; i++)
                 {
                     MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            }
            else
            {
                for(i=0; i<SrcMemSize; i++)
                {
                    MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
                }
            }
        }
 
        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
-        {
+        {
            #if 0
            ktime_t start, end;
            start = ktime_get();
-            #endif
+            #endif
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
@@ -497,40 +497,40 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
        else
        {
            MMU_p = MMU_Base + SrcMemSize;
-
+
            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }
 
        /* zsq
         * change the buf address in req struct
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
 
        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
-
+
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
-
+
        /*record the malloc buf for the cmd end to release*/
        reg->MMU_base = MMU_Base;
-
+
        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
 
        status = 0;
-
-        /* Free the page table */
+
+        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }
@@ -539,8 +539,8 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
     }
     while(0);
-
-    /* Free the page table */
+
+    /* Free the page table */
     if (pages != NULL) {
         kfree(pages);
     }
@@ -567,7 +567,7 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
     uint8_t shift;
     uint16_t sw, byte_num;
-
+
     shift = 3 - (req->palette_mode & 3);
     sw = req->src.vir_w;
     byte_num = sw >> shift;
@@ -575,59 +575,59 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
     do
     {
-
+
         SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
         if(SrcMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                       req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                       &DstStart);
         if(DstMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
         if(CMDMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         AllSize = SrcMemSize + DstMemSize + CMDMemSize;
-
+
         pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc pages mem failed\n");
-            return -EINVAL;
+            return -EINVAL;
         }
 
         MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
         if(MMU_Base == NULL) {
             pr_err("RGA MMU malloc MMU_Base point failed\n");
-            break;
+            break;
         }
 
         /* map CMD addr */
-        for(i=0; i<CMDMemSize; i++)
+        for(i=0; i<CMDMemSize; i++)
         {
             MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
         }
 
         /* map src addr */
-        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
-        {
+        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
+        {
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
-            if (ret < 0)
+            if (ret < 0)
             {
                 pr_err("rga map src memory failed\n");
                 status = ret;
-                break;
+                break;
             }
         }
         else
         {
             MMU_p = MMU_Base + CMDMemSize;
-
+
             for(i=0; i<SrcMemSize; i++)
             {
                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
             }
         }
 
         /* map dst addr */
-        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
+        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
         {
             ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
-            if (ret < 0)
+            if (ret < 0)
             {
                 pr_err("rga map dst memory failed\n");
                 status = ret;
@@ -648,19 +648,19 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
         else
         {
             MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
-
+
             for(i=0; i<DstMemSize; i++)
             {
                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
             }
         }
 
         /* zsq
          * change the buf address in req struct
          */
-        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
+        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
@@ -673,7 +673,7 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
         outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
 
         /* Free the page table */
-        if (pages != NULL) {
+        if (pages != NULL) {
             kfree(pages);
         }
@@ -683,7 +683,7 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *
     while(0);
 
     /* Free the page table */
-    if (pages != NULL) {
+    if (pages != NULL) {
         kfree(pages);
     }
@@ -709,31 +709,31 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req
     MMU_Base = NULL;
 
     do
-    {
+    {
         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                       req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                       &DstStart);
         if(DstMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         AllSize = DstMemSize;
-
+
         pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc pages mem failed\n");
-            status = RGA_MALLOC_ERROR;
+            status = RGA_MALLOC_ERROR;
             break;
         }
-
+
         MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc MMU_Base point failed\n");
             status = RGA_MALLOC_ERROR;
-            break;
+            break;
         }
 
-        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
+        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
         {
             ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
             if (ret < 0) {
@@ -745,22 +745,22 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req
         else
         {
             MMU_p = MMU_Base;
-
+
             for(i=0; i<DstMemSize; i++)
             {
                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
             }
         }
 
         /* zsq
          * change the buf address in req struct
          */
-
-        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
+
+        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
-
+
         /*record the malloc buf for the cmd end to release*/
         reg->MMU_base = MMU_Base;
@@ -769,19 +769,19 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req
         outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
 
         /* Free the page table */
-        if (pages != NULL)
+        if (pages != NULL)
             kfree(pages);
 
         return 0;
     }
     while(0);
 
-    if (pages != NULL)
+    if (pages != NULL)
         kfree(pages);
 
     if (MMU_Base != NULL)
         kfree(MMU_Base);
-
+
     return status;
 }
@@ -799,24 +799,24 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
     MMU_Base = NULL;
 
     do
-    {
-        /* cal dst buf mmu info */
+    {
+        /* cal dst buf mmu info */
         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                       req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                       &DstStart);
         if(DstMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         AllSize = DstMemSize;
-
+
         pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc pages mem failed\n");
             status = RGA_MALLOC_ERROR;
             break;
         }
-
+
         MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc MMU_Base point failed\n");
@@ -836,21 +836,21 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
         else
         {
             MMU_p = MMU_Base;
-
+
             for(i=0; i<DstMemSize; i++)
             {
                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
             }
         }
 
         /* zsq
          * change the buf address in req struct
          */
-        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
+        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
-
-
+
+
         /*record the malloc buf for the cmd end to release*/
         reg->MMU_base = MMU_Base;
@@ -859,9 +859,9 @@ static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_
         outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
 
         /* Free the page table */
-        if (pages != NULL) {
+        if (pages != NULL) {
             kfree(pages);
-        }
+        }
 
         return 0;
@@ -889,45 +889,45 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
     uint32_t uv_size, v_size;
 
     MMU_Base = NULL;
-
+
     do
     {
-        /* cal src buf mmu info */
+        /* cal src buf mmu info */
         SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                       req->src.format, req->src.vir_w, req->src.vir_h,
                                       &SrcStart);
         if(SrcMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
-        /* cal dst buf mmu info */
+        /* cal dst buf mmu info */
         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                       req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                       &DstStart);
         if(DstMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         AllSize = SrcMemSize + DstMemSize;
-
+
         pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc pages mem failed\n");
             status = RGA_MALLOC_ERROR;
-            break;
+            break;
         }
-
+
         MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc MMU_Base point failed\n");
             status = RGA_MALLOC_ERROR;
-            break;
+            break;
         }
 
         if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
         {
             ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
-            if (ret < 0)
+            if (ret < 0)
             {
                 pr_err("rga map src memory failed\n");
                 status = ret;
@@ -937,18 +937,18 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
         else
         {
             MMU_p = MMU_Base;
-
+
             for(i=0; i<SrcMemSize; i++)
             {
                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
             }
         }
 
         if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
         {
             ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
-            if (ret < 0)
+            if (ret < 0)
             {
                 pr_err("rga map dst memory failed\n");
                 status = ret;
@@ -958,7 +958,7 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
         else
         {
             MMU_p = MMU_Base + SrcMemSize;
-
+
             for(i=0; i<DstMemSize; i++)
             {
                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
@@ -969,8 +969,8 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
         req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
 
         uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
         v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
-
+
         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
         req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
         req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
@@ -986,8 +986,8 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
         req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
         req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);
-
-
+
+
         /*record the malloc buf for the cmd end to release*/
         reg->MMU_base = MMU_Base;
@@ -996,9 +996,9 @@ static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_r
         outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
 
         /* Free the page table */
-        if (pages != NULL) {
+        if (pages != NULL) {
             kfree(pages);
-        }
+        }
 
         return 0;
     }
@@ -1031,41 +1031,41 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
     do
     {
-        /* cal src buf mmu info */
+        /* cal src buf mmu info */
         SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                       req->src.format, req->src.vir_w, req->src.vir_h,
                                       &SrcStart);
         if(SrcMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
        }
 
-        /* cal dst buf mmu info */
+        /* cal dst buf mmu info */
         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                       req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                       &DstStart);
         if(DstMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         AllSize = SrcMemSize + DstMemSize;
-
+
         pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);
-        if(pages == NULL)
+        if(pages == NULL)
         {
             pr_err("RGA MMU malloc pages mem failed\n");
             status = RGA_MALLOC_ERROR;
-            break;
+            break;
         }
 
-        /*
+        /*
          * Allocate MMU Index mem
-         * This mem release in run_to_done fun
+         * This mem release in run_to_done fun
          */
         MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc MMU_Base point failed\n");
-            status = RGA_MALLOC_ERROR;
-            break;
+            status = RGA_MALLOC_ERROR;
+            break;
         }
 
         /* map src pages */
@@ -1081,16 +1081,16 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         else
         {
             MMU_p = MMU_Base;
-
+
             for(i=0; i<SrcMemSize; i++)
             {
                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
             }
         }
-
-        if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID)
-        {
+
+        if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID)
+        {
             /* kernel space */
             MMU_p = MMU_Base + SrcMemSize;
@@ -1103,31 +1103,31 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
             }
             else
             {
-                for(i=0; i<DstMemSize; i++)
+                for(i=0; i<DstMemSize; i++)
                 {
                     MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
                 }
             }
         }
         else
         {
             /* user space */
             ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
             if (ret < 0)
             {
                 pr_err("rga map dst memory failed\n");
                 status = ret;
                 break;
             }
         }
 
         /* zsq
          * change the buf address in req struct
          */
         req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
 
         uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
@@ -1152,10 +1152,10 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
 
         /* Free the page table */
-        if (pages != NULL)
-        {
+        if (pages != NULL)
+        {
             kfree(pages);
-        }
+        }
 
         return 0;
     }
@@ -1184,33 +1184,33 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg
     MMU_Base = NULL;
 
     do
-    {
-        /* cal src buf mmu info */
+    {
+        /* cal src buf mmu info */
         SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
         if(SrcMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         /* cal cmd buf mmu info */
         CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
         if(CMDMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         AllSize = SrcMemSize + CMDMemSize;
-
+
         pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc pages mem failed\n");
             status = RGA_MALLOC_ERROR;
-            break;
+            break;
         }
-
+
         MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc MMU_Base point failed\n");
             status = RGA_MALLOC_ERROR;
-            break;
+            break;
         }
 
         for(i=0; i<CMDMemSize; i++) {
@@ -1242,7 +1242,7 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg
          */
         req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
-
-        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
-
+
+        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
+
         /*record the malloc buf for the cmd end to release*/
         reg->MMU_base = MMU_Base;
@@ -1253,7 +1253,7 @@ static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rg
         if (pages != NULL) {
             /* Free the page table */
             kfree(pages);
-        }
+        }
 
         return 0;
     }
@@ -1283,32 +1283,32 @@ static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_
     do
     {
-        /* cal src buf mmu info */
+        /* cal src buf mmu info */
         SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
         if(SrcMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         /* cal cmd buf mmu info */
         CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
         if(CMDMemSize == 0) {
-            return -EINVAL;
+            return -EINVAL;
         }
 
         AllSize = SrcMemSize + CMDMemSize;
-
+
         pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc pages mem failed\n");
             status = RGA_MALLOC_ERROR;
-            break;
+            break;
         }
-
+
         MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
         if(pages == NULL) {
             pr_err("RGA MMU malloc MMU_Base point failed\n");
             status = RGA_MALLOC_ERROR;
-            break;
+            break;
         }
 
         for(i=0; i<CMDMemSize; i++) {
@@ -1341,7 +1341,7 @@ static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_
          */
         req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
-
-        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
-
+
+        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
+
         /*record the malloc buf for the cmd end to release*/
         reg->MMU_base = MMU_Base;
@@ -1369,11 +1369,11 @@ static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_
 }
 
 int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
-{
+{
     int ret;
-
+
     switch (req->render_mode) {
-        case bitblt_mode :
+        case bitblt_mode :
            ret = rga_mmu_info_BitBlt_mode(reg, req);
            break;
        case color_palette_mode :
-- 
2.34.1
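
Note: the only functional change in this patch is in rga_mmu_info_BitBlt_mode(), where the destination page table is now sized with req->dst.vir_h instead of req->dst.act_h + req->dst.y_offset; every other hunk is whitespace cleanup. The following user-space sketch is not part of the driver; it mirrors rga_buf_size_cal()'s RGBA8888 arithmetic with hypothetical buffer numbers to illustrate why the old bound can under-map pages for a rotate-90 blit, which sweeps the full virtual height of the destination:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Page count for an RGBA8888 buffer, following rga_buf_size_cal():
 * stride is the virtual scanline pitch, h the number of scanlines mapped. */
static uint32_t page_count(uint32_t yrgb_addr, uint32_t vir_w, uint32_t h)
{
    uint32_t stride = (vir_w * 4 + 3) & (~3);
    uint32_t size   = stride * h;
    uint32_t end    = (yrgb_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    uint32_t start  = yrgb_addr >> PAGE_SHIFT;
    return end - start;
}

int main(void)
{
    /* Hypothetical rotate-90 blit: 1280x720 source written into a
     * 720x1280 virtual destination buffer (act_w/act_h swap under rot 90). */
    uint32_t dst_addr = 0x10000000, vir_w = 720, vir_h = 1280;
    uint32_t act_h = 720, y_offset = 0;

    /* Old bound: maps only the first act_h + y_offset scanlines. */
    printf("old: %u pages\n", (unsigned)page_count(dst_addr, vir_w, act_h + y_offset));
    /* New bound: maps the whole virtual buffer the rotated write touches. */
    printf("new: %u pages\n", (unsigned)page_count(dst_addr, vir_w, vir_h));
    return 0;
}

With these assumed numbers the old bound maps roughly half the pages the new one does; any destination page the engine touches beyond the mapped range has no entry in the RGA MMU table, so those writes are dropped, which is consistent with the lost destination data the subject line describes.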