From: zsq
Date: Mon, 15 Dec 2014 09:24:31 +0000 (+0800)
Subject: remove rga driver mmu buf malloc to prevent crash
X-Git-Tag: firefly_0821_release~4377
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=bc190eca53cd1bc268befbb84e3a9d22429016e2;p=firefly-linux-kernel-4.4.55.git

remove rga driver mmu buf malloc to prevent crash

Instead of kmalloc()ing an MMU page table and a struct page array for
every request and freeing them when the command retires, allocate one
256KB table and one shared page array at init time and hand out slices
of the table through the front/back cursors of the new struct
rga_mmu_buf_t, so the mapping path no longer allocates or frees memory
per request.
---

diff --git a/drivers/video/rockchip/rga/rga.h b/drivers/video/rockchip/rga/rga.h
index bd121895cc4e..833837fe7a45 100755
--- a/drivers/video/rockchip/rga/rga.h
+++ b/drivers/video/rockchip/rga/rga.h
@@ -332,6 +332,16 @@ typedef struct TILE_INFO
 }
 TILE_INFO;
 
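+/*
+ * Single table of RGA MMU entries shared by all requests. buf is the
+ * physical base handed to the hardware, buf_virtual the kernel mapping
+ * of the same memory; front and back are the ring cursors, size the
+ * number of entries, and pages a scratch array reused by every
+ * rga_MapUserMemory() call.
+ */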
\n"); return -1; } /* malloc 4 M buf */ - for(i=0; i<1024; i++) - { + for(i=0; i<1024; i++) { buf_p = (uint32_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); - if(buf_p == NULL) - { + if(buf_p == NULL) { printk(KERN_ERR "RGA init pre scale buf falied\n"); return -ENOMEM; } - mmu_buf[i] = virt_to_phys((void *)((uint32_t)buf_p)); } rga_service.pre_scale_buf = (uint32_t *)mmu_buf; + buf_p = kmalloc(1024*256, GFP_KERNEL); + rga_mmu_buf.buf_virtual = buf_p; + rga_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((uint32_t)buf_p)); + rga_mmu_buf.front = 0; + rga_mmu_buf.back = 64*1024; + rga_mmu_buf.size = 64*1024; + + rga_mmu_buf.pages = kmalloc((32768)* sizeof(struct page *), GFP_KERNEL); + if ((ret = platform_driver_register(&rga_driver)) != 0) { printk(KERN_ERR "Platform device register failed (%d).\n", ret); @@ -1413,6 +1423,13 @@ static void __exit rga_exit(void) if(rga_service.pre_scale_buf != NULL) { kfree((uint8_t *)rga_service.pre_scale_buf); } + + if (rga_mmu_buf.buf_virtual) + kfree(rga_mmu_buf.buf_virtual); + + if (rga_mmu_buf.pages) + kfree(rga_mmu_buf.pages); + platform_driver_unregister(&rga_driver); } diff --git a/drivers/video/rockchip/rga/rga_mmu_info.c b/drivers/video/rockchip/rga/rga_mmu_info.c index f2e82370b65e..56f2110bed2c 100755 --- a/drivers/video/rockchip/rga/rga_mmu_info.c +++ b/drivers/video/rockchip/rga/rga_mmu_info.c @@ -18,38 +18,44 @@ #include #include #include "rga_mmu_info.h" +#include extern rga_service_info rga_service; -//extern int mmu_buff_temp[1024]; +extern struct rga_mmu_buf_t rga_mmu_buf; #define KERNEL_SPACE_VALID 0xc0000000 -#define V7_VATOPA_SUCESS_MASK (0x1) -#define V7_VATOPA_GET_PADDR(X) (X & 0xFFFFF000) -#define V7_VATOPA_GET_INER(X) ((X>>4) & 7) -#define V7_VATOPA_GET_OUTER(X) ((X>>2) & 3) -#define V7_VATOPA_GET_SH(X) ((X>>7) & 1) -#define V7_VATOPA_GET_NS(X) ((X>>9) & 1) -#define V7_VATOPA_GET_SS(X) ((X>>1) & 1) +static int rga_mmu_buf_get(struct rga_mmu_buf_t *t, uint32_t size) +{ + mutex_lock(&rga_service.lock); + t->front += size; + mutex_unlock(&rga_service.lock); + + return 0; +} -#if 0 -static unsigned int armv7_va_to_pa(unsigned int v_addr) +static int rga_mmu_buf_get_try(struct rga_mmu_buf_t *t, uint32_t size) { - unsigned int p_addr; - __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n" - "isb\n" - "dsb\n" - "mrc p15, 0, %0, c7, c4, 0\n" - : "=r" (p_addr) - : "r" (v_addr) - : "cc"); - - if (p_addr & V7_VATOPA_SUCESS_MASK) - return 0xFFFFFFFF; - else - return (V7_VATOPA_GET_SS(p_addr) ? 
+    buf_p = kmalloc(1024*256, GFP_KERNEL);
+    rga_mmu_buf.buf_virtual = buf_p;
+    rga_mmu_buf.buf = (uint32_t *)virt_to_phys((void *)((uint32_t)buf_p));
+    rga_mmu_buf.front = 0;
+    rga_mmu_buf.back = 64*1024;
+    rga_mmu_buf.size = 64*1024;
+
+    rga_mmu_buf.pages = kmalloc((32768)* sizeof(struct page *), GFP_KERNEL);
+
     if ((ret = platform_driver_register(&rga_driver)) != 0)
     {
         printk(KERN_ERR "Platform device register failed (%d).\n", ret);
@@ -1413,6 +1423,13 @@ static void __exit rga_exit(void)
     if(rga_service.pre_scale_buf != NULL) {
         kfree((uint8_t *)rga_service.pre_scale_buf);
     }
+
+    if (rga_mmu_buf.buf_virtual)
+        kfree(rga_mmu_buf.buf_virtual);
+
+    if (rga_mmu_buf.pages)
+        kfree(rga_mmu_buf.pages);
+
     platform_driver_unregister(&rga_driver);
 }

diff --git a/drivers/video/rockchip/rga/rga_mmu_info.c b/drivers/video/rockchip/rga/rga_mmu_info.c
index f2e82370b65e..56f2110bed2c 100755
--- a/drivers/video/rockchip/rga/rga_mmu_info.c
+++ b/drivers/video/rockchip/rga/rga_mmu_info.c
@@ -18,38 +18,44 @@
 #include
 #include
 #include "rga_mmu_info.h"
+#include
 
 extern rga_service_info rga_service;
-//extern int mmu_buff_temp[1024];
+extern struct rga_mmu_buf_t rga_mmu_buf;
 
 #define KERNEL_SPACE_VALID    0xc0000000
 
-#define V7_VATOPA_SUCESS_MASK (0x1)
-#define V7_VATOPA_GET_PADDR(X) (X & 0xFFFFF000)
-#define V7_VATOPA_GET_INER(X) ((X>>4) & 7)
-#define V7_VATOPA_GET_OUTER(X) ((X>>2) & 3)
-#define V7_VATOPA_GET_SH(X) ((X>>7) & 1)
-#define V7_VATOPA_GET_NS(X) ((X>>9) & 1)
-#define V7_VATOPA_GET_SS(X) ((X>>1) & 1)
 
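+/*
+ * rga_mmu_buf_get_try() checks that "size" entries fit between front
+ * and back, wrapping front to 0 when the slice would run past the end
+ * of the table; rga_mmu_buf_get() commits the claim by advancing
+ * front. back is advanced by rga_del_running_list() when the command
+ * retires.
+ */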
+static int rga_mmu_buf_get(struct rga_mmu_buf_t *t, uint32_t size)
+{
+    mutex_lock(&rga_service.lock);
+    t->front += size;
+    mutex_unlock(&rga_service.lock);
+
+    return 0;
+}
 
-#if 0
-static unsigned int armv7_va_to_pa(unsigned int v_addr)
-{
-    unsigned int p_addr;
-    __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
-                       "isb\n"
-                       "dsb\n"
-                       "mrc p15, 0, %0, c7, c4, 0\n"
-                       : "=r" (p_addr)
-                       : "r" (v_addr)
-                       : "cc");
-
-    if (p_addr & V7_VATOPA_SUCESS_MASK)
-        return 0xFFFFFFFF;
-    else
-        return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));
-}
-#endif
+static int rga_mmu_buf_get_try(struct rga_mmu_buf_t *t, uint32_t size)
+{
+    int ret = 0;
+
+    mutex_lock(&rga_service.lock);
+    if ((t->back - t->front) > t->size) {
+        if (t->front + size > t->back - t->size)
+            ret = -1;
+    }
+    else {
+        if ((t->front + size) > t->back)
+            ret = -1;
+        else if (t->front + size > t->size) {
+            if (size > (t->back - t->size))
+                ret = -1;
+            else
+                t->front = 0;
+        }
+    }
+    mutex_unlock(&rga_service.lock);
+
+    return ret;
+}
 
 static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
@@ -297,7 +303,6 @@ static int rga_MapUserMemory(struct page **pages,
 
         if (vma)//&& (vma->vm_flags & VM_PFNMAP) )
         {
-            #if 1
             do
             {
                 pte_t * pte;
@@ -343,28 +348,6 @@ static int rga_MapUserMemory(struct page **pages,
             }
             while (0);
 
-            #else
-            do
-            {
-                pte_t * pte;
-                spinlock_t * ptl;
-                unsigned long pfn;
-                pgd_t * pgd;
-                pud_t * pud;
-                pmd_t * pmd;
-
-                pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
-                pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
-                pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
-                pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
-
-                pfn = pte_pfn(*pte);
-                Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
-                pte_unmap_unlock(pte, ptl);
-            }
-            while (0);
-            #endif
-
             pageTable[i] = Address;
         }
         else
@@ -399,31 +382,80 @@ static int rga_MapUserMemory(struct page **pages,
 
 static int rga_MapION(struct sg_table *sg,
                uint32_t *Memory,
-               int32_t pageCount)
+               int32_t pageCount,
+               uint32_t offset)
 {
     uint32_t i;
     uint32_t status;
     uint32_t Address;
     uint32_t mapped_size = 0;
-    uint32_t len;
+    uint32_t len = 0;
     struct scatterlist *sgl = sg->sgl;
     uint32_t sg_num = 0;
 
     status = 0;
     Address = 0;
 
-    do {
-        len = sg_dma_len(sgl) >> PAGE_SHIFT;
-        Address = sg_phys(sgl);
-
-        for(i=0; i<len; i++) {
-            Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));
-        }
-
-        mapped_size += len;
-        sg_num += 1;
-    }
-    while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
+    offset = offset >> PAGE_SHIFT;
+    if (offset != 0) {
+        do {
+            len += (sg_dma_len(sgl) >> PAGE_SHIFT);
+            if (len == offset) {
+                sg_num += 1;
+                break;
+            }
+            else {
+                if (len > offset)
+                    break;
+            }
+            sg_num += 1;
+        }
+        while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
+
+        sgl = sg->sgl;
+        len = 0;
+        do {
+            len += (sg_dma_len(sgl) >> PAGE_SHIFT);
+            sgl = sg_next(sgl);
+        }
+        while(--sg_num);
+
+        offset -= len;
+
+        len = sg_dma_len(sgl) >> PAGE_SHIFT;
+        Address = sg_phys(sgl);
+        Address += offset;
+
+        for(i=0; i<(len-offset); i++) {
+            Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));
+        }
+
+        mapped_size += (len - offset);
+        sg_num += 1;
+
+        while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents)) {
+            len = sg_dma_len(sgl) >> PAGE_SHIFT;
+            Address = sg_phys(sgl);
+
+            for(i=0; i<len; i++) {
+                Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));
+            }
+
+            mapped_size += len;
+            sg_num += 1;
+        }
+    }
+    else {
+        do {
+            len = sg_dma_len(sgl) >> PAGE_SHIFT;
+            Address = sg_phys(sgl);
+
+            for(i=0; i<len; i++) {
+                Memory[mapped_size + i] = (uint32_t)(Address + (i << PAGE_SHIFT));
+            }
+
+            mapped_size += len;
+            sg_num += 1;
+        }
+        while((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));
+    }
 
     return 0;
 }
@@ -434,7 +466,7 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
     uint32_t SrcStart, DstStart;
     uint32_t i;
     uint32_t AllSize;
-    uint32_t *MMU_Base, *MMU_p;
+    uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
     int ret;
     int status;
     uint32_t uv_size, v_size;
@@ -446,10 +478,8 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
     SrcMemSize = 0;
     DstMemSize = 0;
 
-    do
-    {
+    do {
         /* cal src buf mmu info */
-
         SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                       req->src.format, req->src.vir_w,
                                       req->src.act_h + req->src.y_offset, &SrcStart);
@@ -457,38 +487,35 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         if(SrcMemSize == 0) {
             return -EINVAL;
         }
 
-        /* cal dst buf mmu info */
         DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                       req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                       &DstStart);
-        if(DstMemSize == 0) {
+        if(DstMemSize == 0)
             return -EINVAL;
-        }
 
         /* Cal out the needed mem size */
+        SrcMemSize = (SrcMemSize + 15) & (~15);
+        DstMemSize = (DstMemSize + 15) & (~15);
         AllSize = SrcMemSize + DstMemSize;
 
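+        /* Reserve AllSize entries plus 16 of slack in the ring; the
+         * "front & (size - 1)" math below relies on size being a
+         * power of two. */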
-        pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);
-        if(pages == NULL) {
-            pr_err("RGA MMU malloc pages mem failed\n");
+        if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
+            pr_err("RGA Get MMU mem failed\n");
             status = RGA_MALLOC_ERROR;
             break;
         }
 
-        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
-        if(MMU_Base == NULL) {
-            pr_err("RGA MMU malloc MMU_Base point failed\n");
-            status = RGA_MALLOC_ERROR;
-            break;
-        }
+        mutex_lock(&rga_service.lock);
+        MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
+        MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
+        mutex_unlock(&rga_service.lock);
+
+        pages = rga_mmu_buf.pages;
 
         if((req->mmu_info.mmu_flag >> 8) & 1) {
             if (req->sg_src) {
-                ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize);
+                ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize, req->line_draw_info.flag);
             }
             else {
                 ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
@@ -514,7 +541,7 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
 
         if ((req->mmu_info.mmu_flag >> 10) & 1) {
             if (req->sg_dst) {
-                ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize);
+                ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);
             }
             else {
                 ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
@@ -531,13 +558,13 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
                 MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
             }
 
-        MMU_Base[AllSize] = MMU_Base[AllSize - 1];
+        MMU_Base[AllSize] = MMU_Base[AllSize-1];
 
         /* zsq
          * change the buf address in req struct
          */
-        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
+        req->mmu_info.base_addr = (uint32_t)MMU_Base_phys >> 2;
 
         uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
         v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
@@ -551,35 +578,19 @@ static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
         req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
 
-        /*record the malloc buf for the cmd end to release*/
-        reg->MMU_base = MMU_Base;
-
         /* flush data to DDR */
         dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
-        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
+        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));
 
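+        /* Commit the reservation and record its length so
+         * rga_del_running_list() can release the same number of
+         * entries when the command retires. */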
-        status = 0;
+        rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
+        reg->MMU_len = AllSize + 16;
 
-        /* Free the page table */
-        if (pages != NULL) {
-            kfree(pages);
-        }
+        status = 0;
 
         return status;
     }
     while(0);
 
-    /* Free the page table */
-    if (pages != NULL) {
-        kfree(pages);
-    }
-
-    /* Free MMU table */
-    if(MMU_Base != NULL) {
-        kfree(MMU_Base);
-    }
-
     return status;
 }
@@ -590,7 +601,7 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
     struct page **pages = NULL;
     uint32_t i;
     uint32_t AllSize;
-    uint32_t *MMU_Base = NULL;
+    uint32_t *MMU_Base = NULL, *MMU_Base_phys = NULL;
     uint32_t *MMU_p;
     int ret, status;
     uint32_t stride;
@@ -603,9 +614,7 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
     byte_num = sw >> shift;
     stride = (byte_num + 3) & (~3);
 
-    do
-    {
-
+    do {
         SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
         if(SrcMemSize == 0) {
             return -EINVAL;
         }
@@ -623,39 +632,40 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
             return -EINVAL;
         }
 
-        AllSize = SrcMemSize + DstMemSize + CMDMemSize;
+        SrcMemSize = (SrcMemSize + 15) & (~15);
+        DstMemSize = (DstMemSize + 15) & (~15);
+        CMDMemSize = (CMDMemSize + 15) & (~15);
 
-        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
-        if(pages == NULL) {
-            pr_err("RGA MMU malloc pages mem failed\n");
-            return -EINVAL;
-        }
+        AllSize = SrcMemSize + DstMemSize + CMDMemSize;
 
-        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
-        if(MMU_Base == NULL) {
-            pr_err("RGA MMU malloc MMU_Base point failed\n");
+        if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
+            pr_err("RGA Get MMU mem failed\n");
+            status = RGA_MALLOC_ERROR;
             break;
         }
 
+        mutex_lock(&rga_service.lock);
+        MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
+        MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
+        mutex_unlock(&rga_service.lock);
+
+        pages = rga_mmu_buf.pages;
+
         /* map CMD addr */
         for(i=0; i<CMDMemSize; i++) {
             MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
         }
 
         /* map src addr */
-        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
-        {
+        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
             ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
-            if (ret < 0)
-            {
+            if (ret < 0) {
                 pr_err("rga map src memory failed\n");
                 status = ret;
                 break;
             }
         }
-        else
-        {
+        else {
             MMU_p = MMU_Base + CMDMemSize;
             for(i=0; i<SrcMemSize; i++) {
                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
             }
         }
 
         /* map dst addr */
-        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
-        {
+        if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
             ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
-            if (ret < 0)
-            {
+            if (ret < 0) {
                 pr_err("rga map dst memory failed\n");
                 status = ret;
                 break;
             }
         }
-        else
-        {
+        else {
             MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
             for(i=0; i<DstMemSize; i++) {
                 MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
             }
         }
 
         /* zsq
          * change the buf address in req struct
          */
-        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
+        req->mmu_info.base_addr = ((uint32_t)MMU_Base_phys >> 2);
 
         req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
 
-        /*record the malloc buf for the cmd end to release*/
         reg->MMU_base = MMU_Base;
 
@@ -702,26 +705,14 @@ static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
         /* flush data to DDR */
         dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
         outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
 
-        /* Free the page table */
-        if (pages != NULL) {
-            kfree(pages);
-        }
+        rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
+        reg->MMU_len = AllSize + 16;
 
         return status;
     }
     while(0);
 
-    /* Free the page table */
-    if (pages != NULL) {
-        kfree(pages);
-    }
-
-    /* Free mmu table */
-    if (MMU_Base != NULL) {
-        kfree(MMU_Base);
-    }
-
     return 0;
 }
@@ -732,7 +723,7 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
     struct page **pages = NULL;
     uint32_t i;
     uint32_t AllSize;
-    uint32_t *MMU_Base, *MMU_p;
+    uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
     int ret;
     int status;
@@ -747,25 +738,24 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
             return -EINVAL;
         }
 
-        AllSize = DstMemSize;
+        AllSize = (DstMemSize + 15) & (~15);
 
-        pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);
-        if(pages == NULL) {
-            pr_err("RGA MMU malloc pages mem failed\n");
-            status = RGA_MALLOC_ERROR;
-            break;
-        }
+        pages = rga_mmu_buf.pages;
 
-        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
-        if(pages == NULL) {
-            pr_err("RGA MMU malloc MMU_Base point failed\n");
+        if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
+            pr_err("RGA Get MMU mem failed\n");
             status = RGA_MALLOC_ERROR;
             break;
         }
 
+        mutex_lock(&rga_service.lock);
+        MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
+        MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
+        mutex_unlock(&rga_service.lock);
+
         if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
             if (req->sg_dst) {
-                ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize);
+                ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize, req->line_draw_info.line_width);
             }
             else {
                 ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
@@ -788,7 +778,7 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
          * change the buf address in req struct
          */
-        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
+        req->mmu_info.base_addr = ((uint32_t)(MMU_Base_phys)>>2);
         req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
 
         /*record the malloc buf for the cmd end to release*/
@@ -798,20 +788,13 @@ static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
         /* flush data to DDR */
         dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
         outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
 
-        /* Free the page table */
-        if (pages != NULL)
-            kfree(pages);
+        rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
+        reg->MMU_len = AllSize + 16;
 
         return 0;
     }
     while(0);
 
-    if (pages != NULL)
-        kfree(pages);
-
-    if (MMU_Base != NULL)
-        kfree(MMU_Base);
-
     return status;
 }
@@ -1052,7 +1035,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
     struct page **pages = NULL;
     uint32_t i;
     uint32_t AllSize;
-    uint32_t *MMU_Base, *MMU_p;
+    uint32_t *MMU_Base, *MMU_p, *MMU_Base_phys;
     int ret;
     int status;
     uint32_t uv_size, v_size;
@@ -1076,30 +1059,28 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         return -EINVAL;
     }
 
+        SrcMemSize = (SrcMemSize + 15) & (~15);
+        DstMemSize = (DstMemSize + 15) & (~15);
+
         AllSize = SrcMemSize + DstMemSize;
 
-        pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);
-        if(pages == NULL) {
-            pr_err("RGA MMU malloc pages mem failed\n");
-            status = RGA_MALLOC_ERROR;
-            break;
-        }
+        pages = rga_mmu_buf.pages;
 
-        /*
-         * Allocate MMU Index mem
-         * This mem release in run_to_done fun
-         */
-        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
-        if(pages == NULL) {
-            pr_err("RGA MMU malloc MMU_Base point failed\n");
+        if (rga_mmu_buf_get_try(&rga_mmu_buf, AllSize + 16)) {
+            pr_err("RGA Get MMU mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }
 
+        mutex_lock(&rga_service.lock);
+        MMU_Base = rga_mmu_buf.buf_virtual + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
+        MMU_Base_phys = rga_mmu_buf.buf + (rga_mmu_buf.front & (rga_mmu_buf.size - 1));
+        mutex_unlock(&rga_service.lock);
+
         /* map src pages */
         if ((req->mmu_info.mmu_flag >> 8) & 1) {
             if (req->sg_src) {
-                ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize);
+                ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize, req->line_draw_info.flag);
             }
             else {
                 ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
@@ -1119,7 +1100,7 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
 
         if((req->mmu_info.mmu_flag >> 10) & 1) {
             if (req->sg_dst) {
-                ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize);
+                ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize, req->line_draw_info.line_width);
             }
             else {
                 ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
@@ -1145,14 +1126,14 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
             }
         }
 
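+        /* Pad the table with a copy of the last entry; the flushes
+         * below cover AllSize + 1 entries. */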
         MMU_Base[AllSize] = MMU_Base[AllSize - 1];
 
         /* zsq
          * change the buf address in req struct
          * for the reason of lie to MMU
          */
-        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
+        req->mmu_info.base_addr = ((uint32_t)(MMU_Base_phys)>>2);
 
         uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
         v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
@@ -1175,22 +1156,13 @@ static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
         /* flush data to DDR */
         dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
         outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
 
-        /* Free the page table */
-        if (pages != NULL)
-        {
-            kfree(pages);
-        }
+        rga_mmu_buf_get(&rga_mmu_buf, AllSize + 16);
+        reg->MMU_len = AllSize + 16;
 
         return 0;
     }
     while(0);
 
-    if (pages != NULL)
-        kfree(pages);
-
-    if (MMU_Base != NULL)
-        kfree(MMU_Base);
-
     return status;
 }