static unsigned int armv7_va_to_pa(unsigned int v_addr)
{
    unsigned int p_addr;

    /* ATS1CPR: translate v_addr by the current stage-1 tables, then read the result from the PAR. */
    __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
                       "isb\n"
                       "dsb\n"
                       "mrc p15, 0, %0, c7, c4, 0\n"
                       : "=r" (p_addr)
                       : "r" (v_addr)
                       : "cc");

    if (p_addr & 1)    /* PAR bit 0 set: translation aborted */
        return 0xffffffff;

    return (p_addr & PAGE_MASK) | (v_addr & ~PAGE_MASK);
}
#endif\r
\r
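/*
 * Page-span helper: round the virtual range [Mem, Mem + MemSize) out to
 * whole pages, return the number of pages it covers, and hand the first
 * virtual page number back through *StartAddr.
 */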
static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
{
    uint32_t start, end;
    uint32_t pageCount;

    end = (Mem + MemSize + PAGE_SIZE - 1) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;
    *StartAddr = start;
    return pageCount;
}\r
\r
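/*
 * Per-format sibling of rga_mem_size_cal(): from the Y/RGB, UV and V plane
 * addresses plus format, width and height, compute how many page-table
 * entries the buffer needs and the first virtual page number. For planar
 * and semi-planar YUV the span runs from the lowest plane address to the
 * highest plane end, so one contiguous table covers all planes.
 */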
static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr )
{\r
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t start, end;
    uint32_t pageCount;

switch(format)\r
{\r
        case RK_FORMAT_RGBA_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride*h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBX_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
case RK_FORMAT_RGB_565 :\r
            stride = (w*2 + 3) & (~3);
size_yrgb = stride * h;\r
end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
start = yrgb_addr >> PAGE_SHIFT;\r
pageCount = end - start;\r
break;\r
case RK_FORMAT_RGBA_5551 :\r
            stride = (w*2 + 3) & (~3);
size_yrgb = stride * h;\r
end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
start = yrgb_addr >> PAGE_SHIFT;\r
pageCount = end - start;\r
break;\r
case RK_FORMAT_RGBA_4444 :\r
            stride = (w*2 + 3) & (~3);
size_yrgb = stride * h;\r
end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
start = yrgb_addr >> PAGE_SHIFT;\r
pageCount = end - start;\r
break;\r
        case RK_FORMAT_BGR_888 :
            stride = (w*3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start = start >> PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
break;\r
        case RK_FORMAT_YCbCr_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        case RK_FORMAT_YCbCr_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
break;\r
\r
        case RK_FORMAT_YCrCb_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        case RK_FORMAT_YCrCb_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
break;\r
        case RK_FORMAT_YCrCb_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        case RK_FORMAT_YCrCb_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
break;\r
        #if 0
        case RK_FORMAT_BPP1 :
            break;
        case RK_FORMAT_BPP2 :
            break;
        case RK_FORMAT_BPP4 :
            break;
        case RK_FORMAT_BPP8 :
            break;
        #endif
        default :
            pageCount = 0;
            start = 0;
            break;
    }
\r
*StartAddr = start;\r
    return pageCount;
}\r
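/*
 * Worked example (assuming 4 KiB pages): a 640x480 RK_FORMAT_RGBA_8888
 * surface has stride = 640*4 = 2560 bytes and size_yrgb = 2560*480 =
 * 1228800 bytes = exactly 300 pages; a page-aligned yrgb_addr therefore
 * yields pageCount = 300, a misaligned one 301.
 */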
\r
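/*
 * Pin the pageCount user pages starting at virtual page 'Memory' and write
 * their physical addresses into pageTable. If get_user_pages() cannot pin
 * the whole range (e.g. PFN-mapped VMAs), fall back to walking the page
 * tables by hand under the pte lock.
 */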
static int rga_MapUserMemory(struct page **pages,
                             uint32_t *pageTable,
                             uint32_t Memory,
uint32_t pageCount)\r
{\r
    int32_t result;
    uint32_t i;
    uint32_t status;
    uint32_t Address;
    //uint32_t temp;

    status = 0;
    Address = 0;

    do
    {
        down_read(&current->mm->mmap_sem);
        result = get_user_pages(current,
                                current->mm,
                                Memory << PAGE_SHIFT,
                                pageCount,
                                1,      /* write */
                                0,      /* force */
                                pages,
                                NULL);
        up_read(&current->mm->mmap_sem);
\r
        #if 0
        if(result <= 0 || result < pageCount)
        {
            status = 0;

            for(i=0; i<pageCount; i++)
            {
                temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);
                if (temp == 0xffffffff)
                {
                    status = RGA_OUT_OF_RESOURCES;
                    break;
                }

                pageTable[i] = temp;
            }

            return status;
        }
#else\r
        if(result <= 0 || result < pageCount)
        {
            struct vm_area_struct *vma;

            for(i=0; i<pageCount; i++)
            {
                vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);

                if (vma)//&& (vma->vm_flags & VM_PFNMAP) )
                {
                    #if 1
                    do
                    {
                        pte_t * pte;
                        spinlock_t * ptl;
                        unsigned long pfn;
                        pgd_t * pgd;
                        pud_t * pud;
                        pmd_t * pmd;

                        pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);

                        if(pgd_val(*pgd) == 0)
                        {
                            //printk("rga pgd value is zero \n");
                            break;
                        }

                        pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
                        if (!pud)
                        {
                            break;
                        }

                        pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
                        if (!pmd)
                        {
                            break;
                        }

                        pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);

                        pfn = pte_pfn(*pte);
                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
                        pte_unmap_unlock(pte, ptl);
                    }
                    while (0);

                    #else

                    do
                    {
                        pte_t * pte;
                        spinlock_t * ptl;
                        unsigned long pfn;
                        pgd_t * pgd;
                        pud_t * pud;
                        pmd_t * pmd;

                        pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
                        pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
                        pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
                        pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);

                        pfn = pte_pfn(*pte);
                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
                        pte_unmap_unlock(pte, ptl);
                    }
                    while (0);
                    #endif

                    pageTable[i] = Address;
                }
                else
                {
                    status = RGA_OUT_OF_RESOURCES;
                    break;
                }
            }

            return status;
        }
        #endif
\r
        /* Fill the page table. */
        for(i=0; i<pageCount; i++)
        {
            /* Get the physical address from the page struct. */
            pageTable[i] = page_to_phys(pages[i]);
        }

        return 0;
    }
    while(0);

    return status;
}
\r
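/*
 * Build the MMU table for a BitBlt: size the src and dst buffers in pages,
 * map (or translate) both into one table, then rewrite the request
 * addresses as table-relative offsets.
 */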
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    uint32_t uv_size, v_size;
    int ret;
    int status = 0;
    struct page **pages = NULL;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,
                                      &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        /* Cal out the needed mem size */
        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }
\r
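        /* Source buffer: user-space addresses are pinned and mapped;
         * kernel-space addresses (including the pre-scale buffer) are
         * translated directly with virt_to_phys(). */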
        if(req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
            {
                /* Down-scale ratio over 2: the last pass reads the
                 * pre-scale buffer, so copy its MMU table entries. */
                for(i=0; i<SrcMemSize; i++)
                {
                    MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            }
            else
            {
                for(i=0; i<SrcMemSize; i++)
                {
                    MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
                }
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            #if 0
            ktime_t start, end;
            start = ktime_get();
            #endif
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }
\r
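        /* The table is sized AllSize + 1: duplicating the last entry one
         * slot past the end presumably keeps a one-entry prefetch by the
         * RGA MMU inside mapped memory (assumption, not documented here). */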
MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
\r
        /* zsq: rewrite the buffer addresses in the req struct so they
         * index the MMU table instead of real memory. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
\r
        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);

        /*record the malloc buf for the cmd end to release*/
        reg->MMU_base = MMU_Base;

/* flush data to DDR */\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
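        /* dmac_flush_range() cleans the inner (L1) cache lines and
         * outer_flush_range() the outer (L2) cache, so the RGA reads a
         * coherent table from DDR. */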
\r
        status = 0;

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return status;
    }
    while(0);

    /* Error path: free the page table and the half-built MMU table */
    if (pages != NULL) {
        kfree(pages);
    }
    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}

static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base = NULL;
    uint32_t *MMU_p;
    int ret, status = 0;
    uint32_t stride;

    uint8_t shift;
    uint16_t sw, byte_num;

    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);    /* bytes per row, rounded to 4 */
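    /* e.g. palette_mode 0 is 1 bpp: shift = 3, byte_num = sw / 8; mode 3 is
     * 8 bpp: shift = 0, byte_num = sw. stride then rounds the row byte count
     * up to a 4-byte boundary. */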
\r
    do
    {
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }
\r
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
&DstStart);\r
if(DstMemSize == 0) {\r
            return -EINVAL;
}\r
\r
CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
            return -EINVAL;
}\r
\r
        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            return -EINVAL;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }
\r
/* map CMD addr */\r
        for(i=0; i<CMDMemSize; i++)
{\r
MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
}\r
\r
        /* map src addr */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0)
            {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }
\r
        /* map dst addr */
        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0)
            {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* zsq: rewrite the buffer addresses in the req struct so they
         * index the MMU table instead of real memory. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

        /*record the malloc buf for the cmd end to release*/
        reg->MMU_base = MMU_Base;

        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
\r
        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return status;
    }
    while(0);

    /* Error path: free both tables */
    if (pages != NULL) {
        kfree(pages);
    }
    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}

static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = NULL;

    do
    {
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
&DstStart);\r
if(DstMemSize == 0) {\r
            return -EINVAL;
}\r
\r
        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1)* sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }
}\r
\r
        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }
\r
        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* zsq: rewrite the buffer addresses in the req struct so they
         * index the MMU table instead of real memory. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /*record the malloc buf for the cmd end to release*/
        reg->MMU_base = MMU_Base;

        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));
\r
        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
}\r
while(0);\r
\r
    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}\r
\r
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = NULL;

    do
    {
        /* cal dst buf mmu info */
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
&DstStart);\r
if(DstMemSize == 0) {\r
            return -EINVAL;
}\r
\r
        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
else\r
{\r
MMU_p = MMU_Base;\r
for(i=0; i<DstMemSize; i++)\r
{\r
MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
}\r
}\r
\r
        /* zsq: rewrite the buffer addresses in the req struct so they
         * index the MMU table instead of real memory. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
\r
        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }
\r
        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}

static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;
do\r
{\r
        /* cal src buf mmu info */
SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
req->src.format, req->src.vir_w, req->src.vir_h,\r
&SrcStart);\r
if(SrcMemSize == 0) {\r
            return -EINVAL;
}\r
\r
        /* cal dst buf mmu info */
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
&DstStart);\r
if(DstMemSize == 0) {\r
            return -EINVAL;
}\r
\r
        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }
\r
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0)
            {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }
\r
        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0)
            {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];
\r
        /* zsq: rewrite the buffer addresses in the req struct so they
         * index the MMU table instead of real memory. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
\r
        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /*record the malloc buf for the cmd end to release*/
        reg->MMU_base = MMU_Base;

        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}

static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;
do\r
{\r
        /* cal src buf mmu info */
SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
req->src.format, req->src.vir_w, req->src.vir_h,\r
&SrcStart);\r
if(SrcMemSize == 0) {\r
            return -EINVAL;
}\r
\r
        /* cal dst buf mmu info */
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
&DstStart);\r
if(DstMemSize == 0) {\r
            return -EINVAL;
}\r
\r
        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL)
        {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /*
         * Allocate MMU Index mem.
         * This mem is released in the run_to_done path.
         */
        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }
\r
        /* map src pages */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }
\r

        if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID)
        {
            /* kernel space */
            MMU_p = MMU_Base + SrcMemSize;

            if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
            {
                for(i=0; i<DstMemSize; i++)
                {
                    MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            }
            else
            {
                for(i=0; i<DstMemSize; i++)
                {
                    MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
                }
            }
        }
        else
        {
            /* user space */
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0)
            {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
\r
MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
\r
        /* zsq: rewrite the buffer addresses in the req struct so they
         * index the MMU table instead of real memory. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2);
\r
        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);

        /*record the malloc buf for the cmd end to release*/
        reg->MMU_base = MMU_Base;

        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
        {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}

static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = NULL;

do\r
    {
        /* cal src buf mmu info */
SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);\r
if(SrcMemSize == 0) {\r
            return -EINVAL;
}\r
\r
/* cal cmd buf mmu info */\r
CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
            return -EINVAL;
}\r
\r
        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }
\r
        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }
\r
        /* zsq: rewrite the buffer addresses in the req struct so they
         * index the MMU table instead of real memory. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /*record the malloc buf for the cmd end to release*/
        reg->MMU_base = MMU_Base;

        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));

        if (pages != NULL) {
            /* Free the page table */
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}

static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = NULL;

    do
    {
\r
        /* cal src buf mmu info */
SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);\r
if(SrcMemSize == 0) {\r
            return -EINVAL;
}\r
\r
/* cal cmd buf mmu info */\r
CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
            return -EINVAL;
}\r
\r
        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }
\r
        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->pat.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map pat memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }
\r
        /* zsq: rewrite the buffer addresses in the req struct so they
         * index the MMU table instead of real memory. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /*record the malloc buf for the cmd end to release*/
        reg->MMU_base = MMU_Base;

        outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
\r
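/* Dispatch to the per-render-mode MMU table builder. */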
int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
{
    int ret;

    switch (req->render_mode) {
        case bitblt_mode :
ret = rga_mmu_info_BitBlt_mode(reg, req);\r
break;\r
case color_palette_mode :\r