printk("clip.xmin = %d, clip.xmax = %d. clip.ymin = %d, clip.ymax = %d\n",\r
req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax);\r
\r
+ printk("mmu_flag = %.8x\n", req->mmu_info.mmu_flag);\r
+\r
//printk("alpha_rop_flag = %.8x\n", req->alpha_rop_flag);\r
//printk("alpha_rop_mode = %.8x\n", req->alpha_rop_mode);\r
//printk("PD_mode = %.8x\n", req->PD_mode);\r
\r
#if defined(CONFIG_OF)
/*
 * Device-tree match table for the Rockchip RGA (raster graphics
 * acceleration) platform driver.  The compatible string binds this
 * driver to the RK312x RGA node in the board DTS.
 * NOTE(review): stray unified-diff markers ('-'/'+') were embedded in
 * this table; resolved to the post-patch state with the
 * "rockchip,rk312x-rga" compatible string.
 */
static const struct of_device_id rockchip_rga_dt_ids[] = {
	{ .compatible = "rockchip,rk312x-rga", },
	{ /* sentinel */ },
};
#endif
mutex_init(&rga_service.lock);\r
mutex_init(&rga_service.mutex);\r
atomic_set(&rga_service.total_running, 0);\r
- atomic_set(&rga_service.src_format_swt, 0);\r
- rga_service.last_prc_src_format = 1; /* default is yuv first*/\r
rga_service.enable = false;\r
\r
rga_ioctl_kernel_p = rga_ioctl_kernel_imp;\r
return -EINVAL;\r
}\r
\r
-\r
/* Cal out the needed mem size */\r
AllSize = SrcMemSize + DstMemSize;\r
\r
break;\r
}\r
\r
- if((req->mmu_info.mmu_flag >> 8) & 1)\r
- {\r
+ if((req->mmu_info.mmu_flag >> 8) & 1) {\r
if (req->sg_src) {\r
ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize);\r
}\r
}\r
}\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base;\r
\r
- if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)\r
- {\r
- /* Down scale ratio over 2, Last prc */\r
- /* MMU table copy from pre scale table */\r
-\r
+ if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {\r
for(i=0; i<SrcMemSize; i++)\r
MMU_p[i] = rga_service.pre_scale_buf[i];\r
-\r
}\r
else {\r
for(i=0; i<SrcMemSize; i++)\r
}\r
\r
if ((req->mmu_info.mmu_flag >> 10) & 1) {\r
- ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
+ if (req->sg_dst) {\r
+ ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize);\r
+ }\r
+ else {\r
+ ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map dst memory failed\n");\r
+ status = ret;\r
+ break;\r
+ }\r
}\r
}\r
else {\r
MMU_p = MMU_Base + SrcMemSize;\r
-\r
for(i=0; i<DstMemSize; i++)\r
MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);\r
}\r
req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);\r
req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);\r
\r
+ uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;\r
+\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);\r
+ req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);\r
\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
break;\r
}\r
\r
- if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
+ if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {\r
+ if (req->sg_dst) {\r
+ ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize);\r
+ }\r
+ else {\r
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map dst memory failed\n");\r
+ status = ret;\r
+ break;\r
+ }\r
}\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base;\r
-\r
for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
+ MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);\r
}\r
\r
MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
\r
MMU_Base = NULL;\r
\r
- do\r
- {\r
+ do {\r
/* cal src buf mmu info */\r
SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
req->src.format, req->src.vir_w, req->src.vir_h,\r
AllSize = SrcMemSize + DstMemSize;\r
\r
pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL)\r
- {\r
+ if(pages == NULL) {\r
pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break;\r
}\r
\r
/* map src pages */\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
- if (ret < 0) {\r
- pr_err("rga map src memory failed\n");\r
- status = ret;\r
- break;\r
+ if ((req->mmu_info.mmu_flag >> 8) & 1) {\r
+ if (req->sg_src) {\r
+ ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize);\r
+ }\r
+ else {\r
+ ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map src memory failed\n");\r
+ status = ret;\r
+ break;\r
+ }\r
}\r
}\r
- else\r
- {\r
+ else {\r
MMU_p = MMU_Base;\r
\r
for(i=0; i<SrcMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
- }\r
+ MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);\r
}\r
\r
-\r
- if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID)\r
+ if((req->mmu_info.mmu_flag >> 10) & 1) {\r
+ if (req->sg_dst) {\r
+ ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize);\r
+ }\r
+ else {\r
+ ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga map dst memory failed\n");\r
+ status = ret;\r
+ break;\r
+ }\r
+ }\r
+ }\r
+ else\r
{\r
/* kernel space */\r
MMU_p = MMU_Base + SrcMemSize;\r
\r
- if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)\r
- {\r
+ if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {\r
for(i=0; i<DstMemSize; i++)\r
- {\r
MMU_p[i] = rga_service.pre_scale_buf[i];\r
- }\r
}\r
- else\r
- {\r
+ else {\r
for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
- }\r
- }\r
- else\r
- {\r
- /* user space */\r
- ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0)\r
- {\r
- pr_err("rga map dst memory failed\n");\r
- status = ret;\r
- break;\r
+ MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);\r
}\r
}\r
\r