#include <linux/slab.h>\r
#include <linux/fb.h>\r
#include <linux/wakelock.h>\r
+#include <linux/scatterlist.h>\r
+\r
\r
#if defined(CONFIG_ION_ROCKCHIP)\r
#include <linux/rockchip_ion.h>\r
#endif\r
\r
-\r
#include "rga2.h"\r
#include "rga2_reg_info.h"\r
#include "rga2_mmu_info.h"\r
#include "RGA2_API.h"\r
\r
+#if defined(CONFIG_ROCKCHIP_IOMMU) && defined(CONFIG_ION_ROCKCHIP)\r
+#define CONFIG_RGA_IOMMU\r
+#endif\r
+\r
\r
#define RGA2_TEST_FLUSH_TIME 0\r
#define RGA2_INFO_BUS_ERROR 1\r
|| (req->mmu_info.dst_mmu_flag & 1) || (req->mmu_info.els_mmu_flag & 1))\r
{\r
ret = rga2_set_mmu_info(reg, req);\r
- if(ret < 0)\r
- {\r
+ if(ret < 0) {\r
printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);\r
if(reg != NULL)\r
- {\r
kfree(reg);\r
- }\r
+\r
return NULL;\r
}\r
}\r
\r
- if(RGA2_gen_reg_info((uint8_t *)reg->cmd_reg, req) == -1)\r
- {\r
+ if(RGA2_gen_reg_info((uint8_t *)reg->cmd_reg, req) == -1) {\r
printk("gen reg info error\n");\r
if(reg != NULL)\r
- {\r
kfree(reg);\r
- }\r
+\r
return NULL;\r
}\r
\r
\r
rga2_power_on();\r
udelay(1);\r
\r
rga2_copy_reg(reg, 0);\r
rga2_reg_from_wait_to_run(reg);\r
ion_phys_addr_t phy_addr;\r
size_t len;\r
int ret;\r
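+ /* no scatter-gather tables attached yet; the IOMMU paths below\r
+ * fill these in for ion-backed buffers */\r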
+ req->sg_src0 = NULL;\r
+ req->sg_src1 = NULL;\r
+ req->sg_dst = NULL;\r
+ req->sg_els = NULL;\r
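+\r
+ /*\r
+ * For each buffer: import the ion handle, resolve it either to a\r
+ * scatter-gather table (IOMMU path) or to a physical address via\r
+ * ion_phys(), then drop the handle. The uv/v offsets assume a\r
+ * contiguous YUV420 layout: a vir_w * vir_h luma plane followed by\r
+ * two quarter-size chroma planes.\r
+ */\r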
\r
if(req->src.yrgb_addr) {\r
hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->src.yrgb_addr);\r
printk("RGA2 ERROR ion buf handle\n");\r
return ret;\r
}\r
- ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);\r
+ #ifdef CONFIG_RGA_IOMMU\r
+ if (req->mmu_info.src0_mmu_flag) {\r
+ req->sg_src0 = ion_sg_table(rga2_drvdata->ion_client, hdl);\r
+ req->src.yrgb_addr = req->src.uv_addr;\r
+ req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);\r
+ req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;\r
+ }\r
+ else {\r
+ ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);\r
+ req->src.yrgb_addr = phy_addr;\r
+ req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);\r
+ req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;\r
+ }\r
+ #else\r
+ ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);\r
req->src.yrgb_addr = phy_addr;\r
req->src.uv_addr = req->src.yrgb_addr + (req->src.vir_w * req->src.vir_h);\r
req->src.v_addr = req->src.uv_addr + (req->src.vir_w * req->src.vir_h)/4;\r
+ #endif\r
ion_free(rga2_drvdata->ion_client, hdl);\r
}\r
if(req->dst.yrgb_addr) {\r
hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->dst.yrgb_addr);\r
if (IS_ERR(hdl)) {\r
ret = PTR_ERR(hdl);\r
printk("RGA2 ERROR ion buf handle\n");\r
return ret;\r
}\r
+ #ifdef CONFIG_RGA_IOMMU\r
+ if (req->mmu_info.dst_mmu_flag) {\r
+ req->sg_dst = ion_sg_table(rga2_drvdata->ion_client, hdl);\r
+ req->dst.yrgb_addr = req->dst.uv_addr;\r
+ req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
+ req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
+ }\r
+ else {\r
+ ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);\r
+ req->dst.yrgb_addr = phy_addr;\r
+ req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
+ req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
+ }\r
+ #else\r
ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);\r
req->dst.yrgb_addr = phy_addr;\r
req->dst.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
+ #endif\r
ion_free(rga2_drvdata->ion_client, hdl);\r
}\r
else {\r
req->dst.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
}\r
\r
+ if(req->src1.yrgb_addr) {\r
+ hdl = ion_import_dma_buf(rga2_drvdata->ion_client, req->src1.yrgb_addr);\r
+ if (IS_ERR(hdl)) {\r
+ ret = PTR_ERR(hdl);\r
+ printk("RGA2 ERROR ion buf handle\n");\r
+ return ret;\r
+ }\r
+ #ifdef CONFIG_RGA_IOMMU\r
+ if (req->mmu_info.dst_mmu_flag) {\r
+ req->sg_src1 = ion_sg_table(rga2_drvdata->ion_client, hdl);\r
+ req->src1.yrgb_addr = 0;\r
+ req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
+ req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
+ }\r
+ else {\r
+ ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);\r
+ req->src1.yrgb_addr = phy_addr;\r
+ req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
+ req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
+ }\r
+ #else\r
+ ion_phys(rga2_drvdata->ion_client, hdl, &phy_addr, &len);\r
+ req->src1.yrgb_addr = phy_addr;\r
+ req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
+ req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
+ #endif\r
+ ion_free(rga2_drvdata->ion_client, hdl);\r
+ }\r
+ else {\r
+ req->src1.yrgb_addr = req->dst.uv_addr;\r
+ req->src1.uv_addr = req->dst.yrgb_addr + (req->dst.vir_w * req->dst.vir_h);\r
+ req->src1.v_addr = req->dst.uv_addr + (req->dst.vir_w * req->dst.vir_h)/4;\r
+ }\r
+\r
return 0;\r
}\r
\r
switch (cmd)\r
{\r
case RGA_BLIT_SYNC:\r
-\r
if (unlikely(copy_from_user(&req_rga, (struct rga_req*)arg, sizeof(struct rga_req))))\r
{\r
ERR("copy_from_user failed\n");\r
printk("************ RGA2_TEST ************\n");\r
printk("********************************\n\n");\r
\r
+ req.pat.act_w = 16;\r
+ req.pat.act_h = 16;\r
+ req.pat.vir_w = 16;\r
+ req.pat.vir_h = 16;\r
+ req.pat.yrgb_addr = virt_to_phys(src);\r
+ req.render_mode = update_palette_table_mode;\r
+ rga2_blit_sync(&session, &req);\r
+\r
req.src.act_w = 4096;\r
req.src.act_h = 2304;\r
\r
#include <linux/slab.h>\r
#include <linux/memory.h>\r
#include <linux/dma-mapping.h>\r
+#include <linux/scatterlist.h>\r
#include <asm/memory.h>\r
#include <asm/atomic.h>\r
#include <asm/cacheflush.h>\r
size_yrgb = stride * h;\r
size_uv = stride * h;\r
start = MIN(yrgb_addr, uv_addr);\r
-\r
start >>= PAGE_SHIFT;\r
end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));\r
end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;\r
pageCount = end - start;\r
+ //printk("yrgb_addr = %.8x\n", yrgb_addr);\r
+ //printk("uv_addr = %.8x\n", uv_addr);\r
break;\r
case RGA2_FORMAT_YCbCr_420_P :\r
case RGA2_FORMAT_YCrCb_420_P :\r
return status;\r
}\r
\r
+/*\r
+ * rga2_MapION - flatten an ion scatter-gather table into the RGA2 MMU\r
+ * page table: one 32-bit physical address per 4KB page, at most\r
+ * pageCount entries. Assumes the table's dma lengths are valid and\r
+ * page-aligned, since each segment is converted with\r
+ * sg_dma_len() >> PAGE_SHIFT.\r
+ */\r
+static int rga2_MapION(struct sg_table *sg,\r
+ uint32_t *Memory,\r
+ int32_t pageCount)\r
+{\r
+ uint32_t i;\r
+ uint32_t Address;\r
+ uint32_t mapped_size = 0;\r
+ uint32_t len;\r
+ struct scatterlist *sgl = sg->sgl;\r
+ uint32_t sg_num = 0;\r
+\r
+ do {\r
+ /* pages covered by this segment */\r
+ len = sg_dma_len(sgl) >> PAGE_SHIFT;\r
+ Address = sg_phys(sgl);\r
+ /* clamp so a long segment cannot write past the table end */\r
+ if (mapped_size + len > (uint32_t)pageCount)\r
+ len = pageCount - mapped_size;\r
+ for(i=0; i<len; i++) {\r
+ Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);\r
+ }\r
+\r
+ mapped_size += len;\r
+ sg_num += 1;\r
+ }\r
+ while((sgl = sg_next(sgl)) && (mapped_size < (uint32_t)pageCount) && (sg_num < sg->nents));\r
+\r
+ return 0;\r
+}\r
+\r
+\r
static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
int Src0MemSize, DstMemSize, Src1MemSize;\r
DstMemSize = (DstMemSize+15)&(~15);\r
AllSize = Src0MemSize + Src1MemSize + DstMemSize;\r
\r
- pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
+ if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {\r
+ pr_err("RGA2 Get MMU mem failed\n");\r
status = RGA2_MALLOC_ERROR;\r
break;\r
}\r
\r
- if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {\r
- pr_err("RGA2 Get MMU mem failed\n");\r
+ pages = kzalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
+ if(pages == NULL) {\r
+ pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA2_MALLOC_ERROR;\r
break;\r
}\r
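+ /* rga2_mmu_buf is a power-of-two ring; masking 'front' with\r
+ * (size - 1) wraps the write position into the buffer */\r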
mutex_lock(&rga2_service.lock);\r
MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));\r
MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));\r
mutex_unlock(&rga2_service.lock);\r
-\r
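+ /* ion buffers carry a scatter-gather table; everything else is\r
+ * mapped as user virtual memory */\r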
if(Src0MemSize) {\r
- ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], Src0Start, Src0MemSize);\r
+ if (req->sg_src0) {\r
+ ret = rga2_MapION(req->sg_src0, &MMU_Base[0], Src0MemSize);\r
+ }\r
+ else {\r
+ ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], Src0Start, Src0MemSize);\r
+ }\r
+\r
if (ret < 0) {\r
pr_err("rga2 map src0 memory failed\n");\r
+ pr_err("RGA2 : yrgb = %.8x, uv = %.8x format = %d\n", req->src.yrgb_addr, req->src.uv_addr, req->src.format);\r
+ pr_err("RGA2 : vir_w = %d, vir_h = %d\n", req->src.vir_w, req->src.vir_h);\r
status = ret;\r
break;\r
}\r
}\r
\r
if(Src1MemSize) {\r
- ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize, Src1Start, Src1MemSize);\r
+ if (req->sg_src1) {\r
+ ret = rga2_MapION(req->sg_src1, MMU_Base + Src0MemSize, Src1MemSize);\r
+ }\r
+ else {\r
+ ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize, Src1Start, Src1MemSize);\r
+ }\r
+\r
if (ret < 0) {\r
pr_err("rga2 map src1 memory failed\n");\r
+ pr_err("RGA2 : yrgb = %.8x, format = %d\n", req->src1.yrgb_addr, req->src1.format);\r
+ pr_err("RGA2 : vir_w = %d, vir_h = %d\n", req->src1.vir_w, req->src1.vir_h);\r
status = ret;\r
break;\r
}\r
}\r
\r
if(DstMemSize) {\r
- ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize + Src1MemSize, DstStart, DstMemSize);\r
+ if (req->sg_dst) {\r
+ ret = rga2_MapION(req->sg_dst, MMU_Base + Src0MemSize + Src1MemSize, DstMemSize);\r
+ }\r
+ else {\r
+ ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize + Src1MemSize, DstStart, DstMemSize);\r
+ }\r
if (ret < 0) {\r
pr_err("rga2 map dst memory failed\n");\r
+ pr_err("RGA2 : yrgb = %.8x, uv = %.8x\n, format = %d\n", req->dst.yrgb_addr, req->dst.uv_addr, req->dst.format);\r
+ pr_err("RGA2 : vir_w = %d, vir_h = %d\n", req->dst.vir_w, req->dst.vir_h);\r
status = ret;\r
break;\r
}\r
kfree(pages);\r
}\r
\r
- /* Free MMU table */\r
- if(MMU_Base != NULL) {\r
- kfree(MMU_Base);\r
- }\r
-\r
return status;\r
}\r
\r
static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
- int SrcMemSize, DstMemSize, CMDMemSize;\r
- uint32_t SrcStart, DstStart, CMDStart;\r
+ int SrcMemSize, DstMemSize;\r
+ uint32_t SrcStart, DstStart;\r
struct page **pages = NULL;\r
- uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base = NULL;\r
- uint32_t *MMU_p;\r
+ uint32_t *MMU_Base = NULL, *MMU_Base_phys;\r
int ret, status;\r
uint32_t stride;\r
\r
uint16_t sw, byte_num;\r
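+ /* palette_mode 0..3 selects 1/2/4/8 bits per index pixel; stride\r
+ * is the index data size in bytes rounded up to a 4-byte boundary */\r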
\r
shift = 3 - (req->palette_mode & 3);\r
- sw = req->src.vir_w;\r
+ sw = req->src.vir_w*req->src.vir_h;\r
byte_num = sw >> shift;\r
stride = (byte_num + 3) & (~3);\r
\r
+ SrcMemSize = 0;\r
+ DstMemSize = 0;\r
+\r
do\r
{\r
-\r
- SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);\r
- if(SrcMemSize == 0) {\r
- return -EINVAL;\r
+ if (req->mmu_info.src0_mmu_flag) {\r
+ SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);\r
+ if(SrcMemSize == 0) {\r
+ return -EINVAL;\r
+ }\r
}\r
\r
- DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
- &DstStart);\r
- if(DstMemSize == 0) {\r
- return -EINVAL;\r
+ if (req->mmu_info.dst_mmu_flag) {\r
+ DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
+ req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+ &DstStart);\r
+ if(DstMemSize == 0) {\r
+ return -EINVAL;\r
+ }\r
}\r
\r
- CMDMemSize = rga2_mem_size_cal((uint32_t)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL;\r
- }\r
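+ /* round each region up to a multiple of 16 page-table entries */\r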
+ SrcMemSize = (SrcMemSize + 15) & (~15);\r
+ DstMemSize = (DstMemSize + 15) & (~15);\r
+\r
+ AllSize = SrcMemSize + DstMemSize;\r
\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {\r
+ pr_err("RGA2 Get MMU mem failed\n");\r
+ status = RGA2_MALLOC_ERROR;\r
+ break;\r
+ }\r
\r
pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA2_MALLOC_ERROR;\r
break;\r
}\r
\r
- MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
- if(MMU_Base == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
- break;\r
- }\r
-\r
- /* map CMD addr */\r
- for(i=0; i<CMDMemSize; i++)\r
- {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i)<<PAGE_SHIFT));\r
- }\r
+ mutex_lock(&rga2_service.lock);\r
+ MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));\r
+ MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));\r
+ mutex_unlock(&rga2_service.lock);\r
\r
- /* map src addr */\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga2_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
- if (ret < 0)\r
- {\r
- pr_err("rga map src memory failed\n");\r
+ if(SrcMemSize) {\r
+ ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga2 map src0 memory failed\n");\r
status = ret;\r
break;\r
}\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base + CMDMemSize;\r
\r
- for(i=0; i<SrcMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
- }\r
+ /* change the buf address in req struct */\r
+ req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys));\r
+ req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
- /* map dst addr */\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga2_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
- if (ret < 0)\r
- {\r
- pr_err("rga map dst memory failed\n");\r
+ if(DstMemSize) {\r
+ ret = rga2_MapUserMemory(&pages[0], MMU_Base + SrcMemSize, DstStart, DstMemSize);\r
+ if (ret < 0) {\r
+ pr_err("rga2 map dst memory failed\n");\r
status = ret;\r
break;\r
}\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base + CMDMemSize + SrcMemSize;\r
\r
- for(i=0; i<DstMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- }\r
+ /* change the buf address in req struct */\r
+ req->mmu_info.dst_base_addr = ((uint32_t)(MMU_Base_phys + SrcMemSize));\r
+ req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
+ /* flush data to DDR */\r
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));\r
\r
- /* zsq\r
- * change the buf address in req struct\r
- * for the reason of lie to MMU\r
- */\r
- req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base)>>2);\r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
-\r
-\r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
+ rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
+ reg->MMU_len = AllSize;\r
\r
- /* flush data to DDR */\r
- dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));\r
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize + 1));\r
+ status = 0;\r
\r
/* Free the page table */\r
if (pages != NULL) {\r
kfree(pages);\r
}\r
\r
return status;\r
-\r
}\r
while(0);\r
\r
kfree(pages);\r
}\r
\r
- /* Free mmu table */\r
- if (MMU_Base != NULL) {\r
- kfree(MMU_Base);\r
- }\r
-\r
return status;\r
}\r
\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));\r
\r
- rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize);\r
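+ /* commit the reservation: rga2_mmu_buf_get() advances the ring's\r
+ * front pointer now that the table entries are written and flushed */\r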
+ rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
\r
/* Free the page table */\r
if (pages != NULL)\r
if (pages != NULL)\r
kfree(pages);\r
\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
return status;\r
}\r
\r
\r
static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct rga2_req *req)\r
{\r
- int SrcMemSize, CMDMemSize;\r
- uint32_t SrcStart, CMDStart;\r
+ int SrcMemSize;\r
+ uint32_t SrcStart;\r
struct page **pages = NULL;\r
- uint32_t i;\r
uint32_t AllSize;\r
- uint32_t *MMU_Base, *MMU_p;\r
+ uint32_t *MMU_Base, *MMU_Base_phys;\r
int ret, status;\r
\r
MMU_Base = NULL;\r
do\r
{\r
/* cal src buf mmu info */\r
- SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);\r
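+ /* the palette table to load is described by req->pat, not req->src */\r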
+ SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h, &SrcStart);\r
if(SrcMemSize == 0) {\r
return -EINVAL;\r
}\r
\r
- /* cal cmd buf mmu info */\r
- CMDMemSize = rga2_mem_size_cal((uint32_t)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);\r
- if(CMDMemSize == 0) {\r
- return -EINVAL;\r
- }\r
+ SrcMemSize = (SrcMemSize + 15) & (~15);\r
+ AllSize = SrcMemSize;\r
\r
- AllSize = SrcMemSize + CMDMemSize;\r
-\r
- pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
- if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed\n");\r
+ if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {\r
+ pr_err("RGA2 Get MMU mem failed\n");\r
status = RGA2_MALLOC_ERROR;\r
break;\r
}\r
\r
- MMU_Base = kzalloc((AllSize + 1)* sizeof(uint32_t), GFP_KERNEL);\r
+ mutex_lock(&rga2_service.lock);\r
+ MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));\r
+ MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));\r
+ mutex_unlock(&rga2_service.lock);\r
+\r
+ pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed\n");\r
+ pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA2_MALLOC_ERROR;\r
break;\r
}\r
\r
- for(i=0; i<CMDMemSize; i++) {\r
- MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));\r
- }\r
-\r
- if (req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
- ret = rga2_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
+ if(SrcMemSize) {\r
+ ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);\r
if (ret < 0) {\r
- pr_err("rga map src memory failed\n");\r
- return -EINVAL;\r
+ pr_err("rga2 map palette memory failed\n");\r
+ status = ret;\r
+ break;\r
}\r
- }\r
- else\r
- {\r
- MMU_p = MMU_Base + CMDMemSize;\r
\r
- for(i=0; i<SrcMemSize; i++)\r
- {\r
- MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
- }\r
+ /* change the buf address in req struct */\r
+ req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys));\r
+ req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK));\r
}\r
\r
- /* zsq\r
- * change the buf address in req struct\r
- * for the reason of lie to MMU\r
- */\r
- req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base) >> 2);\r
-\r
- req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
-\r
- /*record the malloc buf for the cmd end to release*/\r
- reg->MMU_base = MMU_Base;\r
-\r
/* flush data to DDR */\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
- outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));\r
+\r
+ rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);\r
+ reg->MMU_len = AllSize;\r
\r
if (pages != NULL) {\r
/* Free the page table */\r
kfree(pages);\r
}\r
\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
return status;\r
}\r
\r
if (pages != NULL)\r
kfree(pages);\r
\r
- if (MMU_Base != NULL)\r
- kfree(MMU_Base);\r
-\r
return status;\r
}\r
\r