mp = msg1;\r
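/* compute X/Y scale ratios in 16.16 fixed point: ratio = (src << 16) / dst */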
w_ratio = (msg->src.act_w << 16) / msg->dst.act_w;\r
h_ratio = (msg->src.act_h << 16) / msg->dst.act_h;\r
-\r
- memcpy(&msg1, &msg, sizeof(struct rga_req));\r
+ \r
+ memcpy(msg1, msg, sizeof(struct rga_req));\r
\r
msg->dst.format = msg->src.format;\r
\r
msg->src.act_h = (dah - 1) << 3; \r
}\r
}\r
msg->dst.act_h = dah;\r
msg->dst.vir_h = dah;\r
\r
- msg->dst.yrgb_addr = (u32)rga_service.pre_scale_buf;\r
+ //msg->dst.yrgb_addr = (u32)rga_service.pre_scale_buf;\r
msg->dst.uv_addr = msg->dst.yrgb_addr + stride * dah;\r
msg->dst.v_addr = msg->dst.uv_addr + ((stride * dah) >> 1);\r
\r
msg->render_mode = pre_scaling_mode;\r
\r
- memcpy(&msg1->src, &msg->dst, sizeof(rga_img_info_t));\r
- \r
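+ /* refresh only the buffer addresses and geometry from the intermediate dst; msg1 already holds a full copy of the original request */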
+ msg1->src.yrgb_addr = msg->dst.yrgb_addr;\r
+ msg1->src.uv_addr = msg->dst.uv_addr;\r
+ msg1->src.v_addr = msg->dst.v_addr;\r
+\r
+ msg1->src.act_w = msg->dst.act_w;\r
+ msg1->src.act_h = msg->dst.act_h;\r
+ msg1->src.vir_w = msg->dst.vir_w;\r
+ msg1->src.vir_h = msg->dst.vir_h;\r
+ \r
return 0;\r
}\r
\r
#include "rga_mmu_info.h"\r
#include "RGA_API.h"\r
\r
-//#include "bug_320x240_swap0_ABGR8888.h"\r
+#include "bug_320x240_swap0_ABGR8888.h"\r
\r
\r
#define RGA_TEST 0
+#define RGA_TEST_TIME 0\r
\r
#define PRE_SCALE_BUF_SIZE 2048*1024*4\r
\r
#define DRIVER_DESC "RGA Device Driver"\r
#define DRIVER_NAME "rga"\r
\r
+ktime_t rga_start;\r
+ktime_t rga_end;\r
+\r
\r
struct rga_drvdata {\r
struct miscdevice miscdev;\r
ERR("soft reset timeout.\n");\r
}\r
\r
+\r
static void rga_dump(void)\r
{\r
int running;\r
reg->session = session;\r
INIT_LIST_HEAD(®->session_link);\r
INIT_LIST_HEAD(®->status_link);\r
-\r
+ \r
if (req->mmu_info.mmu_en)\r
{\r
ret = rga_set_mmu_info(reg, req);\r
return NULL; \r
}\r
}\r
+\r
+ #if RGA_TEST_TIME\r
+ rga_end = ktime_get();\r
+ rga_end = ktime_sub(rga_end, rga_start);\r
+ printk("one cmd end time %d\n", (int)ktime_to_us(rga_end));\r
+ #endif\r
\r
RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg);\r
\r
break;\r
}\r
\r
- reg0->session = session;\r
+ reg0->session = session; \r
INIT_LIST_HEAD(®0->session_link);\r
INIT_LIST_HEAD(®0->status_link);\r
\r
+ reg1->session = session;\r
+ INIT_LIST_HEAD(®1->session_link);\r
+ INIT_LIST_HEAD(®1->status_link);\r
+\r
if(req0->mmu_info.mmu_en)\r
{\r
ret = rga_set_mmu_info(reg0, req0);\r
\r
if(req1->mmu_info.mmu_en)\r
{\r
- ret = rga_set_mmu_info(reg0, req1);\r
+ ret = rga_set_mmu_info(reg1, req1);\r
if(ret < 0) {\r
printk("%s, [%d] set mmu info error \n", __FUNCTION__, __LINE__);\r
break; \r
}\r
}\r
\r
- RGA_gen_reg_info(req1, (uint8_t *)reg0->cmd_reg);\r
+ RGA_gen_reg_info(req1, (uint8_t *)reg1->cmd_reg);\r
+\r
+ #if RGA_TEST
+ {
+ uint32_t i;
+ for(i=0; i<28; i++)
+ {
+ printk("reg1->cmd_reg[%d] is %.8x\n", i, reg1->cmd_reg[i]);
+ }
+ }
+ #endif
\r
spin_lock_irqsave(&rga_service.lock, flag);\r
list_add_tail(®0->status_link, &rga_service.waiting);\r
- list_add_tail(®1->status_link, &rga_service.waiting);\r
- list_add_tail(®0->session_link, &session->waiting); \r
+ list_add_tail(®0->session_link, &session->waiting);\r
+ list_add_tail(®1->status_link, &rga_service.waiting); \r
list_add_tail(®1->session_link, &session->waiting);\r
spin_unlock_irqrestore(&rga_service.lock, flag);\r
\r
\r
if (!num)\r
{\r
- #ifdef RGA_TEST \r
+ #if RGA_TEST \r
printk("rga try set reg cmd num is 0\n");\r
#endif\r
\r
if (!list_empty(&rga_service.waiting)) \r
{\r
do\r
- {\r
- struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link); \r
+ { \r
+ struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);\r
if((rga_read(RGA_STATUS) & 0x1)) \r
{ \r
/* RGA is busy */\r
+ printk("no idel is here \n");\r
+ \r
if((atomic_read(&rga_service.cmd_num) <= 0xf) && (atomic_read(&rga_service.int_disable) == 0)) \r
{\r
- rga_copy_reg(reg, atomic_read(&rga_service.cmd_num)); \r
+ uint32_t offset;\r
+\r
+ offset = atomic_read(&rga_service.cmd_num);\r
+ rga_copy_reg(reg, offset); \r
rga_reg_from_wait_to_run(reg);\r
+\r
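+ /* clean the new 28-word command block from L1 (dmac_flush_range) and the outer/L2 cache so the engine fetches it from DRAM */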
+ dmac_flush_range(&rga_service.cmd_buff[offset*28], &rga_service.cmd_buff[(offset + 1)*28]);\r
+ outer_flush_range(virt_to_phys(&rga_service.cmd_buff[offset*28]),\r
+ virt_to_phys(&rga_service.cmd_buff[(offset + 1)*28]));\r
\r
rga_write(0x1<<10, RGA_INT);\r
\r
- #ifdef RGA_TEST\r
+ #if RGA_TEST\r
{\r
uint32_t i;\r
- printk("CMD_REG\n");\r
- for(i=0; i<28; i++) \r
- printk("%.8x\n", rga_service.cmd_buff[i + 28*atomic_read(&rga_service.cmd_num)]); \r
+ printk("CMD_REG num is %.8x\n", offset);\r
+ for(i=0; i<7; i++)\r
+ {\r
+ printk("%.8x ", rga_service.cmd_buff[i*4 + 0 + 28*atomic_read(&rga_service.cmd_num)]);\r
+ printk("%.8x ", rga_service.cmd_buff[i*4 + 1 + 28*atomic_read(&rga_service.cmd_num)]);\r
+ printk("%.8x ", rga_service.cmd_buff[i*4 + 2 + 28*atomic_read(&rga_service.cmd_num)]);\r
+ printk("%.8x\n",rga_service.cmd_buff[i*4 + 3 + 28*atomic_read(&rga_service.cmd_num)]);\r
+ }\r
}\r
#endif\r
\r
- atomic_set(®->session->done, 0);\r
- \r
+ atomic_set(®->session->done, 0); \r
rga_write((0x1<<3)|(0x1<<1), RGA_CMD_CTRL);\r
\r
if(atomic_read(®->int_enable))\r
}\r
else \r
{ \r
- /* RGA is idle */\r
+ /* RGA is idle */ \r
rga_copy_reg(reg, 0); \r
rga_reg_from_wait_to_run(reg);\r
+ \r
dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);\r
outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]),virt_to_phys(&rga_service.cmd_buff[28]));\r
\r
/* CMD buff */\r
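/* with the RGA MMU enabled the page base comes from the page table, so only the in-page offset of cmd_buff is programmed here */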
rga_write(virt_to_phys(rga_service.cmd_buff) & (~PAGE_MASK), RGA_CMD_ADDR); \r
\r
- #ifdef RGA_TEST\r
+ #if RGA_TEST\r
{\r
- uint32_t i;\r
- printk("CMD_REG\n");\r
- for (i=0; i<28; i++) \r
- printk("%.8x\n", rga_service.cmd_buff[i]); \r
- \r
+ uint32_t i, *p;\r
+ p = rga_service.cmd_buff;\r
+ printk("CMD_REG\n"); \r
+ for (i=0; i<7; i++) \r
+ printk("%.8x %.8x %.8x %.8x\n", p[i*4+0], p[i*4+1], p[i*4+2], p[i*4+3]);\r
}\r
#endif\r
\r
/* All CMD finish int */\r
rga_write(0x1<<10, RGA_INT);\r
\r
- //rga_write(1, RGA_MMU_STA_CTRL);\r
-\r
/* Start proc */\r
atomic_set(®->session->done, 0);\r
rga_write(0x1, RGA_CMD_CTRL); \r
\r
- //while(1)\r
- // printk("mmu_status is %.8x\n", rga_read(RGA_MMU_STA));\r
-\r
- #ifdef RGA_TEST\r
+ #if RGA_TEST\r
{\r
uint32_t i;\r
printk("CMD_READ_BACK_REG\n");\r
- for (i=0; i<28; i++) \r
- printk("%.8x\n", rga_read(0x100 + i*4)); \r
+ for (i=0; i<7; i++) \r
+ printk("%.8x %.8x %.8x %.8x\n", rga_read(0x100 + i*16 + 0), \r
+ rga_read(0x100 + i*16 + 4), rga_read(0x100 + i*16 + 8), rga_read(0x100 + i*16 + 12)); \r
}\r
- #endif \r
+ #endif\r
}\r
num--;\r
}\r
if(ret == -EINVAL) {\r
return -EINVAL;\r
}\r
- \r
+\r
reg = rga_reg_init(session, req);\r
if(reg == NULL) {\r
return -EFAULT;\r
{\r
return -EINVAL; \r
}\r
+ memset(req2, 0, sizeof(struct rga_req));\r
\r
RGA_gen_two_pro(req, req2);\r
\r
- reg = rga_reg_init_2(session, req2, req);\r
+ reg = rga_reg_init_2(session, req, req2);\r
if (NULL == reg) \r
{\r
return -EFAULT;\r
{\r
return -EFAULT;\r
}\r
- \r
+\r
+ //printk("rga_reg_int start \n"); \r
reg = rga_reg_init(session, req);\r
if(reg == NULL) \r
{ \r
return -EFAULT;\r
}\r
+ //printk("rga_reg_int end \n");\r
\r
atomic_set(®->int_enable, 1); \r
rga_try_set_reg(1);\r
} \r
+\r
+ \r
\r
ret_timeout = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);\r
if (unlikely(ret_timeout < 0)) 
struct rga_req *req;\r
int ret = 0;\r
rga_session *session = (rga_session *)file->private_data;\r
+\r
+ #if RGA_TEST_TIME\r
+ rga_start = ktime_get();\r
+ #endif\r
+\r
if (NULL == session) \r
{\r
printk("%s [%d] rga thread session is null\n",__FUNCTION__,__LINE__);\r
printk("%s [%d] get rga_req mem failed\n",__FUNCTION__,__LINE__);\r
ret = -EINVAL;\r
}\r
- \r
- if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req)))) \r
- {\r
- ERR("copy_from_user failed\n");\r
- ret = -EFAULT;\r
- }\r
- \r
+ \r
switch (cmd)\r
{\r
case RGA_BLIT_SYNC:\r
+ if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req))))
+ {
+ ERR("copy_from_user failed\n");
+ ret = -EFAULT;
+ break;
+ }
ret = rga_blit_sync(session, req);\r
break;\r
case RGA_BLIT_ASYNC:\r
+ if (unlikely(copy_from_user(req, (struct rga_req*)arg, sizeof(struct rga_req))))
+ {
+ ERR("copy_from_user failed\n");
+ ret = -EFAULT;
+ break;
+ }
ret = rga_blit_async(session, req); \r
break;\r
case RGA_FLUSH:\r
if(req != NULL) {\r
kfree(req);\r
} \r
+\r
+ \r
return ret;\r
}\r
\r
printk("RGA is not idle!\n");\r
rga_soft_reset();\r
}\r
-\r
\r
-\r
spin_lock(&rga_service.lock);\r
do\r
{\r
goto err_clock;\r
}\r
\r
- \r
+ \r
\r
- data->axi_clk = clk_get(&pdev->dev, "aclk_rga");\r
+ data->axi_clk = clk_get(NULL, "aclk_rga");\r
+ \r
if (IS_ERR(data->axi_clk))\r
{\r
ERR("failed to find rga axi clock source\n");\r
ret = -ENOENT;\r
goto err_clock;\r
}\r
-\r
#endif\r
\r
\r
if(data->pd_display){\r
clk_put(data->pd_display);\r
}\r
- #endif\r
+ #endif \r
\r
kfree(data);\r
return 0;\r
};\r
\r
\r
-//void rga_test_0(void);\r
+void rga_test_0(void);\r
\r
\r
static int __init rga_init(void)\r
}\r
\r
\r
-#if 0\r
-extern uint32_t ABGR8888_320_240_swap0[240][320];\r
+#if 1\r
+extern struct fb_info * rk_get_fb(int fb_id);\r
+EXPORT_SYMBOL(rk_get_fb);\r
\r
-unsigned int src_buf[800*480];\r
-unsigned int dst_buf[800*480];\r
-unsigned int mmu_buf[1024];\r
+extern void rk_direct_fb_show(struct fb_info * fbi);\r
+EXPORT_SYMBOL(rk_direct_fb_show);\r
+\r
+extern uint32_t ABGR8888_320_240_swap0[240][320];\r
+unsigned int dst_buf[1280*800];\r
\r
void rga_test_0(void)\r
{\r
rga_session session;\r
unsigned int *src, *dst;\r
\r
- int i;\r
+ struct fb_info *fb;\r
\r
session.pid = current->pid;\r
INIT_LIST_HEAD(&session.waiting);\r
atomic_set(&session.num_done, 0);\r
//file->private_data = (void *)session;\r
\r
+ fb = rk_get_fb(0);\r
+\r
memset(&req, 0, sizeof(struct rga_req));\r
src = (unsigned int *)ABGR8888_320_240_swap0;
dst = dst_buf;\r
-\r
+ \r
#if 0\r
memset(src_buf, 0x80, 800*480*4);\r
memset(dst_buf, 0xcc, 800*480*4);\r
\r
req.src.vir_w = 320;\r
req.src.vir_h = 240;\r
- req.src.yrgb_addr = src;\r
+ req.src.yrgb_addr = (uint32_t)src;\r
\r
- req.dst.act_w = 320;\r
- req.dst.act_h = 240;\r
+ req.dst.act_w = 100;\r
+ req.dst.act_h = 80;\r
\r
- req.dst.vir_w = 800;\r
- req.dst.vir_h = 480;\r
- req.dst.yrgb_addr = dst;\r
+ req.dst.vir_w = 1280;\r
+ req.dst.vir_h = 800;\r
+ req.dst.x_offset = 200;\r
+ req.dst.y_offset = 200;\r
+ req.dst.yrgb_addr = (uint32_t)dst;\r
\r
req.clip.xmin = 0;\r
- req.clip.xmax = 799;\r
+ req.clip.xmax = 1279;\r
req.clip.ymin = 0;\r
- req.clip.ymax = 479;\r
- \r
- \r
-\r
+ req.clip.ymax = 799;\r
+ \r
req.render_mode = 0;\r
- req.rotate_mode = 0;\r
+ req.rotate_mode = 1;\r
+ req.scale_mode = 2;\r
+\r
+ req.sina = 0;\r
+ req.cosa = 0x10000;\r
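+ /* rotate mode with identity angle: sin(0) = 0, cos(0) = 1.0 in 16.16 fixed point (0x10000) */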
\r
req.mmu_info.mmu_flag = 0x21;\r
req.mmu_info.mmu_en = 1;\r
\r
rga_blit_sync(&session, &req);\r
\r
- #if 0\r
- outer_inv_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480])); \r
- dmac_inv_range(&dst_buf[0], &dst_buf[800*480]);\r
+ fb->var.bits_per_pixel = 32;\r
\r
- for(i=0; i<800*480; i++)\r
- { \r
- if(src[i] != dst[i])\r
- {\r
- printk("src != dst %d\n", i);\r
- printk("src = %.8x, dst = %.8x \n", src[i], dst[i]);\r
- }\r
- }\r
- #endif\r
+ fb->var.xres = 1280;\r
+ fb->var.yres = 800;\r
+ \r
+ fb->var.red.length = 8;\r
+ fb->var.red.offset = 0;\r
+ fb->var.red.msb_right = 0;\r
+ \r
+ fb->var.green.length = 8;\r
+ fb->var.green.offset = 8;\r
+ fb->var.green.msb_right = 0;\r
+ \r
+ fb->var.blue.length = 8;\r
+ fb->var.blue.offset = 16;\r
+ fb->var.blue.msb_right = 0;\r
+ \r
+ fb->var.transp.length = 8;\r
+ fb->var.transp.offset = 24;\r
+ fb->var.transp.msb_right = 0;\r
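+ /* channel offsets R=0, G=8, B=16, A=24 match the ABGR8888 test image layout */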
+\r
+ fb->fix.smem_start = virt_to_phys(dst);\r
+\r
+ rk_direct_fb_show(fb); \r
+ \r
}\r
\r
#endif\r
int32_t result;\r
uint32_t i;\r
uint32_t status;\r
-\r
+ uint32_t Address;\r
status = 0;\r
\r
do\r
NULL\r
);\r
up_read(¤t->mm->mmap_sem);\r
- \r
+\r
if(result <= 0 || result < pageCount) \r
{\r
struct vm_area_struct *vma;\r
\r
- vma = find_vma(current->mm, Memory);\r
-\r
- if (vma && (vma->vm_flags & VM_PFNMAP) )\r
+ for(i=0; i<pageCount; i++)\r
{\r
- do\r
- {\r
- pte_t * pte;\r
- spinlock_t * ptl;\r
- unsigned long pfn;\r
+ vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);\r
\r
- pgd_t * pgd = pgd_offset(current->mm, Memory);\r
- pud_t * pud = pud_offset(pgd, Memory);\r
- if (pud)\r
+ if (vma && (vma->vm_flags & VM_PFNMAP) )\r
+ {\r
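+ /* VM_PFNMAP vmas have no struct page backing, so walk pgd/pud/pmd/pte by hand to recover each pfn */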
+ do\r
{\r
- pmd_t * pmd = pmd_offset(pud, Memory);\r
- if (pmd)\r
+ pte_t * pte;\r
+ spinlock_t * ptl;\r
+ unsigned long pfn;
+ uint32_t j;
+\r
+ pgd_t * pgd = pgd_offset(current->mm, ((Memory + i)<< PAGE_SHIFT));\r
+ pud_t * pud = pud_offset(pgd, ((Memory + i) << PAGE_SHIFT));\r
+ if (pud)\r
{\r
- pte = pte_offset_map_lock(current->mm, pmd, Memory, &ptl);\r
- if (!pte)\r
+ pmd_t * pmd = pmd_offset(pud, ((Memory + i) << PAGE_SHIFT));\r
+ if (pmd)\r
+ {\r
+ pte = pte_offset_map_lock(current->mm, pmd, ((Memory + i)<< PAGE_SHIFT), &ptl);\r
+ if (!pte)\r
+ {\r
+ break;\r
+ }\r
+ }\r
+ else\r
{\r
break;\r
}\r
{\r
break;\r
}\r
- }\r
- else\r
- {\r
- break;\r
- }\r
\r
- pfn = pte_pfn(*pte);\r
- \r
- pte_unmap_unlock(pte, ptl);\r
+ pfn = pte_pfn(*pte);\r
\r
- /* Free the page table. */\r
- if (pages != NULL)\r
- {\r
- /* Release the pages if any. */\r
- if (result > 0)\r
+ Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));\r
+ \r
+ pte_unmap_unlock(pte, ptl);\r
+\r
+ /* Free the page table. */\r
+ if (pages != NULL)\r
{\r
- for (i = 0; i < result; i++)\r
+ /* Release the pages if any. */\r
+ if (result > 0)\r
{\r
- if (pages[i] == NULL)\r
+ /* use a separate index: 'i' is the outer per-page loop counter */
+ for (j = 0; j < result; j++)
{\r
- break;\r
- }\r
+ if (pages[j] == NULL)
+ {\r
+ break;\r
+ }\r
\r
- page_cache_release(pages[i]);\r
+ page_cache_release(pages[j]);
+ }\r
}\r
}\r
+ \r
+ pageTable[i] = Address;\r
}\r
-\r
- return 0;\r
+ while (0);\r
}\r
- while (0);\r
-\r
- status = RGA_OUT_OF_RESOURCES;\r
- break;\r
+ else\r
+ {\r
+ status = RGA_OUT_OF_RESOURCES;\r
+ break;\r
+ } \r
}\r
- else\r
- {\r
- status = RGA_OUT_OF_RESOURCES;\r
- break;\r
- } \r
+\r
+ return 0;\r
}\r
\r
for (i = 0; i < pageCount; i++)\r
}\r
\r
/* Fill the page table. */\r
- for(i=0; i<pageCount; i++) {\r
-\r
+ for(i=0; i<pageCount; i++) \r
+ {\r
/* Get the physical address from page struct. */\r
- pageTable[i * (PAGE_SIZE/4096)] = page_to_phys(pages[i]);\r
+ pageTable[i] = page_to_phys(pages[i]);\r
} \r
}\r
while(0);\r
\r
/* cal dst buf mmu info */ \r
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+ req->dst.format, req->dst.vir_w, (req->dst.act_h + req->dst.y_offset),\r
&DstStart);\r
if(DstMemSize == 0) {\r
return -EINVAL; \r
\r
pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
+ pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
\r
MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed");\r
+ pr_err("RGA MMU malloc MMU_Base point failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
\r
- printk("MMU_Base addr is %.8x\n", MMU_Base);\r
- printk("CMDStart is %.8x\n",CMDStart);\r
-\r
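/* the first CMDMemSize entries of the table map the kernel command buffer pages */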
for(i=0; i<CMDMemSize; i++) {\r
MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT)); \r
}\r
\r
- printk("MMU_Base[0] = %.8x\n", MMU_Base[0]);\r
-\r
if(req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
+ \r
ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
if (ret < 0) {\r
- pr_err("rga map src memory failed");\r
+ pr_err("rga map src memory failed\n");\r
status = ret;\r
break;\r
}\r
+ \r
}\r
else\r
{\r
}\r
} \r
}\r
-\r
- printk("MMU_Base[1] = %.8x\n", MMU_Base[1]);\r
\r
if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
+ #if 0\r
+ ktime_t start, end;\r
+ start = ktime_get();\r
+ #endif\r
ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
if (ret < 0) {\r
- pr_err("rga map dst memory failed");\r
+ pr_err("rga map dst memory failed\n");\r
status = ret;\r
break;\r
}\r
+\r
+ #if 0\r
+ end = ktime_get();\r
+ end = ktime_sub(end, start);\r
+ printk("dst mmu map time = %d\n", (int)ktime_to_us(end));\r
+ #endif\r
}\r
else\r
{\r
/* zsq \r
* change the buf address in req struct \r
*/\r
+ #if 0 \r
+ printk("CMDMemSize is %.8x\n", CMDMemSize);\r
+ printk("SrcMemSize is %.8x\n", SrcMemSize);\r
+ printk("DstMemSize is %.8x\n", DstMemSize);\r
+ printk("CMDStart is %.8x\n", CMDStart);\r
+ printk("SrcStart is %.8x\n", SrcStart);\r
+ printk("DstStart is %.8x\n", DstStart);\r
+ #endif\r
+ \r
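+ /* keep each address's offset within its page; the page index now selects the matching entry just built in MMU_Base */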
req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2); \r
\r
req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
- req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
+ req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT); \r
\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
- \r
+ \r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
\r
pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
+ pr_err("RGA MMU malloc pages mem failed\n");\r
return -EINVAL; \r
}\r
\r
MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {\r
- pr_err("RGA MMU malloc MMU_Base point failed");\r
+ pr_err("RGA MMU malloc MMU_Base point failed\n");\r
break; \r
}\r
\r
ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
if (ret < 0) \r
{\r
- pr_err("rga map src memory failed");\r
+ pr_err("rga map src memory failed\n");\r
status = ret;\r
break; \r
}\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
+ /* flush data to DDR */\r
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
/* Free the page table */\r
if (pages != NULL) { \r
kfree(pages);\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
+ /* flush data to DDR */\r
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
/* Free the page table */\r
if (pages != NULL) \r
kfree(pages);\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
+ /* flush data to DDR */\r
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
/* Free the page table */\r
if (pages != NULL) { \r
kfree(pages);\r
\r
pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
+ pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
\r
MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {
- pr_err("RGA MMU malloc MMU_Base point failed");\r
+ pr_err("RGA MMU malloc MMU_Base point failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
if (ret < 0) \r
{\r
- pr_err("rga map src memory failed");\r
+ pr_err("rga map src memory failed\n");\r
status = ret;\r
break;\r
}\r
ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
if (ret < 0) \r
{\r
- pr_err("rga map dst memory failed");\r
+ pr_err("rga map dst memory failed\n");\r
status = ret;\r
break;\r
}\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
+ /* flush data to DDR */\r
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
/* Free the page table */\r
if (pages != NULL) { \r
kfree(pages);\r
}\r
\r
AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+\r
+ \r
+ #if 0\r
+ printk("AllSize = %d\n", AllSize);\r
+ printk("SrcSize = %d\n", SrcMemSize);\r
+ printk("CMDSize = %d\n", CMDMemSize);\r
+ printk("DstSize = %d\n", DstMemSize);\r
+ printk("DstStart = %d\n", DstStart);\r
+ #endif\r
\r
pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) \r
{\r
- pr_err("RGA MMU malloc pages mem failed");\r
+ pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
*/\r
MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {
- pr_err("RGA MMU malloc MMU_Base point failed");\r
+ pr_err("RGA MMU malloc MMU_Base point failed\n");\r
status = RGA_MALLOC_ERROR; \r
break; \r
}\r
{\r
ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
if (ret < 0) {\r
- pr_err("rga map src memory failed");\r
+ pr_err("rga map src memory failed\n");\r
status = ret;\r
break;\r
}\r
ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);\r
if (ret < 0) \r
{\r
- pr_err("rga map dst memory failed");\r
+ pr_err("rga map dst memory failed\n");\r
status = ret;\r
break;\r
} \r
* change the buf address in req struct\r
* for the reason of lie to MMU \r
*/\r
+ \r
req->mmu_info.base_addr = virt_to_phys(MMU_Base)>>2;\r
\r
req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
\r
req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);\r
- \r
+\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
+ /* flush data to DDR */\r
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
/* Free the page table */\r
if (pages != NULL) \r
{ \r
\r
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)\r
{\r
- int SrcMemSize, DstMemSize, CMDMemSize;\r
+ int SrcMemSize, CMDMemSize;\r
uint32_t SrcStart, CMDStart;\r
struct page **pages = NULL;\r
uint32_t i;\r
return -EINVAL; \r
}\r
\r
- AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
+ AllSize = SrcMemSize + CMDMemSize;\r
\r
pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
+ pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
\r
MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {
- pr_err("RGA MMU malloc MMU_Base point failed");\r
+ pr_err("RGA MMU malloc MMU_Base point failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
{\r
ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
if (ret < 0) {\r
- pr_err("rga map src memory failed");\r
+ pr_err("rga map src memory failed\n");\r
return -EINVAL;\r
}\r
}\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
+ /* flush data to DDR */\r
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
if (pages != NULL) {\r
/* Free the page table */\r
kfree(pages);\r
\r
pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
- pr_err("RGA MMU malloc pages mem failed");\r
+ pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
\r
MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {
- pr_err("RGA MMU malloc MMU_Base point failed");\r
+ pr_err("RGA MMU malloc MMU_Base point failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
{\r
ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
if (ret < 0) {\r
- pr_err("rga map src memory failed");\r
+ pr_err("rga map src memory failed\n");\r
status = ret;\r
break;\r
}\r
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
+ /* flush data to DDR */\r
+ dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+\r
if (pages != NULL) {\r
/* Free the page table */\r
kfree(pages);\r
xp = CLIP(xp, msg->src.x_offset, msg->src.x_offset + msg->src.act_w - 1);\r
yp = CLIP(yp, msg->src.y_offset, msg->src.y_offset + msg->src.act_h - 1);\r
}\r
+\r
+ printk("xoffset = %.8x\n", msg->src.x_offset);\r
+ printk("yoffset = %.8x\n", msg->src.y_offset);\r
+ printk("xp = %.8x\n", xp);\r
+ printk("yp = %.8x\n", yp);\r
\r
switch(msg->src.format)\r
{ \r
break;\r
}\r
\r
+ printk("y_addr is %.8x\n", y_addr);\r
+\r
*bRGA_SRC_Y_MST = y_addr;\r
*bRGA_SRC_CB_MST = u_addr;\r
*bRGA_SRC_CR_MST = v_addr;\r
dst_width = msg->dst.act_w;\r
dst_height = msg->dst.act_h;\r
\r
- h_ratio = (src_width )<<16 / dst_width;\r
- v_ratio = (src_height)<<16 / dst_height;\r
+ printk("src_act_w = %.8x, src_act_h =%.8x dst_act_w = %.8x, dst_act_h = %.8x\n", \r
+ msg->src.act_w, msg->src.act_h, msg->dst.act_w, msg->dst.act_h);\r
+\r
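+ /* note the parentheses: '/' binds tighter than '<<', so the old code computed src << (16 / dst) */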
+ h_ratio = (src_width <<16) / dst_width;\r
+ v_ratio = (src_height<<16) / dst_height;\r
\r
if (h_ratio <= (1<<16)) \r
h_ratio = 0;\r