#include "rga_mmu_info.h"\r
#include "RGA_API.h"\r
\r
+//#include "bug_320x240_swap0_ABGR8888.h"\r
+\r
\r
#define RGA_TEST 0\r
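+/* set RGA_TEST to 1 to enable the debug register dumps guarded by the #if RGA_TEST blocks below */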
\r
rga_copy_reg(reg, 0); \r
rga_reg_from_wait_to_run(reg);\r
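+ /* flush the command buffer (cmd_buff[0..27]) from the CPU caches so the RGA engine
+ * fetches up-to-date command data from DDR; the added outer_flush_range covers the
+ * outer (L2) cache by physical address */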
dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);\r
-\r
+ outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]),virt_to_phys(&rga_service.cmd_buff[28]));\r
\r
/* \r
* if cmd buf must use mmu\r
* it should be configured before cmd start \r
*/\r
- rga_write((2<<4)|0x1, RGA_MMU_CTRL);\r
+ rga_write((2<<4)|0x1, RGA_MMU_CTRL); \r
+ \r
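+ /* program the MMU page-table base register with the table's physical address shifted right by 2 */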
rga_write(virt_to_phys(reg->MMU_base)>>2, RGA_MMU_TBL);\r
\r
/* CMD buff */\r
- rga_write(virt_to_phys(rga_service.cmd_buff) & (~PAGE_MASK), RGA_CMD_ADDR);\r
-\r
- /* master mode */\r
- rga_write(0x1<<2, RGA_SYS_CTRL);\r
+ rga_write(virt_to_phys(rga_service.cmd_buff) & (~PAGE_MASK), RGA_CMD_ADDR); \r
\r
#if RGA_TEST
{\r
}\r
#endif\r
\r
+ /* master mode */\r
+ rga_write(0x1<<2, RGA_SYS_CTRL);\r
+ \r
/* All CMD finish int */\r
rga_write(0x1<<10, RGA_INT);\r
+ \r
+ //rga_write(1, RGA_MMU_STA_CTRL);\r
\r
/* Start proc */\r
atomic_set(&reg->session->done, 0);
- rga_write(0x1, RGA_CMD_CTRL);\r
+ rga_write(0x1, RGA_CMD_CTRL); \r
+\r
+ //while(1)\r
+ // printk("mmu_status is %.8x\n", rga_read(RGA_MMU_STA));\r
\r
#if RGA_TEST
{\r
for (i=0; i<28; i++) \r
printk("%.8x\n", rga_read(0x100 + i*4)); \r
}\r
- #endif\r
-\r
+ #endif \r
}\r
num--;\r
}\r
atomic_set(&reg->int_enable, 1);
rga_try_set_reg(1);\r
} \r
-\r
+ \r
ret_timeout = wait_event_interruptible_timeout(session->wait, atomic_read(&session->done), RGA_TIMEOUT_DELAY);\r
if (unlikely(ret_timeout < 0)) 
{\r
ret = -ETIMEDOUT;\r
}\r
\r
+ \r
+\r
return ret;\r
\r
//printk("rga_blit_sync done******************\n");\r
int int_enable = 0;\r
\r
DBG("rga_irq %d \n", irq);\r
-\r
+ \r
+ #if RGA_TEST\r
printk("rga_irq is valid\n");\r
+ #endif\r
\r
/*clear INT */\r
rga_write(rga_read(RGA_INT) | (0x1<<6), RGA_INT);\r
+ rga_write(rga_read(RGA_INT) | (0x1<<7), RGA_INT);\r
\r
if ((rga_read(RGA_STATUS) & 0x1) != 0) // idle
{ \r
rga_soft_reset();\r
}\r
\r
+ \r
\r
spin_lock(&rga_service.lock);\r
do\r
goto err_clock;\r
}\r
\r
- #endif\r
+ \r
\r
data->axi_clk = clk_get(&pdev->dev, "aclk_rga");\r
if (IS_ERR(data->axi_clk))
{
ret = -ENOENT;\r
goto err_clock;\r
}\r
+\r
+ #endif\r
\r
\r
/* map the memory */\r
};\r
\r
\r
+//void rga_test_0(void);\r
+\r
+\r
static int __init rga_init(void)\r
{\r
int ret;\r
ERR("Platform device register failed (%d).\n", ret);\r
return ret;\r
}\r
+\r
+ //rga_test_0();\r
\r
INFO("Module initialized.\n"); \r
\r
platform_driver_unregister(&rga_driver); \r
}\r
\r
+\r
+#if 0\r
+extern uint32_t ABGR8888_320_240_swap0[240][320];\r
+\r
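+/* static scratch buffers used by the rga_test_0() self-test below */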
+unsigned int src_buf[800*480];\r
+unsigned int dst_buf[800*480];\r
+unsigned int mmu_buf[1024];\r
+\r
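+/* minimal in-kernel self-test: blits a 320x240 ABGR8888 test image into an 800x480
+ * destination buffer through the RGA MMU path (kept compiled out by the #if 0 above) */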
+void rga_test_0(void)\r
+{\r
+ struct rga_req req;\r
+ rga_session session;\r
+ unsigned int *src, *dst;\r
+\r
+ int i;\r
+\r
+ session.pid = current->pid;\r
+ INIT_LIST_HEAD(&session.waiting);\r
+ INIT_LIST_HEAD(&session.running);\r
+ INIT_LIST_HEAD(&session.list_session);\r
+ init_waitqueue_head(&session.wait);\r
+ /* no need to protect */\r
+ list_add_tail(&session.list_session, &rga_service.session);\r
+ atomic_set(&session.task_running, 0);\r
+ atomic_set(&session.num_done, 0);\r
+ //file->private_data = (void *)session;\r
+\r
+ memset(&req, 0, sizeof(struct rga_req));\r
+ src = (unsigned int *)ABGR8888_320_240_swap0;
+ dst = dst_buf;\r
+\r
+ #if 0\r
+ memset(src_buf, 0x80, 800*480*4);\r
+ memset(dst_buf, 0xcc, 800*480*4);\r
+\r
+ dmac_flush_range(&src_buf[0], &src_buf[800*480]);\r
+ outer_flush_range(virt_to_phys(&src_buf[0]),virt_to_phys(&src_buf[800*480]));\r
+ \r
+ dmac_flush_range(&dst_buf[0], &dst_buf[800*480]);\r
+ outer_flush_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480]));\r
+ #endif\r
+ \r
+ req.src.act_w = 320;\r
+ req.src.act_h = 240;\r
+\r
+ req.src.vir_w = 320;\r
+ req.src.vir_h = 240;\r
+ req.src.yrgb_addr = (unsigned long)src;
+\r
+ req.dst.act_w = 320;\r
+ req.dst.act_h = 240;\r
+\r
+ req.dst.vir_w = 800;\r
+ req.dst.vir_h = 480;\r
+ req.dst.yrgb_addr = (unsigned long)dst;
+\r
+ req.clip.xmin = 0;\r
+ req.clip.xmax = 799;\r
+ req.clip.ymin = 0;\r
+ req.clip.ymax = 479;\r
+ \r
+ \r
+\r
+ req.render_mode = 0;\r
+ req.rotate_mode = 0;\r
+\r
+ req.mmu_info.mmu_flag = 0x21;\r
+ req.mmu_info.mmu_en = 1;\r
+\r
+ rga_blit_sync(&session, &req);\r
+\r
+ #if 0\r
+ outer_inv_range(virt_to_phys(&dst_buf[0]),virt_to_phys(&dst_buf[800*480])); \r
+ dmac_inv_range(&dst_buf[0], &dst_buf[800*480]);\r
+\r
+ for(i=0; i<800*480; i++)\r
+ { \r
+ if(src[i] != dst[i])\r
+ {\r
+ printk("src != dst %d\n", i);\r
+ printk("src = %.8x, dst = %.8x \n", src[i], dst[i]);\r
+ }\r
+ }\r
+ #endif\r
+}\r
+\r
+#endif\r
module_init(rga_init);\r
module_exit(rga_exit);\r
\r
#include "rga_mmu_info.h"\r
\r
extern rga_service_info rga_service;\r
+extern unsigned int mmu_buf[1024];
\r
#define KERNEL_SPACE_VALID 0xc0000000\r
\r
break; \r
}\r
\r
+ printk("MMU_Base addr is %.8x\n", MMU_Base);\r
+ printk("CMDStart is %.8x\n",CMDStart);\r
+\r
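+ /* the first CMDMemSize page-table entries map the pages of the command buffer itself */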
for(i=0; i<CMDMemSize; i++) {\r
MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT)); \r
}\r
\r
+ printk("MMU_Base[0] = %.8x\n", MMU_Base[0]);\r
+\r
if(req->src.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);\r
}\r
} \r
}\r
+\r
+ printk("MMU_Base[1] = %.8x\n", MMU_Base[1]);\r
\r
if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
{\r
/* zsq \r
* change the buf address in req struct \r
*/\r
- \r
req->mmu_info.base_addr = (virt_to_phys(MMU_Base)>>2); \r
\r
req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);\r
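+ /* the source address is now an offset into the MMU-mapped space: the in-page offset is
+ * kept and the page index starts right after the CMDMemSize command-buffer pages */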
/*record the malloc buf for the cmd end to release*/\r
reg->MMU_base = MMU_Base;\r
\r
+ /* flush data to DDR */\r
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
-\r
+ outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
+ \r
status = 0;\r
\r
/* Free the page table */ \r