#define RGA_TEST 0\r
#define RGA_TEST_TIME 0\r
#define RGA_TEST_FLUSH_TIME 0\r
-#define RGA_INFO_BUS_ERROR 0\r
+#define RGA_INFO_BUS_ERROR 1\r
\r
#define PRE_SCALE_BUF_SIZE 2048*1024*4\r
\r
#define RGA_POWER_OFF_DELAY 4*HZ /* 4s */\r
-#define RGA_TIMEOUT_DELAY 1*HZ /* 1s */\r
+#define RGA_TIMEOUT_DELAY 2*HZ /* 2s */\r
\r
#define RGA_MAJOR 255\r
\r
\r
static int rga_blit_async(rga_session *session, struct rga_req *req);\r
static void rga_del_running_list(void);\r
+static void rga_del_running_list_timeout(void);\r
static void rga_try_set_reg(uint32_t num);\r
\r
\r
-\r
/* Logging */\r
#define RGA_DEBUG 1\r
#if RGA_DEBUG\r
\r
if (unlikely(ret_timeout < 0)) {\r
pr_err("flush pid %d wait task ret %d\n", session->pid, ret); \r
- rga_soft_reset();\r
rga_del_running_list();\r
ret = -ETIMEDOUT;\r
} else if (0 == ret_timeout) {\r
pr_err("flush pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));\r
printk("bus = %.8x\n", rga_read(RGA_INT));\r
- rga_soft_reset();\r
- rga_del_running_list();\r
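+        /* tear down the jobs that timed out without waiting on the hardware, then restart the queue from the waiting list */\r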
+ rga_del_running_list_timeout();\r
rga_try_set_reg(1);\r
ret = -ETIMEDOUT;\r
}\r
reg->session = session;\r
    INIT_LIST_HEAD(&reg->session_link);\r
    INIT_LIST_HEAD(&reg->status_link);\r
+\r
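+    /* keep a private copy of the request in the reg, so it can still be inspected (e.g. by print_info()) after submission */\r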
+    memcpy(&reg->req, req, sizeof(struct rga_req));\r
\r
if (req->mmu_info.mmu_en)\r
{\r
    INIT_LIST_HEAD(&reg1->session_link);\r
    INIT_LIST_HEAD(&reg1->status_link);\r
\r
+    memcpy(&reg0->req, req0, sizeof(struct rga_req));\r
+    memcpy(&reg1->req, req1, sizeof(struct rga_req));\r
+\r
if(req0->mmu_info.mmu_en)\r
{\r
ret = rga_set_mmu_info(reg0, req0);\r
else \r
{ \r
/* RGA is idle */\r
- reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link); \r
- rga_soft_reset();\r
+ reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link); \r
rga_copy_reg(reg, 0); \r
rga_reg_from_wait_to_run(reg);\r
\r
dmac_flush_range(&rga_service.cmd_buff[0], &rga_service.cmd_buff[28]);\r
outer_flush_range(virt_to_phys(&rga_service.cmd_buff[0]),virt_to_phys(&rga_service.cmd_buff[28]));\r
\r
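+            /* reset the engine just before its registers are reprogrammed */\r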
+ rga_soft_reset();\r
rga_write(0, RGA_MMU_CTRL); \r
\r
/* CMD buff */\r
#endif\r
\r
/* master mode */\r
- rga_write(0x1<<2, RGA_SYS_CTRL);\r
+ rga_write((0x1<<2)|(0x1<<3), RGA_SYS_CTRL);\r
\r
/* All CMD finish int */\r
- rga_write((0x1<<10)|(0x1<<8), RGA_INT);\r
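+    /* read-modify-write RGA_INT so interrupt bits that are already set are preserved */\r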
+ rga_write(rga_read(RGA_INT)|(0x1<<10)|(0x1<<8), RGA_INT);\r
\r
/* Start proc */\r
        atomic_set(&reg->session->done, 0);\r
}\r
\r
\r
-#if RGA_TEST \r
+#if 1//RGA_TEST \r
static void print_info(struct rga_req *req)\r
{ \r
printk("src.yrgb_addr = %.8x, src.uv_addr = %.8x, src.v_addr = %.8x\n", \r
} \r
}\r
\r
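+/*\r
+ * Variant of rga_del_running_list() used on the timeout paths: every job\r
+ * still on the running list is torn down (its MMU buffer freed, the running\r
+ * counters decremented, and the owning session woken once it has no more\r
+ * pending work) without waiting for a completion interrupt.\r
+ */\r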
+static void rga_del_running_list_timeout(void)\r
+{\r
+ struct rga_reg *reg;\r
+ \r
+ while(!list_empty(&rga_service.running))\r
+ {\r
+ reg = list_entry(rga_service.running.next, struct rga_reg, status_link);\r
+\r
+ if(reg->MMU_base != NULL)\r
+ {\r
+ kfree(reg->MMU_base);\r
+ }\r
+ \r
+        atomic_sub(1, &reg->session->task_running);\r
+ atomic_sub(1, &rga_service.total_running);\r
+\r
+ \r
+ #if 0\r
+ printk("RGA_INT is %.8x\n", rga_read(RGA_INT));\r
+        printk("reg->session->task_running = %d\n", atomic_read(&reg->session->task_running));\r
+ printk("rga_service.total_running = %d\n", atomic_read(&rga_service.total_running));\r
+\r
+        print_info(&reg->req);\r
+\r
+ {\r
+ uint32_t *p, i;\r
+ p = reg->cmd_reg; \r
+ for (i=0; i<7; i++) \r
+ printk("%.8x %.8x %.8x %.8x\n", p[0 + i*4], p[1+i*4], p[2 + i*4], p[3 + i*4]);\r
+ \r
+ }\r
+ #endif\r
+ \r
+        if(list_empty(&reg->session->waiting))\r
+ {\r
+            atomic_set(&reg->session->done, 1);\r
+            wake_up_interruptible_sync(&reg->session->wait);\r
+ }\r
+ \r
+ rga_reg_deinit(reg); \r
+ } \r
+}\r
+\r
+\r
\r
static int rga_blit(rga_session *session, struct rga_req *req)\r
{\r
if (unlikely(ret_timeout< 0)) \r
{\r
pr_err("sync pid %d wait task ret %d\n", session->pid, ret_timeout); \r
- rga_soft_reset();\r
rga_del_running_list();\r
ret = -ETIMEDOUT;\r
} \r
else if (0 == ret_timeout)\r
{\r
pr_err("sync pid %d wait %d task done timeout\n", session->pid, atomic_read(&session->task_running));\r
- rga_soft_reset();\r
- rga_del_running_list();\r
+ rga_del_running_list_timeout();\r
rga_try_set_reg(1);\r
ret = -ETIMEDOUT;\r
}\r
break;\r
}\r
rga_power_on();\r
- ret = rga_blit_async(session, req); \r
+\r
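+        /* fall back to a synchronous blit once more than 16 requests are already in flight */\r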
+        if (atomic_read(&rga_service.total_running) > 16)\r
+ {\r
+ ret = rga_blit_sync(session, req); \r
+ }\r
+ else\r
+ {\r
+ ret = rga_blit_async(session, req); \r
+ }\r
break;\r
case RGA_FLUSH:\r
ret = rga_flush(session, arg);\r
\r
#if RGA_TEST\r
printk("rga_irq is valid\n");\r
- #endif\r
- \r
- while(((rga_read(RGA_STATUS) & 0x1) != 0) && (i<10))// idle\r
- { \r
- mdelay(1);\r
- i++;\r
- }\r
+ #endif \r
\r
- #if RGA_INFO_BUS_ERROR\r
+ #if 0//RGA_INFO_BUS_ERROR\r
if(rga_read(RGA_INT) & 0x1)\r
{\r
printk("bus Error interrupt is occur\n");\r
+ rga_soft_reset();\r
}\r
#endif\r
\r
- /*clear INT */\r
+ /*clear INT */\r
rga_write(rga_read(RGA_INT) | (0x1<<6) | (0x1<<7) | (0x1<<4), RGA_INT);\r
- spin_lock_irqsave(&rga_service.lock, flag);\r
+\r
+ spin_lock_irqsave(&rga_service.lock, flag); \r
rga_del_running_list();\r
spin_unlock_irqrestore(&rga_service.lock, flag);\r
\r
{ \r
/* cal src buf mmu info */ \r
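+    /* size the mapping from the rows actually accessed (y_offset + act_h) instead of the full vir_h; the dst calculation below is changed the same way */\r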
SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
- req->src.format, req->src.vir_w, req->src.vir_h,\r
+ req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,\r
&SrcStart);\r
if(SrcMemSize == 0) {\r
return -EINVAL; \r
\r
/* cal dst buf mmu info */ \r
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+ req->dst.format, req->dst.vir_w, req->dst.act_h + req->dst.y_offset,\r
&DstStart); \r
if(DstMemSize == 0) {\r
return -EINVAL; \r
} \r
}\r
\r
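+    /* append a copy of the last page entry one slot past the end of the table; the other MMU setup paths below add the same padding */\r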
+ MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
+\r
/* zsq \r
* change the buf address in req struct \r
*/\r
MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
}\r
}\r
- \r
+\r
+ MMU_Base[AllSize] = MMU_Base[AllSize - 1]; \r
\r
/* zsq \r
* change the buf address in req struct \r
}\r
}\r
\r
+ MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
+\r
/* zsq \r
* change the buf address in req struct\r
* for the reason of lie to MMU \r
} \r
}\r
\r
+ MMU_Base[AllSize] = MMU_Base[AllSize - 1];\r
+\r
/* zsq \r
* change the buf address in req struct\r
* for the reason of lie to MMU \r