{\r
int total_running;\r
\r
- //printk("rga_power_off\n");\r
+ printk("rga_power_off\n");\r
if(!drvdata->enable)\r
return;\r
\r
reg->session = session;\r
INIT_LIST_HEAD(&reg->session_link);\r
INIT_LIST_HEAD(&reg->status_link);\r
- \r
+ \r
if (req->mmu_info.mmu_en)\r
{\r
ret = rga_set_mmu_info(reg, req);\r
if (ret < 0)\r
    return NULL;\r
}\r
}\r
- \r
+\r
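+ /* rga_start and rga_end are assumed to be file-scope ktime_t variables;\r
+  * they bracket RGA_gen_reg_info() below so the per-command register\r
+  * generation time can be logged in microseconds via ktime_to_us(). */\r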
+ #if RGA_TEST_TIME\r
+ rga_start = ktime_get();\r
+ #endif \r
RGA_gen_reg_info(req, (uint8_t *)reg->cmd_reg);\r
\r
+ #if RGA_TEST_TIME\r
+ rga_end = ktime_get();\r
+ rga_end = ktime_sub(rga_end, rga_start);\r
+ printk("one cmd end time %d\n", (int)ktime_to_us(rga_end));\r
+ #endif\r
+\r
spin_lock_irqsave(&rga_service.lock, flag);\r
list_add_tail(&reg->status_link, &rga_service.waiting);\r
list_add_tail(&reg->session_link, &session->waiting);\r
do\r
{ \r
struct rga_reg *reg = list_entry(rga_service.waiting.next, struct rga_reg, status_link);\r
- \r
- //if(((reg->cmd_reg[0] & 0xf0) >= 3) && ((reg->cmd_reg[0] & 0xf0) <= 7) && rga_service.last_prc_src_format == 0) \r
- \r
offset = atomic_read(&rga_service.cmd_num);\r
if((rga_read(RGA_STATUS) & 0x1)) \r
- { \r
- #if 0\r
- #if RGA_TEST\r
- /* RGA is busy */\r
- printk(" rga try set reg while rga is working \n");\r
- #endif\r
- \r
- if((atomic_read(&rga_service.cmd_num) <= 0xf) && (atomic_read(&rga_service.int_disable) == 0)) \r
- { \r
- rga_copy_reg(reg, offset); \r
- rga_reg_from_wait_to_run(reg);\r
-\r
- dmac_flush_range(&rga_service.cmd_buff[offset*28], &rga_service.cmd_buff[(offset + 1)*28]);\r
- outer_flush_range(virt_to_phys(&rga_service.cmd_buff[offset*28]),\r
- virt_to_phys(&rga_service.cmd_buff[(offset + 1)*28]));\r
- \r
- rga_write(0x1<<10, RGA_INT);\r
-\r
- #if RGA_TEST\r
- {\r
- uint32_t i;\r
- printk("CMD_REG num is %.8x\n", offset);\r
- for(i=0; i<7; i++)\r
- {\r
- printk("%.8x ", rga_service.cmd_buff[i*4 + 0 + 28*atomic_read(&rga_service.cmd_num)]);\r
- printk("%.8x ", rga_service.cmd_buff[i*4 + 1 + 28*atomic_read(&rga_service.cmd_num)]);\r
- printk("%.8x ", rga_service.cmd_buff[i*4 + 2 + 28*atomic_read(&rga_service.cmd_num)]);\r
- printk("%.8x\n",rga_service.cmd_buff[i*4 + 3 + 28*atomic_read(&rga_service.cmd_num)]);\r
- }\r
- }\r
- #endif\r
- \r
- atomic_set(&reg->session->done, 0);\r
- rga_write((0x1<<3)|(0x1<<1), RGA_CMD_CTRL);\r
- \r
- if(atomic_read(&reg->int_enable))\r
- atomic_set(&rga_service.int_disable, 1);\r
- }\r
- #endif\r
+ { \r
break;\r
}\r
else \r
\r
/* All CMD finish int */\r
rga_write(0x1<<10, RGA_INT);\r
- \r
- #if RGA_TEST_TIME\r
- rga_start = ktime_get();\r
- #endif\r
- \r
+ \r
/* Start proc */\r
atomic_set(&reg->session->done, 0);\r
rga_write(0x1, RGA_CMD_CTRL); \r
\r
-\r
#if RGA_TEST\r
{\r
uint32_t i;\r
}\r
\r
\r
+#if RGA_TEST \r
static void print_info(struct rga_req *req)\r
-{\r
- #if RGA_TEST \r
+{ \r
printk("src.yrgb_addr = %.8x, src.uv_addr = %.8x, src.v_addr = %.8x\n", \r
req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr);\r
printk("src : act_w = %d, act_h = %d, vir_w = %d, vir_h = %d\n", \r
req->src.act_w, req->src.act_h, req->src.vir_w, req->src.vir_h);\r
\r
printk("clip.xmin = %d, clip.xmax = %d. clip.ymin = %d, clip.ymax = %d\n", \r
- req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax);\r
- #endif\r
+ req->clip.xmin, req->clip.xmax, req->clip.ymin, req->clip.ymax); \r
}\r
+#endif\r
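+/* print_info() is now compiled only under RGA_TEST; the call sites\r
+ * visible in this patch are themselves inside #if RGA_TEST blocks, so\r
+ * no stub is needed when the flag is off. */\r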
\r
\r
static int rga_blit_async(rga_session *session, struct rga_req *req)\r
printk("*** rga_blit_async proc ***\n");\r
print_info(req);\r
#endif\r
- \r
+ \r
saw = req->src.act_w;\r
sah = req->src.act_h;\r
daw = req->dst.act_w;\r
dah = req->dst.act_h;\r
\r
- /* special case proc */\r
- if(req->src.act_w == 360 && req->src.act_h == 64 && req->rotate_mode == 0)\r
- req->rotate_mode = 1;\r
-\r
do\r
{\r
if((req->render_mode == bitblt_mode) && (((saw>>1) >= daw) || ((sah>>1) >= dah))) \r
//rga_power_on();\r
atomic_set(&reg->int_enable, 1);\r
rga_try_set_reg(num);\r
-\r
- return 0; \r
+ \r
+ return 0; \r
}\r
while(0);\r
\r
printk("*** rga_blit_sync proc ***\n");\r
print_info(req);\r
#endif\r
-\r
- /* special case proc*/\r
- if(req->src.act_w == 360 && req->src.act_h == 64 && req->rotate_mode == 0)\r
- req->rotate_mode = 1;\r
- \r
-\r
+ \r
do\r
{\r
if((req->render_mode == bitblt_mode) && (((saw>>1) >= daw) || ((sah>>1) >= dah))) \r
if(req != NULL) {\r
kfree(req);\r
} \r
- \r
+ \r
return ret;\r
}\r
\r
uint32_t num = 0;\r
struct list_head *next;\r
int int_enable = 0;\r
-\r
- //DBG("rga_irq %d \n", irq);\r
\r
#if RGA_TEST\r
printk("rga_irq is valid\n");\r
next = &rga_service.waiting;\r
\r
/* add cmd to cmd buf */\r
+ /*\r
while((!list_empty(next)) && ((int_enable) == 0) && (num <= 0xf))\r
{ \r
num += 1;\r
reg = list_entry(next->next, struct rga_reg, status_link);\r
int_enable = atomic_read(&reg->int_enable);\r
next = next->next;\r
- } \r
-\r
- rga_try_set_reg(num);\r
+ } \r
+ */\r
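+ /* The batched walk above is commented out, so each interrupt now\r
+  * submits exactly one pending register set; the assumed intent is to\r
+  * avoid queueing further commands while the engine may still be busy. */\r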
+ rga_try_set_reg(1);\r
\r
return IRQ_HANDLED;\r
}\r
pr_cont("done\n");\r
}\r
\r
-\r
-\r
struct file_operations rga_fops = {\r
.owner = THIS_MODULE,\r
.open = rga_open,\r
atomic_set(&rga_service.total_running, 0);\r
atomic_set(&rga_service.src_format_swt, 0);\r
rga_service.last_prc_src_format = 1; /* default is yuv first */\r
- rga_service.enabled = false; \r
+ data->enable = 0;\r
\r
if(NULL == data)\r
{\r
\r
for(i=0; i<pageCount; i++)\r
{\r
- t_mem = Memory + i;\r
+ t_mem = (Memory + i) << PAGE_SHIFT;\r
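+ /* t_mem now carries the full byte address (page index shifted by\r
+  * PAGE_SHIFT) rather than a page number, so the lookups below can\r
+  * drop their own shifts. */\r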
\r
- vma = find_vma(current->mm, (t_mem) << PAGE_SHIFT);\r
+ vma = find_vma(current->mm, t_mem);\r
\r
if (vma && (vma->vm_flags & VM_PFNMAP) )\r
{\r
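+ /* A VM_PFNMAP area has no struct page to pin, so the physical address\r
+  * is recovered with a manual pgd -> pud -> pmd -> pte walk on t_mem,\r
+  * combining pfn << PAGE_SHIFT with the in-page offset of t_mem. */\r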
spinlock_t * ptl;\r
unsigned long pfn; \r
\r
- pgd_t * pgd = pgd_offset(current->mm, ((t_mem)<< PAGE_SHIFT));\r
- pud_t * pud = pud_offset(pgd, ((t_mem) << PAGE_SHIFT));\r
+ pgd_t * pgd = pgd_offset(current->mm, t_mem);\r
+ pud_t * pud = pud_offset(pgd, t_mem);\r
if (pud)\r
{\r
- pmd_t * pmd = pmd_offset(pud, ((t_mem) << PAGE_SHIFT));\r
+ pmd_t * pmd = pmd_offset(pud, t_mem);\r
if (pmd)\r
{\r
- pte = pte_offset_map_lock(current->mm, pmd, ((t_mem)<< PAGE_SHIFT), &ptl);\r
+ pte = pte_offset_map_lock(current->mm, pmd, t_mem, &ptl);\r
if (!pte)\r
{\r
break;\r
}\r
\r
pfn = pte_pfn(*pte);\r
- Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((t_mem) << PAGE_SHIFT)) & ~PAGE_MASK)); \r
+ Address = ((pfn << PAGE_SHIFT) | (((unsigned long)t_mem) & ~PAGE_MASK)); \r
pte_unmap_unlock(pte, ptl);\r
\r
#if 0\r
struct page **pages = NULL;\r
\r
MMU_Base = NULL;\r
-\r
- if(req->src.act_w == 360 && req->src.act_h == 64)\r
- mmu_flag = 1;\r
- else\r
- mmu_flag = 0;\r
-\r
+ \r
do\r
{ \r
/* cal src buf mmu info */ \r
SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,\r
- req->src.format, req->src.vir_w, req->src.vir_h,\r
+ req->src.format, req->src.vir_w, (req->src.act_h + req->src.y_offset),\r
&SrcStart);\r
if(SrcMemSize == 0) {\r
return -EINVAL; \r
\r
/* cal dst buf mmu info */ \r
DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,\r
- req->dst.format, req->dst.vir_w, req->dst.vir_h,\r
+ req->dst.format, req->dst.vir_w, (req->dst.act_h + req->dst.y_offset),\r
&DstStart); \r
if(DstMemSize == 0) {\r
return -EINVAL; \r
}\r
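+ /* Sizing by (act_h + y_offset) instead of vir_h above maps only the\r
+  * rows the operation actually touches, which can be fewer pages than\r
+  * the full virtual height (assumed rationale for this change). */\r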
\r
+ //DstMemSize += 1;\r
+\r
/* cal cmd buf mmu info */\r
CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);\r
if(CMDMemSize == 0) {\r
\r
/* Calculate the needed mem size */\r
AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
- \r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ \r
+ pages = (struct page **)kmalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) {\r
pr_err("RGA MMU malloc pages mem failed\n");\r
status = RGA_MALLOC_ERROR;\r
break; \r
}\r
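+ /* One slot beyond AllSize is reserved in both tables, presumably as\r
+  * guard slack for the MMU index; the same +1 is applied to the\r
+  * MMU_Base allocation below. */\r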
\r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL); \r
+ MMU_Base = (uint32_t *)kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {\r
pr_err("RGA MMU malloc MMU_Base point failed\n");\r
status = RGA_MALLOC_ERROR;\r
} \r
}\r
else\r
- { \r
+ { \r
for(i=0; i<SrcMemSize; i++)\r
{\r
MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));\r
- }\r
+ } \r
} \r
}\r
\r
if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)\r
- {\r
+ { \r
#if 0\r
ktime_t start, end;\r
start = ktime_get();\r
for(i=0; i<DstMemSize; i++)\r
{\r
MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));\r
- } \r
+ } \r
}\r
\r
/* zsq \r
\r
/* record the malloc'd buf so it can be freed when the cmd completes */\r
reg->MMU_base = MMU_Base;\r
-\r
+ \r
/* flush data to DDR */\r
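+ /* dmac_flush_range() flushes the CPU data cache for the table and\r
+  * outer_flush_range() flushes the outer (L2) cache by physical\r
+  * address, so the RGA's MMU reads a coherent page table from DDR. */\r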
dmac_flush_range(MMU_Base, (MMU_Base + AllSize));\r
outer_flush_range(virt_to_phys(MMU_Base),virt_to_phys(MMU_Base + AllSize));\r
- \r
- status = 0;\r
\r
+ status = 0;\r
+ \r
/* Free the page table */ \r
if (pages != NULL) {\r
kfree(pages);\r
\r
AllSize = SrcMemSize + DstMemSize + CMDMemSize;\r
\r
- pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);\r
+ pages = (struct page **)kmalloc((AllSize)* sizeof(struct page *), GFP_KERNEL);\r
if(pages == NULL) \r
{\r
pr_err("RGA MMU malloc pages mem failed\n");\r
* Allocate MMU Index mem\r
* This mem is released in the run_to_done function.\r
*/\r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+ MMU_Base = (uint32_t *)kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {\r
pr_err("RGA MMU malloc MMU_Base point failed\n");\r
status = RGA_MALLOC_ERROR; \r
break; \r
}\r
\r
- MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);\r
+ MMU_Base = (uint32_t *)kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);\r
if(MMU_Base == NULL) {\r
pr_err("RGA MMU malloc MMU_Base point failed\n");\r
status = RGA_MALLOC_ERROR;\r