#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include "rga2_mmu_info.h"

extern struct rga2_service_info rga2_service;
extern struct rga2_mmu_buf_t rga2_mmu_buf;

#define KERNEL_SPACE_VALID	0xc0000000

#define V7_VATOPA_SUCCESS_MASK	(0x1)
#define V7_VATOPA_GET_PADDR(X)	((X) & 0xFFFFF000)
#define V7_VATOPA_GET_INER(X)	(((X) >> 4) & 7)
#define V7_VATOPA_GET_OUTER(X)	(((X) >> 2) & 3)
#define V7_VATOPA_GET_SH(X)	(((X) >> 7) & 1)
#define V7_VATOPA_GET_NS(X)	(((X) >> 9) & 1)
#define V7_VATOPA_GET_SS(X)	(((X) >> 1) & 1)
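
/*
 * Translate a virtual address to a physical one with the ARMv7 ATS1CPR
 * operation, reading the result back from the PAR. Returns 0xFFFFFFFF
 * when the translation fails or resolves to a supersection.
 */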
static unsigned int armv7_va_to_pa(unsigned int v_addr)
{
    unsigned int p_addr;

    __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
                       "isb\n"
                       "dsb\n"
                       "mrc p15, 0, %0, c7, c4, 0\n"
                       : "=r" (p_addr)
                       : "r" (v_addr)
                       : "cc");

    if (p_addr & V7_VATOPA_SUCCESS_MASK)
        return 0xFFFFFFFF;

    return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));
}
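
/*
 * Commit a reservation in the preallocated MMU page-table ring buffer by
 * advancing the front pointer; rga2_mmu_buf_get_try() must have verified
 * that the space is available first.
 */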
static int rga2_mmu_buf_get(struct rga2_mmu_buf_t *t, uint32_t size)
{
    mutex_lock(&rga2_service.lock);
    t->front += size;    /* advance past the entries just filled */
    mutex_unlock(&rga2_service.lock);

    return 0;
}
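
/*
 * Check whether 'size' entries fit at the current front of the MMU
 * page-table ring buffer, wrapping the front pointer back to zero when
 * the reservation would run past the end of the buffer.
 */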
static int rga2_mmu_buf_get_try(struct rga2_mmu_buf_t *t, uint32_t size)
{
    int ret = 0;

    mutex_lock(&rga2_service.lock);
    if ((t->back - t->front) > t->size) {
        if (t->front + size > t->back - t->size)
            ret = -ENOMEM;
    } else {
        if ((t->front + size) > t->back) {
            ret = -ENOMEM;
        } else if (t->front + size > t->size) {
            if (size > (t->back - t->size))
                ret = -ENOMEM;
            else
                t->front = 0;    /* wrap to the start of the ring */
        }
    }
    mutex_unlock(&rga2_service.lock);

    return ret;
}
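
/* Check whether 'size' entries fit between the front and back pointers. */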
static int rga2_mmu_buf_cal(struct rga2_mmu_buf_t *t, uint32_t size)
{
    if ((t->front + size) > t->back)
        return -ENOMEM;

    return 0;
}
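
/*
 * Count the pages spanned by [Mem, Mem + MemSize) and store the first
 * page frame number in *StartAddr.
 */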
static int rga2_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
{
    uint32_t start, end;
    uint32_t pageCount;

    end = (Mem + MemSize + PAGE_SIZE - 1) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;
    *StartAddr = start;

    return pageCount;
}
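
/*
 * Work out, per pixel format, how many pages an image buffer spans from
 * its plane addresses and virtual geometry; writes the first page frame
 * number to *StartAddr and returns the page count (0 for unknown formats).
 */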
static int rga2_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
                             int format, uint32_t w, uint32_t h, uint32_t *StartAddr)
{
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t start = 0, end;
    uint32_t pageCount = 0;

    switch (format) {
    case RGA2_FORMAT_RGBA_8888 :
        stride = (w * 4 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RGA2_FORMAT_RGBX_8888 :
        stride = (w * 4 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RGA2_FORMAT_RGB_888 :
        stride = (w * 3 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RGA2_FORMAT_BGRA_8888 :
        /* Stride reconstructed to match the other 32-bit formats. */
        stride = (w * 4 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RGA2_FORMAT_RGB_565 :
        stride = (w * 2 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RGA2_FORMAT_RGBA_5551 :
        stride = (w * 2 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RGA2_FORMAT_RGBA_4444 :
        stride = (w * 2 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;
    case RGA2_FORMAT_BGR_888 :
        stride = (w * 3 + 3) & (~3);
        size_yrgb = stride * h;
        start = yrgb_addr >> PAGE_SHIFT;
        pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
        break;

    /* YUV formats: span runs from the lowest plane address to the
     * highest plane end, so sub-page offsets are accounted for. */
    case RGA2_FORMAT_YCbCr_422_SP :
    case RGA2_FORMAT_YCrCb_422_SP :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = stride * h;
        start = MIN(yrgb_addr, uv_addr);
        start >>= PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RGA2_FORMAT_YCbCr_422_P :
    case RGA2_FORMAT_YCrCb_422_P :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = ((stride >> 1) * h);
        size_v = ((stride >> 1) * h);
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
        start >>= PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RGA2_FORMAT_YCbCr_420_SP :
    case RGA2_FORMAT_YCrCb_420_SP :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = (stride * (h >> 1));
        start = MIN(yrgb_addr, uv_addr);
        start >>= PAGE_SHIFT;
        end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;
    case RGA2_FORMAT_YCbCr_420_P :
    case RGA2_FORMAT_YCrCb_420_P :
        stride = (w + 3) & (~3);
        size_yrgb = stride * h;
        size_uv = ((stride >> 1) * (h >> 1));
        size_v = ((stride >> 1) * (h >> 1));
        start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
        start >>= PAGE_SHIFT;
        end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
        end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        pageCount = end - start;
        break;

    case RK_FORMAT_BPP1 :
    case RK_FORMAT_BPP2 :
    case RK_FORMAT_BPP4 :
    case RK_FORMAT_BPP8 :
        /* BPP sources are sized elsewhere (palette path); leave
         * start and pageCount at 0. */
        break;
    default :
        break;
    }

    *StartAddr = start;

    return pageCount;
}
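
/*
 * Pin a user buffer of 'pageCount' pages starting at page frame 'Memory'
 * and fill 'pageTable' with the physical address of each page, falling
 * back to a manual page-table walk when get_user_pages() cannot pin the
 * whole range.
 */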
static int rga2_MapUserMemory(struct page **pages,
                              uint32_t *pageTable,
                              uint32_t Memory,
                              uint32_t pageCount)
{
    int32_t result;
    uint32_t i;
    int status = 0;
    uint32_t Address = 0;

    /* Try to pin the whole user buffer in one go (write access). */
    down_read(&current->mm->mmap_sem);
    result = get_user_pages(current, current->mm,
                            Memory << PAGE_SHIFT,
                            pageCount, 1, 0, pages, NULL);
    up_read(&current->mm->mmap_sem);

#if 0
    /* Old fallback, kept disabled: resolve each page with the ARMv7
     * hardware VA-to-PA translation operation. */
    if (result <= 0 || result < pageCount) {
        uint32_t temp;

        for (i = 0; i < pageCount; i++) {
            temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);
            if (temp == 0xffffffff) {
                printk("rga2 find mmu phy ddr error\n");
                status = RGA_OUT_OF_RESOURCES;
                break;
            }
            pageTable[i] = temp;
        }
        return status;
    }
#else
    if (result <= 0 || result < pageCount) {
        struct vm_area_struct *vma;

        /* Drop the references taken by a partial get_user_pages() so
         * the pinned pages are not leaked. */
        if (result > 0)
            for (i = 0; i < (uint32_t)result; i++)
                put_page(pages[i]);

        /* Walk the page tables by hand, one page at a time. */
        for (i = 0; i < pageCount; i++) {
            vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);
            if (!vma) {
                status = RGA2_OUT_OF_RESOURCES;
                break;
            }

            do {
                pte_t *pte;
                spinlock_t *ptl;
                unsigned long pfn;
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;

                pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
                if (pgd_val(*pgd) == 0)
                    break;

                pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
                if (!pud)
                    break;

                pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
                if (!pmd)
                    break;

                pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
                if (!pte) {
                    pte_unmap_unlock(pte, ptl);
                    break;
                }

                pfn = pte_pfn(*pte);
                Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
                pte_unmap_unlock(pte, ptl);
            } while (0);

            pageTable[i] = Address;
        }

        return status;
    }
#endif

    /* Fill the page table with the physical address of each pinned page. */
    for (i = 0; i < pageCount; i++)
        pageTable[i] = page_to_phys(pages[i]);

    return 0;
}
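
/*
 * Build the MMU tables for bitblt mode: reserve space in the MMU ring
 * buffer, map the src0, src1 and dst user buffers, and rewrite the
 * request addresses as offsets the RGA2 MMU can resolve.
 */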
static int rga2_mmu_info_BitBlt_mode(struct rga2_reg *reg, struct rga2_req *req)
{
    int Src0MemSize, DstMemSize, Src1MemSize;
    uint32_t Src0Start, Src1Start, DstStart;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_Base_phys;
    int ret;
    int status = 0;
    uint32_t uv_size, v_size;
    struct page **pages = NULL;

    MMU_Base = NULL;
    Src0MemSize = Src1MemSize = DstMemSize = 0;

    /* cal src0 buf mmu info */
    if (req->mmu_info.src0_mmu_flag & 1) {
        Src0MemSize = rga2_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                        req->src.format, req->src.vir_w,
                                        req->src.vir_h, &Src0Start);
        if (Src0MemSize == 0)
            return -EINVAL;
    }

    /* cal src1 buf mmu info */
    if (req->mmu_info.src1_mmu_flag & 1) {
        Src1MemSize = rga2_buf_size_cal(req->src1.yrgb_addr, req->src1.uv_addr, req->src1.v_addr,
                                        req->src1.format, req->src1.vir_w,
                                        req->src1.vir_h, &Src1Start);
        Src0MemSize = (Src0MemSize + 3) & (~3);
        if (Src1MemSize == 0)
            return -EINVAL;
    }

    /* cal dst buf mmu info */
    if (req->mmu_info.dst_mmu_flag & 1) {
        DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                       req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                       &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;
    }

    /* Cal out the needed mem size */
    Src0MemSize = (Src0MemSize + 15) & (~15);
    Src1MemSize = (Src1MemSize + 15) & (~15);
    DstMemSize  = (DstMemSize + 15) & (~15);
    AllSize = Src0MemSize + Src1MemSize + DstMemSize;

    pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        status = RGA2_MALLOC_ERROR;
        goto out;
    }

    if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
        pr_err("RGA2 Get MMU mem failed\n");
        status = RGA2_MALLOC_ERROR;
        goto out;
    }

    mutex_lock(&rga2_service.lock);
    MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
    MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
    mutex_unlock(&rga2_service.lock);

    if (Src0MemSize) {
        ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], Src0Start, Src0MemSize);
        if (ret < 0) {
            pr_err("rga2 map src0 memory failed\n");
            status = ret;
            goto out;
        }

        /* change the buf address in req struct */
        req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys));
        uv_size = (req->src.uv_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (Src0Start << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
    }

    if (Src1MemSize) {
        ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize, Src1Start, Src1MemSize);
        if (ret < 0) {
            pr_err("rga2 map src1 memory failed\n");
            status = ret;
            goto out;
        }

        /* change the buf address in req struct */
        req->mmu_info.src1_base_addr = ((uint32_t)(MMU_Base_phys + Src0MemSize));
        req->src1.yrgb_addr = (req->src1.yrgb_addr & (~PAGE_MASK));
    }

    if (DstMemSize) {
        ret = rga2_MapUserMemory(&pages[0], MMU_Base + Src0MemSize + Src1MemSize, DstStart, DstMemSize);
        if (ret < 0) {
            pr_err("rga2 map dst memory failed\n");
            status = ret;
            goto out;
        }

        /* change the buf address in req struct */
        req->mmu_info.dst_base_addr = ((uint32_t)(MMU_Base_phys + Src0MemSize + Src1MemSize));
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);
    }

    /* flush data to DDR */
    dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
    outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

    /* Commit the reservation made by rga2_mmu_buf_get_try(). */
    rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
    reg->MMU_len = AllSize;

out:
    /* Free the page array; MMU_Base points into the preallocated
     * rga2_mmu_buf ring buffer and must not be freed here. */
    if (pages != NULL)
        kfree(pages);

    return status;
}
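
/*
 * Build the MMU table for color palette mode: command buffer pages first,
 * then the 1/2/4/8 bpp source bitmap, then the destination buffer.
 */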
static int rga2_mmu_info_color_palette_mode(struct rga2_reg *reg, struct rga2_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base = NULL;
    uint32_t *MMU_p;
    int ret, status = 0;
    uint32_t stride;

    uint8_t shift;
    uint16_t sw, byte_num;

    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);

    /* cal src buf mmu info */
    SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
    if (SrcMemSize == 0)
        return -EINVAL;

    /* cal dst buf mmu info */
    DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                   req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                   &DstStart);
    if (DstMemSize == 0)
        return -EINVAL;

    /* cal cmd buf mmu info */
    CMDMemSize = rga2_mem_size_cal((uint32_t)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = SrcMemSize + DstMemSize + CMDMemSize;

    pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        status = RGA2_MALLOC_ERROR;
        goto out;
    }

    MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        status = RGA2_MALLOC_ERROR;
        goto out;
    }

    /* The command buffer is kernel memory; fill its entries directly. */
    for (i = 0; i < CMDMemSize; i++)
        MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

    if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
        ret = rga2_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
        if (ret < 0) {
            pr_err("rga map src memory failed\n");
            status = ret;
            goto out;
        }
    } else {
        MMU_p = MMU_Base + CMDMemSize;

        for (i = 0; i < SrcMemSize; i++)
            MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
    }

    if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
        ret = rga2_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
        if (ret < 0) {
            pr_err("rga map dst memory failed\n");
            status = ret;
            goto out;
        }
    } else {
        MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

        for (i = 0; i < DstMemSize; i++)
            MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
    }

    /*
     * Rewrite the buffer addresses in the req struct as page offsets into
     * the MMU table; the RGA2 MMU resolves them from there.
     */
    req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base) >> 2);
    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
    req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

    /* record the malloc buf for the cmd end to release */
    reg->MMU_base = MMU_Base;

    /* flush data to DDR; only the AllSize entries that were filled */
    dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
    outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

    /* Free the page table */
    kfree(pages);

    return 0;

out:
    /* Free the page table */
    if (pages != NULL)
        kfree(pages);

    /* Free mmu table */
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
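
/*
 * Build the MMU table for color fill mode; only the destination buffer
 * needs to be mapped.
 */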
static int rga2_mmu_info_color_fill_mode(struct rga2_reg *reg, struct rga2_req *req)
{
    int DstMemSize = 0;
    uint32_t DstStart = 0;
    struct page **pages = NULL;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_Base_phys;
    int ret;
    int status = 0;

    MMU_Base = NULL;

    if (req->mmu_info.dst_mmu_flag & 1) {
        DstMemSize = rga2_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                       req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                       &DstStart);
        if (DstMemSize == 0)
            return -EINVAL;
    }

    AllSize = (DstMemSize + 3) & (~3);

    pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA2 MMU malloc pages mem failed\n");
        status = RGA2_MALLOC_ERROR;
        goto out;
    }

    if (rga2_mmu_buf_get_try(&rga2_mmu_buf, AllSize)) {
        pr_err("RGA2 Get MMU mem failed\n");
        status = RGA2_MALLOC_ERROR;
        goto out;
    }

    mutex_lock(&rga2_service.lock);
    MMU_Base_phys = rga2_mmu_buf.buf + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
    MMU_Base = rga2_mmu_buf.buf_virtual + (rga2_mmu_buf.front & (rga2_mmu_buf.size - 1));
    mutex_unlock(&rga2_service.lock);

    if (DstMemSize) {
        ret = rga2_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
        if (ret < 0) {
            pr_err("rga2 map dst memory failed\n");
            status = ret;
            goto out;
        }

        /* change the buf address in req struct */
        req->mmu_info.src0_base_addr = (((uint32_t)MMU_Base_phys) >> 4);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));
    }

    /* flush data to DDR */
    dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
    outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

    /* Commit the reservation; rga2_mmu_buf_get_try() only checks space. */
    rga2_mmu_buf_get(&rga2_mmu_buf, AllSize);
    reg->MMU_len = AllSize;

out:
    /* Free the page array; MMU_Base points into the preallocated
     * rga2_mmu_buf ring buffer and must not be freed here. */
    if (pages != NULL)
        kfree(pages);

    return status;
}
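
/*
 * Map the palette table source buffer plus the command buffer into a
 * freshly allocated MMU table, recorded in reg->MMU_base for release
 * once the command completes.
 */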
static int rga2_mmu_info_update_palette_table_mode(struct rga2_reg *reg, struct rga2_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base = NULL, *MMU_p;
    int ret, status = 0;

    /* cal src buf mmu info */
    SrcMemSize = rga2_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
    if (SrcMemSize == 0)
        return -EINVAL;

    /* cal cmd buf mmu info */
    CMDMemSize = rga2_mem_size_cal((uint32_t)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = SrcMemSize + CMDMemSize;

    pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        status = RGA2_MALLOC_ERROR;
        goto out;
    }

    MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        status = RGA2_MALLOC_ERROR;
        goto out;
    }

    for (i = 0; i < CMDMemSize; i++)
        MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

    if (req->src.yrgb_addr < KERNEL_SPACE_VALID) {
        ret = rga2_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
        if (ret < 0) {
            pr_err("rga map src memory failed\n");
            status = ret;
            goto out;
        }
    } else {
        MMU_p = MMU_Base + CMDMemSize;

        for (i = 0; i < SrcMemSize; i++)
            MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
    }

    /*
     * Rewrite the source address in the req struct as a page offset into
     * the MMU table; the RGA2 MMU resolves it from there.
     */
    req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base) >> 2);
    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

    /* record the malloc buf for the cmd end to release */
    reg->MMU_base = MMU_Base;

    /* flush data to DDR */
    dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
    outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

    /* Free the page table */
    kfree(pages);

    return 0;

out:
    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
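
/*
 * Map the pattern buffer plus the command buffer into a freshly allocated
 * MMU table, recorded in reg->MMU_base for release once the command
 * completes.
 */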
static int rga2_mmu_info_update_patten_buff_mode(struct rga2_reg *reg, struct rga2_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = MMU_p = NULL;

    /* cal src buf mmu info */
    SrcMemSize = rga2_mem_size_cal(req->pat.yrgb_addr, req->pat.act_w * req->pat.act_h * 4, &SrcStart);
    if (SrcMemSize == 0)
        return -EINVAL;

    /* cal cmd buf mmu info */
    CMDMemSize = rga2_mem_size_cal((uint32_t)rga2_service.cmd_buff, RGA2_CMD_BUF_SIZE, &CMDStart);
    if (CMDMemSize == 0)
        return -EINVAL;

    AllSize = SrcMemSize + CMDMemSize;

    pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
    if (pages == NULL) {
        pr_err("RGA MMU malloc pages mem failed\n");
        status = RGA2_MALLOC_ERROR;
        goto out;
    }

    MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
    if (MMU_Base == NULL) {
        pr_err("RGA MMU malloc MMU_Base point failed\n");
        status = RGA2_MALLOC_ERROR;
        goto out;
    }

    for (i = 0; i < CMDMemSize; i++)
        MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

    /* The pattern buffer is the one being mapped, so its address decides
     * between the user-space and kernel-space paths. */
    if (req->pat.yrgb_addr < KERNEL_SPACE_VALID) {
        ret = rga2_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
        if (ret < 0) {
            pr_err("rga map src memory failed\n");
            status = ret;
            goto out;
        }
    } else {
        MMU_p = MMU_Base + CMDMemSize;

        for (i = 0; i < SrcMemSize; i++)
            MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
    }

    /*
     * Rewrite the source address in the req struct as a page offset into
     * the MMU table; the RGA2 MMU resolves it from there.
     */
    req->mmu_info.src0_base_addr = (virt_to_phys(MMU_Base) >> 2);
    req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

    /* record the malloc buf for the cmd end to release */
    reg->MMU_base = MMU_Base;

    /* flush data to DDR */
    dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
    outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

    /* Free the page table */
    kfree(pages);

    return 0;

out:
    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
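
/* Dispatch MMU table setup according to the request's render mode. */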
int rga2_set_mmu_info(struct rga2_reg *reg, struct rga2_req *req)
{
    int ret;

    switch (req->render_mode) {
    case bitblt_mode :
        ret = rga2_mmu_info_BitBlt_mode(reg, req);
        break;
    case color_palette_mode :
        ret = rga2_mmu_info_color_palette_mode(reg, req);
        break;
    case color_fill_mode :
        ret = rga2_mmu_info_color_fill_mode(reg, req);
        break;
    case update_palette_table_mode :
        ret = rga2_mmu_info_update_palette_table_mode(reg, req);
        break;
    case update_patten_buff_mode :
        ret = rga2_mmu_info_update_patten_buff_mode(reg, req);
        break;
    default :
        ret = -EINVAL;
        break;
    }

    return ret;
}