#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include "rga_mmu_info.h"
extern rga_service_info rga_service;

#define KERNEL_SPACE_VALID    0xc0000000
#define V7_VATOPA_SUCESS_MASK   (0x1)
#define V7_VATOPA_GET_PADDR(X)  ((X) & 0xFFFFF000)
#define V7_VATOPA_GET_INER(X)   (((X) >> 4) & 7)
#define V7_VATOPA_GET_OUTER(X)  (((X) >> 2) & 3)
#define V7_VATOPA_GET_SH(X)     (((X) >> 7) & 1)
#define V7_VATOPA_GET_NS(X)     (((X) >> 9) & 1)
#define V7_VATOPA_GET_SS(X)     (((X) >> 1) & 1)
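/*
 * Translate a virtual address to a physical address by asking the MMU
 * directly: the MCR below issues the ARMv7 CP15 ATS1CPR operation
 * (stage-1 privileged-read translation, c7/c8/0) and the MRC reads the
 * result back from the Physical Address Register (PAR, c7/c4/0).
 * PAR bit 0 set means the translation aborted, and a supersection
 * result (SS bit) cannot be decoded with the 4 KB page mask above, so
 * both cases are reported as 0xFFFFFFFF.
 */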
static unsigned int armv7_va_to_pa(unsigned int v_addr)
{
    unsigned int p_addr;

    __asm__ volatile ( "mcr p15, 0, %1, c7, c8, 0\n"
                       "isb\n"
                       "dsb\n"
                       "mrc p15, 0, %0, c7, c4, 0\n"
                       : "=r" (p_addr)
                       : "r" (v_addr)
                       : "cc");

    if (p_addr & V7_VATOPA_SUCESS_MASK)
        return 0xFFFFFFFF;

    return (V7_VATOPA_GET_SS(p_addr) ? 0xFFFFFFFF : V7_VATOPA_GET_PADDR(p_addr));
}
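/*
 * Count how many 4 KB pages a linear buffer touches: round the end up
 * and the start down to page boundaries and return the difference,
 * reporting the first page index through *StartAddr. For example, a
 * 6000-byte buffer starting 1 KB into page 0 ends at byte 7024, so it
 * spans pages 0 and 1: (1024 + 6000 + 4095) >> 12 = 2.
 */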
static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
{
    uint32_t start, end;
    uint32_t pageCount;

    end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;
    *StartAddr = start;

    return pageCount;
}
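/*
 * Per-format page count for an image buffer. The RGB formats occupy a
 * single plane whose stride is the row length rounded up to 4 bytes;
 * the YUV formats span two planes (semi-planar, *_SP) or three planes
 * (planar, *_P), so the page range runs from the lowest plane address
 * to the end of the highest one. The stride/2 and h/2 factors below
 * reflect 4:2:2 (horizontal) and 4:2:0 (horizontal + vertical) chroma
 * subsampling.
 */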
static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr )
{
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t start = 0, end = 0;
    uint32_t pageCount = 0;

    switch (format)
    {
        case RK_FORMAT_RGBA_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
            break;
        case RK_FORMAT_RGBX_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
            break;
        case RK_FORMAT_RGB_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
            break;
        case RK_FORMAT_BGRA_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
            break;
        case RK_FORMAT_RGB_565 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
            break;
        case RK_FORMAT_RGBA_5551 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
            break;
        case RK_FORMAT_RGBA_4444 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
            break;
        case RK_FORMAT_BGR_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = (size_yrgb + PAGE_SIZE - 1) >> PAGE_SHIFT;
            break;

        /* YUV formats */
        case RK_FORMAT_YCbCr_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        case RK_FORMAT_BPP1 :
            break;
        case RK_FORMAT_BPP2 :
            break;
        case RK_FORMAT_BPP4 :
            break;
        case RK_FORMAT_BPP8 :
            break;
        default :
            break;
    }

    *StartAddr = start;
    return pageCount;
}
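/*
 * Pin a user buffer and record the physical address of each of its
 * pages in the RGA page table. The fast path pins everything through
 * get_user_pages(); when that cannot resolve every page (for example
 * VM_PFNMAP mappings without backing struct pages), the partial pin is
 * dropped and each page is resolved by hand, walking
 * pgd -> pud -> pmd -> pte under the PTE lock, with the CP15
 * translation above as a last resort.
 */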
static int rga_MapUserMemory(struct page **pages,
                             uint32_t *pageTable,
                             uint32_t Memory,
                             uint32_t pageCount)
{
    int32_t result;
    uint32_t i;
    uint32_t status = 0;
    uint32_t Address = 0;
    uint32_t temp;

    /* Fast path: pin every page of the user buffer at once. */
    down_read(&current->mm->mmap_sem);
    result = get_user_pages(current, current->mm,
                            Memory << PAGE_SHIFT,
                            pageCount,
                            1, 0,       /* writable, no force */
                            pages, NULL);
    up_read(&current->mm->mmap_sem);

    if (result <= 0 || result < pageCount)
    {
        struct vm_area_struct *vma;

        /* Release any partial pin before walking the tables by hand. */
        if (result > 0) {
            down_read(&current->mm->mmap_sem);
            for (i = 0; i < result; i++)
                put_page(pages[i]);
            up_read(&current->mm->mmap_sem);
        }

        for (i = 0; i < pageCount; i++)
        {
            vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);

            if (vma)//&& (vma->vm_flags & VM_PFNMAP) )
            {
                do
                {
                    pte_t *pte;
                    spinlock_t *ptl;
                    unsigned long pfn;
                    pgd_t *pgd;
                    pud_t *pud;
                    pmd_t *pmd;

                    pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
                    if (pgd_val(*pgd) == 0)
                        break;

                    pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
                    if (!pud)
                        break;

                    pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
                    if (!pmd)
                        break;

                    pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
                    if (!pte)
                        break;

                    pfn = pte_pfn(*pte);
                    Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
                    pte_unmap_unlock(pte, ptl);
                } while (0);

                /* Last resort: ask the MMU hardware for the translation. */
                if (Address == 0) {
                    temp = armv7_va_to_pa((Memory + i) << PAGE_SHIFT);
                    if (temp == 0xffffffff) {
                        printk("rga find mmu phy ddr error\n");
                        status = RGA_OUT_OF_RESOURCES;
                        break;
                    }
                    Address = temp;
                }

                pageTable[i] = Address;
                Address = 0;
            }
            else
            {
                status = RGA_OUT_OF_RESOURCES;
                break;
            }
        }

        return status;
    }

    /* Fill the page table. */
    for (i = 0; i < pageCount; i++)
    {
        /* Get the physical address from page struct. */
        pageTable[i] = page_to_phys(pages[i]);
    }

    /* Entries are recorded; drop the page references. */
    down_read(&current->mm->mmap_sem);
    for (i = 0; i < result; i++)
        put_page(pages[i]);
    up_read(&current->mm->mmap_sem);

    return 0;
}
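/*
 * An ION buffer arrives as a scatter-gather table whose segments are
 * already physically contiguous and pinned, so no page pinning is
 * needed here: each scatterlist entry is expanded into per-page
 * physical addresses until the requested page count is filled or the
 * table runs out of entries.
 */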
static int rga_MapION(struct sg_table *sg, uint32_t *Memory, int32_t pageCount)
{
    uint32_t i, len, Address;
    uint32_t mapped_size = 0;
    uint32_t sg_num = 0;
    struct scatterlist *sgl = sg->sgl;

    do {
        len = sg_dma_len(sgl) >> PAGE_SHIFT;
        Address = sg_phys(sgl);
        for (i = 0; i < len; i++)
            Memory[mapped_size + i] = Address + (i << PAGE_SHIFT);
        mapped_size += len;
        sg_num += 1;
    }
    while ((sgl = sg_next(sgl)) && (mapped_size < pageCount) && (sg_num < sg->nents));

    return 0;
}
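/*
 * All the mode handlers below follow the same pattern: compute the
 * page counts of the buffers involved, allocate a contiguous table
 * (MMU_Base), fill it via rga_MapUserMemory()/rga_MapION() for user
 * buffers or directly for kernel ones, rewrite the request addresses
 * as page-table offsets (keeping only the in-page bits), program the
 * table's physical address shifted right by two into
 * req->mmu_info.base_addr, and flush the table through both cache
 * levels so the RGA core reads it coherently from DDR.
 */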
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;
    uint32_t uv_size, v_size;
    struct page **pages = NULL;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,
                                      &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        /* Round both sizes up so each region starts 16-entry aligned. */
        SrcMemSize = (SrcMemSize + 15) & (~15);
        DstMemSize = (DstMemSize + 15) & (~15);
        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if ((req->mmu_info.mmu_flag >> 8) & 1) {
            if (req->sg_src) {
                ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize);
            }
            else {
                ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            }
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else {
            MMU_p = MMU_Base;

            if (req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {
                for (i = 0; i < SrcMemSize; i++)
                    MMU_p[i] = rga_service.pre_scale_buf[i];
            }
            else {
                for (i = 0; i < SrcMemSize; i++)
                    MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);
            }
        }

        if ((req->mmu_info.mmu_flag >> 10) & 1) {
            if (req->sg_dst) {
                ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize);
            }
            else {
                ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            }
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else {
            MMU_p = MMU_Base + SrcMemSize;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
        }

        /* Duplicate the last entry into the padding slot. */
        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer addresses so the RGA MMU sees offsets into
         * the page table instead of the original virtual addresses.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while (0);

    /* Free the page table */
    if (pages != NULL)
        kfree(pages);

    /* Free MMU table */
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
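/*
 * Color-palette blits also need the RGA command buffer visible through
 * the RGA MMU, so its pages are mapped at the start of the table,
 * followed by the palette-indexed source and the destination image.
 * The source stride is derived from palette_mode: modes 0..3 select
 * 1/2/4/8 bpp, i.e. a shift of 3/2/1/0 from pixels to bytes.
 */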
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base = NULL;
    uint32_t *MMU_p;
    int ret, status = 0;
    uint32_t stride;

    uint8_t shift;
    uint16_t sw, byte_num;

    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map the command buffer pages first */
        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        /*
         * Rewrite the buffer addresses so the RGA MMU sees offsets into
         * the page table instead of the original virtual addresses.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR (MMU_Base holds AllSize entries) */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while (0);

    /* Free the page table */
    if (pages != NULL)
        kfree(pages);

    /* Free mmu table */
    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
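/*
 * KERNEL_SPACE_VALID (0xc0000000) is the classic 3G/1G split: a buffer
 * address below it belongs to user space and must go through
 * get_user_pages()/ION mapping, while anything above it is a kernel
 * virtual address whose pages can be translated directly.
 */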
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = NULL;

    do
    {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID) {
            if (req->sg_dst) {
                ret = rga_MapION(req->sg_dst, &MMU_Base[0], DstMemSize);
            }
            else {
                ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            }
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else {
            MMU_p = MMU_Base;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /* rewrite the dst address as an offset into the page table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
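/*
 * Line/point drawing only writes the destination surface, so just the
 * dst buffer is sized and mapped here.
 */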
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = NULL;

    do
    {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        /* rewrite the dst address as an offset into the page table */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
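/*
 * Blur/sharpen reads src and writes dst at the same virtual geometry:
 * both buffers share one table, with the dst entries placed
 * SrcMemSize pages after the src entries.
 */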
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + SrcMemSize;
            for (i = 0; i < DstMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer addresses so the RGA MMU sees offsets into
         * the page table instead of the original virtual addresses.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
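/*
 * Pre-scaling may target the driver's internal scratch buffer; in that
 * case rga_service.pre_scale_buf already holds ready-made page-table
 * entries, so they are copied verbatim instead of being recomputed
 * from the destination virtual address.
 */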
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if (DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /*
         * Allocate the MMU index memory; it is released when the
         * command completes (run_to_done).
         */
        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map src pages */
        if ((req->mmu_info.mmu_flag >> 8) & 1) {
            if (req->sg_src) {
                ret = rga_MapION(req->sg_src, &MMU_Base[0], SrcMemSize);
            }
            else {
                ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            }
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else {
            MMU_p = MMU_Base;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)((SrcStart + i) << PAGE_SHIFT);
        }

        /* map dst pages */
        if ((req->mmu_info.mmu_flag >> 10) & 1) {
            if (req->sg_dst) {
                ret = rga_MapION(req->sg_dst, &MMU_Base[SrcMemSize], DstMemSize);
            }
            else {
                ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            }
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else {
            /* kernel space */
            MMU_p = MMU_Base + SrcMemSize;

            if (req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf) {
                for (i = 0; i < DstMemSize; i++)
                    MMU_p[i] = rga_service.pre_scale_buf[i];
            }
            else {
                for (i = 0; i < DstMemSize; i++)
                    MMU_p[i] = (uint32_t)((DstStart + i) << PAGE_SHIFT);
            }
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer addresses so the RGA MMU sees offsets into
         * the page table instead of the original virtual addresses.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map the command buffer pages first */
        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        /*
         * Rewrite the buffer address so the RGA MMU sees an offset into
         * the page table instead of the original virtual address.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
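/*
 * Same flow as the palette-table update, but the buffer being fetched
 * is the pattern (req->pat), mapped after the command buffer pages.
 */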
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = MMU_p = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
        if (SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if (CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if (pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if (MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map the command buffer pages first */
        for (i = 0; i < CMDMemSize; i++)
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));

        if (req->pat.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;
            for (i = 0; i < SrcMemSize; i++)
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
        }

        /*
         * Rewrite the buffer address so the RGA MMU sees an offset into
         * the page table instead of the original virtual address.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->pat.yrgb_addr = (req->pat.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while (0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
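/*
 * Entry point from the command dispatcher: pick the mode-specific
 * mapping routine for the requested render mode and return its status
 * (-1 for an unknown mode).
 */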
int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
{
    int ret;

    switch (req->render_mode) {
        case bitblt_mode :
            ret = rga_mmu_info_BitBlt_mode(reg, req);
            break;
        case color_palette_mode :
            ret = rga_mmu_info_color_palette_mode(reg, req);
            break;
        case color_fill_mode :
            ret = rga_mmu_info_color_fill_mode(reg, req);
            break;
        case line_point_drawing_mode :
            ret = rga_mmu_info_line_point_drawing_mode(reg, req);
            break;
        case blur_sharp_filter_mode :
            ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
            break;
        case pre_scaling_mode :
            ret = rga_mmu_info_pre_scale_mode(reg, req);
            break;
        case update_palette_table_mode :
            ret = rga_mmu_info_update_palette_table_mode(reg, req);
            break;
        case update_patten_buff_mode :
            ret = rga_mmu_info_update_patten_buff_mode(reg, req);
            break;
        default :
            ret = -1;
            break;
    }

    return ret;
}