#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <asm/atomic.h>

#include "rga_mmu_info.h"

extern rga_service_info rga_service;
extern int mmu_buff_temp[1024];

/* user/kernel virtual address split on this 32-bit platform */
#define KERNEL_SPACE_VALID    0xc0000000
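
/*
 * Work out how many pages a buffer of MemSize bytes starting at virtual
 * address Mem spans: the end address is rounded up to a page boundary and
 * the start address rounded down. The first page index is returned through
 * *StartAddr, the page count as the return value.
 */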
static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
{
    uint32_t start, end;
    uint32_t pageCount;

    end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;
    *StartAddr = start;

    return pageCount;
}
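
/*
 * Per-format variant of the size calculation. Each format computes its
 * plane sizes from the word-aligned stride, then derives the page range
 * covering every plane: packed RGB formats only have the yrgb plane, while
 * semi-planar and planar YUV formats span from the lowest plane address to
 * the highest plane end.
 */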
static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr)
{
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t start, end;
    uint32_t pageCount = 0;

    start = 0;

    switch(format)
    {
        /* RGB formats */
        case RK_FORMAT_RGBA_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBX_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGB_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BGRA_8888 :
            stride = (w * 4 + 3) & (~3);   /* same layout as RGBA_8888 */
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGB_565 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBA_5551 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBA_4444 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BGR_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        /* YUV formats */
        case RK_FORMAT_YCbCr_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        /* bit-per-pixel palette formats carry no extra planes */
        case RK_FORMAT_BPP1 :
            break;
        case RK_FORMAT_BPP2 :
            break;
        case RK_FORMAT_BPP4 :
            break;
        case RK_FORMAT_BPP8 :
            break;
        default :
            pageCount = 0;
            start = 0;
            break;
    }

    *StartAddr = start;
    return pageCount;
}
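
/*
 * Pin the user pages behind [Memory, Memory + pageCount) (page indices,
 * not byte addresses) and write their physical addresses into pageTable.
 * If get_user_pages() cannot pin the whole range, fall back to walking the
 * page tables directly, which also covers VM_PFNMAP mappings (such as
 * framebuffers) that have no struct page.
 */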
static int rga_MapUserMemory(struct page **pages,
                             uint32_t *pageTable,
                             uint32_t Memory,
                             uint32_t pageCount)
{
    int32_t result;
    uint32_t i;
    uint32_t status = 0;
    uint32_t Address = 0;
    uint32_t t_mem;

    do
    {
        down_read(&current->mm->mmap_sem);
        result = get_user_pages(current,
                                current->mm,
                                Memory << PAGE_SHIFT,
                                pageCount,
                                1,      /* writable */
                                0,      /* no force */
                                pages,
                                NULL);
        up_read(&current->mm->mmap_sem);

        if(result <= 0 || result < pageCount)
        {
            struct vm_area_struct *vma;

            /* Could not pin the full range: walk the page tables by hand. */
            for(i=0; i<pageCount; i++)
            {
                t_mem = Memory + i;

                vma = find_vma(current->mm, (t_mem) << PAGE_SHIFT);

                if (vma && (vma->vm_flags & VM_PFNMAP))
                {
                    do
                    {
                        pte_t *pte;
                        spinlock_t *ptl;
                        unsigned long pfn;
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = pgd_offset(current->mm, ((t_mem) << PAGE_SHIFT));
                        pud = pud_offset(pgd, ((t_mem) << PAGE_SHIFT));
                        if (!pud)
                            break;

                        pmd = pmd_offset(pud, ((t_mem) << PAGE_SHIFT));
                        if (!pmd)
                            break;

                        pte = pte_offset_map_lock(current->mm, pmd, ((t_mem) << PAGE_SHIFT), &ptl);
                        if (!pte)
                            break;

                        pfn = pte_pfn(*pte);
                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((t_mem) << PAGE_SHIFT)) & ~PAGE_MASK));
                        pte_unmap_unlock(pte, ptl);
                    }
                    while (0);

                    pageTable[i] = Address;
                }
                else
                {
                    status = RGA_OUT_OF_RESOURCES;
                    break;
                }
            }

            break;
        }

        for (i = 0; i < pageCount; i++)
        {
            /* Flush the data cache so the RGA reads up-to-date data. */
            dma_sync_single_for_device(NULL, page_to_phys(pages[i]),
                                       PAGE_SIZE, DMA_TO_DEVICE);
            flush_dcache_page(pages[i]);
        }

        /* Fill the page table. */
        for(i=0; i<pageCount; i++)
        {
            /* Get the physical address from the page struct. */
            pageTable[i] = page_to_phys(pages[i]);
        }

        return 0;
    }
    while(0);

    if (rgaIS_ERROR(status))
    {
        /* Release the pages if any were pinned before the failure. */
        if (result > 0 && pages != NULL)
        {
            for (i = 0; i < (uint32_t)result; i++)
            {
                if (pages[i] == NULL)
                    break;

                dma_sync_single_for_device(NULL, page_to_phys(pages[i]),
                                           PAGE_SIZE, DMA_TO_DEVICE);
                page_cache_release(pages[i]);
            }
        }
    }

    return status;
}
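
/*
 * Build the MMU translation table for a bit-blit: one physical page entry
 * per page of the command buffer, the source image and the destination
 * image, in that order. The virtual addresses in the request are then
 * rewritten to (table index << PAGE_SHIFT) | page offset so the RGA MMU
 * resolves them through the table.
 */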
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status;
    uint32_t uv_size, v_size;

    struct page **pages = NULL;

    MMU_Base = NULL;
    status = 0;

    /* debug trap in the vendor driver for a specific 360x64 source size */
    if(req->src.act_w == 360 && req->src.act_h == 64)
    {
    }

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        /* Cal out the needed mem size */
        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* command buffer pages live in kernel memory: translate directly */
        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = (uint32_t)virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if(req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
            {
                /* Down-scale ratio is over 2, so the source is the last
                 * pre-scale pass: copy its ready-made MMU table instead of
                 * translating again. */
                for(i=0; i<SrcMemSize; i++)
                {
                    MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            }
            else
            {
                for(i=0; i<SrcMemSize; i++)
                {
                    MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
                }
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            /* debug timing around the destination mapping */
            ktime_t start, end;
            start = ktime_get();

            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }

            end = ktime_get();
            end = ktime_sub(end, start);
            printk("dst mmu map time = %d\n", (int)ktime_to_us(end));
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* Rewrite the buffer addresses in the request so the RGA MMU
         * resolves them through the translation table. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    /* Free the page table */
    if (pages != NULL) {
        kfree(pages);
    }

    /* Free MMU table */
    if(MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
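
/*
 * Color-palette mode: the source is a 1/2/4/8 bpp index buffer, so its
 * byte width is derived from palette_mode (bits per pixel) rather than
 * from a pixel format. Table layout matches bit-blit: command buffer,
 * then source, then destination pages.
 */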
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base = NULL;
    uint32_t *MMU_p;
    int ret, status;
    uint32_t stride;

    uint8_t shift;
    uint16_t sw, byte_num;

    /* palette_mode 0..3 selects 1/2/4/8 bpp; shift converts pixels to bytes */
    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);

    status = 0;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for(i=0; i<CMDMemSize; i++)
        {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0)
            {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0)
            {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* Rewrite the buffer addresses in the request so the RGA MMU
         * resolves them through the translation table. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    /* Free the page table */
    if (pages != NULL) {
        kfree(pages);
    }

    /* Free mmu table */
    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
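
/*
 * Color-fill mode writes only to the destination, so the table holds just
 * the command buffer pages followed by the destination pages.
 */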
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize, CMDMemSize;
    uint32_t DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;

    MMU_Base = NULL;
    status = 0;

    do
    {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* Rewrite the destination address so it resolves through the table. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
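
/*
 * Line/point drawing renders into the destination only; the table layout
 * is identical to color fill (command buffer pages, then destination
 * pages).
 */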
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize, CMDMemSize;
    uint32_t DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;

    MMU_Base = NULL;
    status = 0;

    do
    {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* Rewrite the destination address so it resolves through the table. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
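
/*
 * Blur/sharpen filtering reads a source and writes a destination, so the
 * table carries command, source and destination pages, and the YUV plane
 * offsets of both images are rewritten.
 */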
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;
    status = 0;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0)
            {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0)
            {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* Rewrite the buffer addresses in the request so the RGA MMU
         * resolves them through the translation table. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
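
/*
 * Pre-scale mode: the destination may be the driver-owned pre-scale buffer
 * living in kernel space; in that case its ready-made MMU table is copied
 * (or the pages translated directly) instead of pinning user pages.
 */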
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;
    status = 0;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL)
        {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /*
         * Allocate the MMU index memory.
         * It is released when the command completes (run-to-done path).
         */
        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        /* map src pages */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /* map dst pages */
        if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID)
        {
            /* kernel space */
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
            {
                /* writing into the driver's pre-scale buffer: reuse its
                 * ready-made translation table */
                for(i=0; i<DstMemSize; i++)
                {
                    MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            }
            else
            {
                for(i=0; i<DstMemSize; i++)
                {
                    MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
                }
            }
        }
        else
        {
            /* user space */
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }

        /* Rewrite the buffer addresses in the request so the RGA MMU
         * resolves them through the translation table. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + uv_size) << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | ((CMDMemSize + v_size) << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
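
/*
 * Update-palette-table mode transfers only a source buffer (the palette
 * data, vir_w * vir_h bytes), so the table holds the command buffer pages
 * followed by the palette pages.
 */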
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;

    MMU_Base = NULL;
    status = 0;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /* Rewrite the source address so it resolves through the table. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
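
/*
 * Update-pattern-buffer mode loads the pattern from req->pat; per the size
 * calculation below, pattern pixels are 4 bytes each.
 */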
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status;

    MMU_Base = MMU_p = NULL;
    status = 0;

    do
    {
        /* cal the pattern buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + CMDMemSize;

        pages = (struct page **)kmalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = (uint32_t *)kmalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /* Rewrite the source address so it resolves through the table. */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
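
/*
 * Entry point: build the MMU translation table that matches the request's
 * render mode. Returns 0 on success, otherwise the status the mode handler
 * broke out with.
 */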
int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
{
    int ret;

    switch (req->render_mode) {
        case bitblt_mode :
            ret = rga_mmu_info_BitBlt_mode(reg, req);
            break;
        case color_palette_mode :
            ret = rga_mmu_info_color_palette_mode(reg, req);
            break;
        case color_fill_mode :
            ret = rga_mmu_info_color_fill_mode(reg, req);
            break;
        case line_point_drawing_mode :
            ret = rga_mmu_info_line_point_drawing_mode(reg, req);
            break;
        case blur_sharp_filter_mode :
            ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
            break;
        case pre_scaling_mode :
            ret = rga_mmu_info_pre_scale_mode(reg, req);
            break;
        case update_palette_table_mode :
            ret = rga_mmu_info_update_palette_table_mode(reg, req);
            break;
        case update_patten_buff_mode :
            ret = rga_mmu_info_update_patten_buff_mode(reg, req);
            break;
        default :
            ret = -EINVAL;
            break;
    }

    return ret;
}