#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/dma-mapping.h>
#include <asm/memory.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include "rga_mmu_info.h"
\r
extern rga_service_info rga_service;

#define KERNEL_SPACE_VALID    0xc0000000
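
/*
 * Count the pages spanned by the linear buffer [Mem, Mem + MemSize).
 * Returns the page count and stores the index of the first page in
 * *StartAddr.
 */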
\r
static int rga_mem_size_cal(uint32_t Mem, uint32_t MemSize, uint32_t *StartAddr)
{
    uint32_t start, end;
    uint32_t pageCount;

    end = (Mem + (MemSize + PAGE_SIZE - 1)) >> PAGE_SHIFT;
    start = Mem >> PAGE_SHIFT;
    pageCount = end - start;
    *StartAddr = start;

    return pageCount;
}
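
/*
 * Per-format page count for an image buffer.  RGB formats use a single
 * plane with the byte stride rounded up to a multiple of 4; the planar
 * and semi-planar YCbCr/YCrCb formats take the min/max over the Y, UV
 * and V plane addresses so the count covers every plane.  The first
 * page index is returned in *StartAddr.
 */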
\r
static int rga_buf_size_cal(uint32_t yrgb_addr, uint32_t uv_addr, uint32_t v_addr,
                            int format, uint32_t w, uint32_t h, uint32_t *StartAddr)
{
    uint32_t size_yrgb = 0;
    uint32_t size_uv = 0;
    uint32_t size_v = 0;
    uint32_t stride = 0;
    uint32_t start, end;
    uint32_t pageCount;

    /* The BPP formats and unknown formats map no pages. */
    start = 0;
    pageCount = 0;

    switch(format)
    {
        case RK_FORMAT_RGBA_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBX_8888 :
            stride = (w * 4 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGB_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BGRA_8888 :
            size_yrgb = w * h * 4;    /* restored from context: 4 bytes per pixel */
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGB_565 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBA_5551 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_RGBA_4444 :
            stride = (w * 2 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BGR_888 :
            stride = (w * 3 + 3) & (~3);
            size_yrgb = stride * h;
            end = (yrgb_addr + (size_yrgb + PAGE_SIZE - 1)) >> PAGE_SHIFT;
            start = yrgb_addr >> PAGE_SHIFT;
            pageCount = end - start;
            break;

        /* YUV formats */
        case RK_FORMAT_YCbCr_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCbCr_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_422_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = stride * h;
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_422_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * h);
            size_v = ((stride >> 1) * h);
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_SP :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = (stride * (h >> 1));
            start = MIN(yrgb_addr, uv_addr);
            start >>= PAGE_SHIFT;
            end = MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_YCrCb_420_P :
            stride = (w + 3) & (~3);
            size_yrgb = stride * h;
            size_uv = ((stride >> 1) * (h >> 1));
            size_v = ((stride >> 1) * (h >> 1));
            start = MIN(MIN(yrgb_addr, uv_addr), v_addr);
            start >>= PAGE_SHIFT;
            end = MAX(MAX((yrgb_addr + size_yrgb), (uv_addr + size_uv)), (v_addr + size_v));
            end = (end + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
            pageCount = end - start;
            break;
        case RK_FORMAT_BPP1 :
            break;
        case RK_FORMAT_BPP2 :
            break;
        case RK_FORMAT_BPP4 :
            break;
        case RK_FORMAT_BPP8 :
            break;
        default :
            break;
    }

    *StartAddr = start;

    return pageCount;
}
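
/*
 * Pin down the user pages backing pageCount pages starting at page index
 * Memory, and write their physical addresses into pageTable.  When
 * get_user_pages() cannot pin the full range (e.g. for VM_PFNMAP
 * mappings), fall back to resolving each page with a manual page-table
 * walk.
 */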
\r
static int rga_MapUserMemory(struct page **pages,
                             uint32_t *pageTable,
                             uint32_t Memory,
                             uint32_t pageCount)
{
    int32_t result;
    uint32_t i;
    int status = 0;
    uint32_t Address = 0;

    do
    {
        down_read(&current->mm->mmap_sem);
        result = get_user_pages(current,
                                current->mm,
                                Memory << PAGE_SHIFT,
                                pageCount,
                                1, 0,        /* write, no force */
                                pages,
                                NULL);
        up_read(&current->mm->mmap_sem);

        if(result <= 0 || result < pageCount)
        {
            struct vm_area_struct *vma;

            /* Could not pin every page: resolve them one by one. */
            for(i=0; i<pageCount; i++)
            {
                vma = find_vma(current->mm, (Memory + i) << PAGE_SHIFT);

                if (vma && (vma->vm_flags & VM_PFNMAP))
                {
                    do
                    {
                        pte_t *pte;
                        spinlock_t *ptl;
                        unsigned long pfn;
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = pgd_offset(current->mm, (Memory + i) << PAGE_SHIFT);
                        if(pgd_val(*pgd) == 0)
                        {
                            printk("pgd value is zero\n");
                            break;
                        }

                        pud = pud_offset(pgd, (Memory + i) << PAGE_SHIFT);
                        pmd = pmd_offset(pud, (Memory + i) << PAGE_SHIFT);
                        pte = pte_offset_map_lock(current->mm, pmd, (Memory + i) << PAGE_SHIFT, &ptl);
                        if(!pte)
                            break;

                        pfn = pte_pfn(*pte);
                        Address = ((pfn << PAGE_SHIFT) | (((unsigned long)((Memory + i) << PAGE_SHIFT)) & ~PAGE_MASK));
                        pte_unmap_unlock(pte, ptl);
                    }
                    while (0);

                    pageTable[i] = Address;
                }
                else
                {
                    status = RGA_OUT_OF_RESOURCES;
                    break;
                }
            }

            break;
        }

        for (i = 0; i < pageCount; i++)
        {
            /* Flush the data cache. */
#ifdef ANDROID
            dma_sync_single_for_device(NULL,
                                       page_to_phys(pages[i]),
                                       PAGE_SIZE,
                                       DMA_TO_DEVICE);
#else
            flush_dcache_page(pages[i]);
#endif
        }

        /* Fill the page table. */
        for(i=0; i<pageCount; i++)
        {
            /* Get the physical address from the page struct. */
            pageTable[i] = page_to_phys(pages[i]);
        }

        return 0;
    }
    while(0);

    if (rgaIS_ERROR(status))
    {
        /* Release the page array. */
        if (result > 0 && pages != NULL)
        {
            for (i = 0; i < result; i++)
            {
                if (pages[i] == NULL)
                    break;
#ifdef ANDROID
                dma_sync_single_for_device(NULL,
                                           page_to_phys(pages[i]),
                                           PAGE_SIZE,
                                           DMA_FROM_DEVICE);
#endif
                page_cache_release(pages[i]);
            }
        }
    }

    return status;
}
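
/*
 * Build the MMU table for a bitblt: compute the page span of the source
 * and destination buffers, pin (or translate) their pages, then rewrite
 * the request addresses as (page index << PAGE_SHIFT) | page offset so
 * the RGA MMU resolves them through the table.
 */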
\r
static int rga_mmu_info_BitBlt_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;
    uint32_t uv_size, v_size;

    struct page **pages = NULL;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.act_h + req->src.y_offset,
                                      &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.act_h + req->dst.y_offset,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        /* Cal out the needed mem size */
        AllSize = SrcMemSize + DstMemSize;

        pages = kmalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kmalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if(req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            if(req->src.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
            {
                /* Down scale ratio over 2: last pass of pre-scale, */
                /* so copy the MMU table from the pre-scale table.  */
                for(i=0; i<SrcMemSize; i++)
                {
                    MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            }
            else
            {
                for(i=0; i<SrcMemSize; i++)
                {
                    MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
                }
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ktime_t start, end;
            start = ktime_get();

            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }

            end = ktime_get();
            end = ktime_sub(end, start);
            printk("dst mmu map time = %d\n", (int)ktime_to_us(end));
        }
        else
        {
            MMU_p = MMU_Base + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /* Pad the table: duplicate the last entry into the extra slot. */
        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer addresses in the request: the high bits
         * become an index into the MMU table, the low bits keep the
         * offset within the page.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    /* Free the page table */
    if (pages != NULL) {
        kfree(pages);
    }

    /* Free MMU table */
    if(MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
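
/*
 * Color palette mode: the source holds index data and palette_mode
 * selects the bit depth (1/2/4/8 bpp), so the source byte width is
 * vir_w >> (3 - (palette_mode & 3)).  The RGA command buffer is mapped
 * into the table ahead of src and dst.
 */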
\r
static int rga_mmu_info_color_palette_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize, CMDMemSize;
    uint32_t SrcStart, DstStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base = NULL;
    uint32_t *MMU_p;
    int ret, status = 0;
    uint32_t stride;

    uint8_t shift;
    uint16_t sw, byte_num;

    shift = 3 - (req->palette_mode & 3);
    sw = req->src.vir_w;
    byte_num = sw >> shift;
    stride = (byte_num + 3) & (~3);

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, stride, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map cmd buf pages */
        for(i=0; i<CMDMemSize; i++)
        {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        /* map src pages */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /* map dst pages */
        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize + SrcMemSize], &MMU_Base[CMDMemSize + SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /*
         * Rewrite the buffer addresses in the request so they index the
         * MMU table instead of the original mappings.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((CMDMemSize + SrcMemSize) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    /* Free the page table */
    if (pages != NULL) {
        kfree(pages);
    }

    /* Free mmu table */
    if (MMU_Base != NULL) {
        kfree(MMU_Base);
    }

    return status;
}
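
/*
 * Color fill writes only the destination, so just the dst buffer is
 * sized, mapped and patched into the request.
 */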
\r
static int rga_mmu_info_color_fill_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;

    MMU_Base = NULL;

    do
    {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer address in the request so it indexes the
         * MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
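
/*
 * Line/point drawing also renders into dst only; the mapping logic
 * matches color fill.
 */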
\r
static int rga_mmu_info_line_point_drawing_mode(struct rga_reg *reg, struct rga_req *req)
{
    int DstMemSize;
    uint32_t DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;

    MMU_Base = NULL;

    do
    {
        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        /*
         * Rewrite the buffer address in the request so it indexes the
         * MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK));

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
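
/*
 * Blur/sharpen filtering reads src and writes dst, so both buffers are
 * mapped and both sets of plane addresses are rewritten.
 */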
\r
static int rga_mmu_info_blur_sharp_filter_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc((AllSize + 1) * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        if (req->dst.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + SrcMemSize;

            for(i=0; i<DstMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
            }
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer addresses in the request so they index the
         * MMU table: page index in the high bits, page offset in the
         * low bits.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | (SrcMemSize << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
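
/*
 * Pre-scale pass (downscale ratio over 2): when the destination is the
 * driver's own pre_scale_buf, its MMU table entries are copied directly;
 * a user-space destination is pinned as usual.
 */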
\r
static int rga_mmu_info_pre_scale_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, DstMemSize;
    uint32_t SrcStart, DstStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret;
    int status = 0;
    uint32_t uv_size, v_size;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_buf_size_cal(req->src.yrgb_addr, req->src.uv_addr, req->src.v_addr,
                                      req->src.format, req->src.vir_w, req->src.vir_h,
                                      &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal dst buf mmu info */
        DstMemSize = rga_buf_size_cal(req->dst.yrgb_addr, req->dst.uv_addr, req->dst.v_addr,
                                      req->dst.format, req->dst.vir_w, req->dst.vir_h,
                                      &DstStart);
        if(DstMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + DstMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL)
        {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /*
         * Allocate the MMU index mem.
         * This mem is released in the run_to_done path.
         */
        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map src pages */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[0], &MMU_Base[0], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /* map dst pages */
        if(req->dst.yrgb_addr >= KERNEL_SPACE_VALID)
        {
            /* kernel space */
            MMU_p = MMU_Base + SrcMemSize;

            if(req->dst.yrgb_addr == (uint32_t)rga_service.pre_scale_buf)
            {
                for(i=0; i<DstMemSize; i++)
                {
                    MMU_p[i] = rga_service.pre_scale_buf[i];
                }
            }
            else
            {
                for(i=0; i<DstMemSize; i++)
                {
                    MMU_p[i] = virt_to_phys((uint32_t *)((DstStart + i) << PAGE_SHIFT));
                }
            }
        }
        else
        {
            /* user space */
            ret = rga_MapUserMemory(&pages[SrcMemSize], &MMU_Base[SrcMemSize], DstStart, DstMemSize);
            if (ret < 0) {
                pr_err("rga map dst memory failed\n");
                status = ret;
                break;
            }
        }

        MMU_Base[AllSize] = MMU_Base[AllSize - 1];

        /*
         * Rewrite the buffer addresses in the request so they index the
         * MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);

        uv_size = (req->src.uv_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->src.v_addr - (SrcStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK));
        req->src.uv_addr = (req->src.uv_addr & (~PAGE_MASK)) | (uv_size << PAGE_SHIFT);
        req->src.v_addr = (req->src.v_addr & (~PAGE_MASK)) | (v_size << PAGE_SHIFT);

        uv_size = (req->dst.uv_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;
        v_size = (req->dst.v_addr - (DstStart << PAGE_SHIFT)) >> PAGE_SHIFT;

        req->dst.yrgb_addr = (req->dst.yrgb_addr & (~PAGE_MASK)) | ((SrcMemSize) << PAGE_SHIFT);
        req->dst.uv_addr = (req->dst.uv_addr & (~PAGE_MASK)) | ((SrcMemSize + uv_size) << PAGE_SHIFT);
        req->dst.v_addr = (req->dst.v_addr & (~PAGE_MASK)) | ((SrcMemSize + v_size) << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize + 1));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize + 1));

        /* Free the page table */
        if (pages != NULL)
            kfree(pages);

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
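
/*
 * Load a new palette: map the RGA command buffer followed by the
 * palette data pointed to by src.
 */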
\r
static int rga_mmu_info_update_palette_table_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->src.yrgb_addr, req->src.vir_w * req->src.vir_h, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc((AllSize + 1) * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map cmd buf pages */
        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        /* map src pages */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /*
         * Rewrite the buffer address in the request so it indexes the
         * MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
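
/*
 * Load a new pattern buffer: map the RGA command buffer followed by the
 * pattern data (pat.vir_w * pat.vir_h pixels, 4 bytes each).
 */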
\r
static int rga_mmu_info_update_patten_buff_mode(struct rga_reg *reg, struct rga_req *req)
{
    int SrcMemSize, CMDMemSize;
    uint32_t SrcStart, CMDStart;
    struct page **pages = NULL;
    uint32_t i;
    uint32_t AllSize;
    uint32_t *MMU_Base, *MMU_p;
    int ret, status = 0;

    MMU_Base = MMU_p = NULL;

    do
    {
        /* cal src buf mmu info */
        SrcMemSize = rga_mem_size_cal(req->pat.yrgb_addr, req->pat.vir_w * req->pat.vir_h * 4, &SrcStart);
        if(SrcMemSize == 0) {
            return -EINVAL;
        }

        /* cal cmd buf mmu info */
        CMDMemSize = rga_mem_size_cal((uint32_t)rga_service.cmd_buff, RGA_CMD_BUF_SIZE, &CMDStart);
        if(CMDMemSize == 0) {
            return -EINVAL;
        }

        AllSize = SrcMemSize + CMDMemSize;

        pages = kzalloc(AllSize * sizeof(struct page *), GFP_KERNEL);
        if(pages == NULL) {
            pr_err("RGA MMU malloc pages mem failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        MMU_Base = kzalloc(AllSize * sizeof(uint32_t), GFP_KERNEL);
        if(MMU_Base == NULL) {
            pr_err("RGA MMU malloc MMU_Base point failed\n");
            status = RGA_MALLOC_ERROR;
            break;
        }

        /* map cmd buf pages */
        for(i=0; i<CMDMemSize; i++) {
            MMU_Base[i] = virt_to_phys((uint32_t *)((CMDStart + i) << PAGE_SHIFT));
        }

        /* map src pages */
        if (req->src.yrgb_addr < KERNEL_SPACE_VALID)
        {
            ret = rga_MapUserMemory(&pages[CMDMemSize], &MMU_Base[CMDMemSize], SrcStart, SrcMemSize);
            if (ret < 0) {
                pr_err("rga map src memory failed\n");
                status = ret;
                break;
            }
        }
        else
        {
            MMU_p = MMU_Base + CMDMemSize;

            for(i=0; i<SrcMemSize; i++)
            {
                MMU_p[i] = (uint32_t)virt_to_phys((uint32_t *)((SrcStart + i) << PAGE_SHIFT));
            }
        }

        /*
         * Rewrite the buffer address in the request so it indexes the
         * MMU table.
         */
        req->mmu_info.base_addr = (virt_to_phys(MMU_Base) >> 2);
        req->src.yrgb_addr = (req->src.yrgb_addr & (~PAGE_MASK)) | (CMDMemSize << PAGE_SHIFT);

        /* record the malloc buf for the cmd end to release */
        reg->MMU_base = MMU_Base;

        /* flush data to DDR */
        dmac_flush_range(MMU_Base, (MMU_Base + AllSize));
        outer_flush_range(virt_to_phys(MMU_Base), virt_to_phys(MMU_Base + AllSize));

        /* Free the page table */
        if (pages != NULL) {
            kfree(pages);
        }

        return 0;
    }
    while(0);

    if (pages != NULL)
        kfree(pages);

    if (MMU_Base != NULL)
        kfree(MMU_Base);

    return status;
}
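
/*
 * Entry point: build the MMU table appropriate to the requested render
 * mode before the command is queued to the hardware.
 */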
\r
int rga_set_mmu_info(struct rga_reg *reg, struct rga_req *req)
{
    int ret;

    switch (req->render_mode) {
        case bitblt_mode :
            ret = rga_mmu_info_BitBlt_mode(reg, req);
            break;
        case color_palette_mode :
            ret = rga_mmu_info_color_palette_mode(reg, req);
            break;
        case color_fill_mode :
            ret = rga_mmu_info_color_fill_mode(reg, req);
            break;
        case line_point_drawing_mode :
            ret = rga_mmu_info_line_point_drawing_mode(reg, req);
            break;
        case blur_sharp_filter_mode :
            ret = rga_mmu_info_blur_sharp_filter_mode(reg, req);
            break;
        case pre_scaling_mode :
            ret = rga_mmu_info_pre_scale_mode(reg, req);
            break;
        case update_palette_table_mode :
            ret = rga_mmu_info_update_palette_table_mode(reg, req);
            break;
        case update_patten_buff_mode :
            ret = rga_mmu_info_update_patten_buff_mode(reg, req);
            break;
        default :
            ret = -EINVAL;
            break;
    }

    return ret;
}