/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>

#ifndef CONFIG_MMU

/* Use the dcache parameters here because we cannot rely on the RAM size */
#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
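
/*
 * Worked example (hypothetical cache parameters, not taken from any
 * real platform): with cpuinfo.dcache_base = 0x00000000 and
 * cpuinfo.dcache_high = 0x0fffffff, UNCACHED_SHADOW_MASK evaluates to
 * 0x10000000, so the cached kernel address 0x00123000 aliases to
 * 0x10123000 in the uncached shadow region.
 */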

/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple: in the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region, so memory accessed
 * through this mirror region will not be cached. It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region. This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	struct page *page, *end, *free;
	unsigned long order;
	void *ret, *virt;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/* We could do with a page_to_phys and page_to_bus here. */
	virt = page_address(page);
	ret = ioremap(virt_to_phys(virt), size);
	if (!ret)
		goto no_remap;

	/*
	 * Here's the magic! Note that if the uncached shadow is not
	 * implemented, it's up to the calling code to also test for that
	 * condition and make other arrangements, such as manually
	 * flushing the cache and so on.
	 */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
#endif
	/* dma_handle is the same as the physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;

	/*
	 * Free wasted pages. We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(virt);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	for (; page < end; page++) {
		init_page_count(page);
		if (page >= free)
			__free_page(page);
		else
			SetPageReserved(page);
	}

	return ret;
no_remap:
	__free_pages(page, order);
no_page:
	return NULL;
}

#else

void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	int order, err, i;
	unsigned long page, va, flags;
	phys_addr_t pa;
	struct vm_struct *area;
	void *ret;

	if (in_interrupt())
		BUG();

	/* Only allocate page-sized areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		BUG();
		return NULL;
	}

	/*
	 * We need to ensure that there are no cache lines in use,
	 * or worse, dirty in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)page),
			   virt_to_phys((void *)page) + size);

	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (area == NULL) {
		free_pages(page, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = virt_to_bus((void *)page);

	/* MS: This is the whole magic - use cache-inhibited page flags */
	flags = _PAGE_KERNEL | _PAGE_NO_CACHE;

	/*
	 * Set refcount = 1 on all pages in an order > 0
	 * allocation so that vfree() will actually
	 * free all pages that were allocated.
	 */
	if (order > 0) {
		struct page *rpage = virt_to_page(page);

		for (i = 1; i < (1 << order); i++)
			init_page_count(rpage + i);
	}

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va + i, pa + i, flags);

	if (err) {
		vfree((void *)va);
		return NULL;
	}

	return ret;
}
#endif /* CONFIG_MMU */
EXPORT_SYMBOL(consistent_alloc);
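
/*
 * A minimal usage sketch, not part of the original file; the register
 * base "regs" and the offset MYDEV_DMA_ADDR are hypothetical. A driver
 * would typically allocate one coherent buffer up front and program
 * the returned dma_addr_t into its device:
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &dma);
 *	if (!buf)
 *		return -ENOMEM;
 *	out_be32(regs + MYDEV_DMA_ADDR, dma);
 *	...
 *	consistent_free(buf);
 */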

/*
 * Free page(s) as defined by the above mapping.
 */
void consistent_free(void *vaddr)
{
	if (in_interrupt())
		BUG();

	/* Clear the shadow-mask bits in the address, and free as usual */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
#endif
	vfree(vaddr);
}
EXPORT_SYMBOL(consistent_free);

/*
 * Make an area consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)vaddr;

	/* Convert the start address back down to the unshadowed region */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	start &= ~UNCACHED_SHADOW_MASK;
#endif
	end = start + size;

	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(consistent_sync);
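
/*
 * A minimal sketch of streaming use, not from the original file; buf
 * and len are hypothetical. Write dirty lines back before the device
 * reads the buffer, and invalidate before the CPU reads what the
 * device wrote:
 *
 *	consistent_sync(buf, len, PCI_DMA_TODEVICE);
 *	... start the DMA transfer and wait for it to complete ...
 *	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
 *
 * Note that all three directions resolve to the same
 * flush_dcache_range() call in this implementation.
 */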

/*
 * consistent_sync_page() makes memory consistent; it is identical
 * to consistent_sync(), but takes a struct page instead of a
 * virtual address.
 */
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
	unsigned long start = (unsigned long)page_address(page) + offset;

	consistent_sync((void *)start, size, direction);
}
EXPORT_SYMBOL(consistent_sync_page);
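
/*
 * A minimal sketch, not from the original file, of calling the
 * page-based variant for a scatterlist entry (sg is hypothetical),
 * rather than resolving the virtual address in the caller:
 *
 *	consistent_sync_page(sg_page(sg), sg->offset, sg->length,
 *			     PCI_DMA_TODEVICE);
 */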