/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages, down to 4KB,
 * when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;
	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}
	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
			  (unsigned long int)phys_addr,
			  (unsigned long int)last_addr);
		return NULL;
	}

	/* If it could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn      = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}
	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}
	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
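
/*
 * A minimal sketch of the non-page-aligned behaviour described in the
 * comment above __ioremap_caller() (illustration only, not part of this
 * file; 0xfed00004 is a made-up placeholder address): the returned
 * pointer carries the sub-page offset, while the underlying mapping is
 * page-aligned.
 */
#if 0
static void example_unaligned(void)
{
	void __iomem *p = ioremap_nocache(0xfed00004, 8);

	if (p) {
		(void)readl(p);		/* accesses physical 0xfed00004 */
		iounmap(p);
	}
}
#endif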
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
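
/*
 * A hedged usage sketch (hypothetical driver code, not part of this file;
 * assumes <linux/pci.h>): map a device's MMIO BAR, access registers with
 * the readl()/writel() helpers, and release the mapping. The register
 * offsets are made-up placeholders.
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	void __iomem *regs;

	regs = ioremap_nocache(pci_resource_start(pdev, 0),
			       pci_resource_len(pdev, 0));
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);	/* hypothetical enable register */
	(void)readl(regs + 0x14);	/* hypothetical status register */

	iounmap(regs);
	return 0;
}
#endif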
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);
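
/*
 * Sketch (illustration only): a driver that must not end up with UC-
 * (which an overlapping MTRR could effectively turn into WC) on its
 * register window can ask for strong UC explicitly. The address and
 * size below are made-up placeholders.
 */
#if 0
static void example_strong_uc(void)
{
	void __iomem *regs = ioremap_uc(0xfeb00000, 0x1000);

	if (regs) {
		writeb(0xff, regs);	/* strong-UC, never write-combined */
		iounmap(regs);
	}
}
#endif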
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
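
/*
 * Sketch (illustration only): write-combining suits framebuffer-style
 * streaming writes, where ordering of individual stores to the window
 * does not matter. The names below are hypothetical placeholders.
 */
#if 0
static void example_fill_fb(resource_size_t fb_phys, unsigned long fb_len)
{
	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
	unsigned long i;

	if (!fb)
		return;
	for (i = 0; i < fb_len; i += 4)
		writel(0, fb + i);	/* stores may be combined into bursts */
	iounmap(fb);
}
#endif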
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);
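
/*
 * Sketch (illustration only): write-through fits memory-like regions,
 * e.g. battery-backed SRAM, where reads may be served from the cache
 * but every write must reach the device. Names are hypothetical
 * placeholders.
 */
#if 0
static void example_log_event(resource_size_t sram_phys, u32 event)
{
	void __iomem *sram = ioremap_wt(sram_phys, PAGE_SIZE);

	if (!sram)
		return;
	writel(event, sram);	/* reaches the SRAM immediately (WT) */
	iounmap(sram);
}
#endif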
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	unsigned long vaddr;

	/* If the page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);

	/* Only add the offset on success; return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return (void *)vaddr;
}
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
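
/*
 * Sketch of how the xlate/unxlate pair is used (modelled on the /dev/mem
 * read path; simplified and illustrative, not the actual drivers/char/mem.c
 * code).
 */
#if 0
static int example_read_phys_byte(phys_addr_t phys, u8 *val)
{
	void *ptr = xlate_dev_mem_ptr(phys);

	if (!ptr)
		return -EFAULT;
	*val = *(u8 *)ptr;
	unxlate_dev_mem_ptr(phys, ptr);
	return 0;
}
#endif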
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}
static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP

	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
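
/*
 * Sketch (illustration only): before the vmalloc area is available, boot
 * code maps small ranges through these fixmap slots via early_ioremap(),
 * which is built on top of __early_set_fixmap() (see mm/early_ioremap.c).
 * The table address below is a made-up placeholder.
 */
#if 0
void __init example_early_probe(void)
{
	void __iomem *tbl = early_ioremap(0xfee00000, 64);	/* hypothetical */

	if (tbl) {
		(void)readl(tbl);
		early_iounmap(tbl, 64);
	}
}
#endif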