/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
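
/*
 * Illustrative note (not from this file): this keeps aliases of the same
 * physical page coherent. If a RAM page is visible both through the kernel's
 * direct (WB) mapping and through a new UC- ioremap alias, the direct-map
 * side has to be rewritten to match, roughly:
 *
 *	ioremap_change_attr((unsigned long)__va(phys), PAGE_SIZE,
 *			    _PAGE_CACHE_UC_MINUS);
 *
 * kernel_map_sync_memtype() performs a call of this shape for the RAM-backed
 * part of a new mapping; see its use in __ioremap_caller() below.
 */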

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	last_pfn = last_addr >> PAGE_SHIFT;
	for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
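
/*
 * Worked example (illustrative): a request for 5 bytes at phys 0xfed00ffe
 * gives offset = 0xffe, phys_addr rounded down to 0xfed00000, and
 * size = PAGE_ALIGN(0xfed01003) - 0xfed00000 = two pages. The caller gets
 * vaddr + 0xffe back, so the unaligned request is handled transparently.
 */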

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
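
/*
 * Usage sketch (illustrative, not from this file): a driver maps a device
 * BAR once, touches it only through the mmio helpers, and unmaps it on
 * teardown. FOO_BAR_PHYS and FOO_REG_CTRL are hypothetical names.
 *
 *	void __iomem *regs = ioremap_nocache(FOO_BAR_PHYS, SZ_4K);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + FOO_REG_CTRL);	(never dereference regs directly)
 *	...
 *	iounmap(regs);
 */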

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
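
/*
 * Usage sketch (illustrative): write combining suits framebuffer-like
 * regions where streaming write bandwidth matters more than ordering.
 * FB_PHYS and FB_SIZE are hypothetical.
 *
 *	void __iomem *fb = ioremap_wc(FB_PHYS, FB_SIZE);
 *
 * Note the !pat_enabled fallback above: such callers get a UC- mapping
 * instead and must not depend on WC semantics for correctness.
 */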

/*
 * ioremap_cache - like ioremap_nocache, but the mapping is cacheable
 * (write-back), for memory-like resources.
 */
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/*
 * ioremap_prot - map memory with caller-supplied protection bits; only the
 * _PAGE_CACHE_MASK bits of @prot_val are honoured here.
 */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
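
/*
 * Usage sketch (illustrative, hypothetical names): a cacheable mapping for
 * memory-like contents such as a firmware table:
 *
 *	void __iomem *tbl = ioremap_cache(table_phys, table_len);
 *	...
 *	iounmap(tbl);
 */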

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
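
/*
 * Pairing sketch (illustrative): the /dev/mem read/write paths bracket each
 * access with these helpers, so the temporary ioremap_cache() mapping taken
 * for non-RAM pages is always dropped again:
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	... copy_to_user(buf, ptr, sz) ...
 *	unxlate_dev_mem_ptr(p, ptr);
 */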

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
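
/*
 * Usage sketch (illustrative): before the vmalloc machinery is up, boot code
 * reaches this path through the generic early_ioremap() helpers, e.g. to
 * peek at a firmware table (table_phys and table_len are hypothetical):
 *
 *	void __iomem *p = early_ioremap(table_phys, table_len);
 *	... parse the table ...
 *	early_iounmap(p, table_len);
 *
 * which install and clear fixmap ptes via __early_set_fixmap() above.
 */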