*/
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
- unsigned long flags, void *caller);
+ pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);
#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(resource_size_t, unsigned long, pgprot_t);
#endif
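/*
 * The inline wrappers below record their call site via
 * __builtin_return_address(0), so each mapping can be attributed to
 * its caller (e.g. in /proc/vmallocinfo).
 */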
static inline void __iomem *
-__ioremap(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
{
- return __ioremap_caller(offset, size, flags, __builtin_return_address(0));
+ return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}
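/*
 * In 29-bit physical mode the first 512MB of physical space has fixed
 * aliases at P1 (0x80000000, cached) and P2 (0xa0000000, uncached), so
 * most remaps reduce to plain address arithmetic.
 */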
static inline void __iomem *
-__ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
unsigned long last_addr = offset + size - 1;

/*
 * For P1 and P2 space this is trivial, as everything is already
 * mapped. Uncached access for P1 addresses are done through P2.
 * In the P3 case or for addresses outside of the 29-bit space,
 * mapping must be done by the PMB or by using page tables.
 */
if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
- if (unlikely(flags & _PAGE_CACHABLE))
+ if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
return (void __iomem *)P1SEGADDR(offset);
return (void __iomem *)P2SEGADDR(offset);
}
#endif

return NULL;
}
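/*
 * Worked example: with CONFIG_29BIT, remapping physical 0x0c000000
 * with _PAGE_CACHABLE set yields the cached P1 alias 0x8c000000;
 * without it, the uncached P2 alias 0xac000000. No page tables are
 * touched in either case.
 */

/*
 * __ioremap_mode() tries the fast paths first and only falls back to
 * building a real page-table mapping when none of them can satisfy
 * the request.
 */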
static inline void __iomem *
-__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
{
void __iomem *ret;

ret = __ioremap_trapped(offset, size);
if (ret)
return ret;
- ret = __ioremap_29bit(offset, size, flags);
+ ret = __ioremap_29bit(offset, size, prot);
if (ret)
return ret;
- return __ioremap(offset, size, flags);
+ return __ioremap(offset, size, prot);
}
#else
-#define __ioremap(offset, size, flags) ((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, flags) ((void __iomem *)(offset))
+#define __ioremap(offset, size, prot) ((void __iomem *)(offset))
+#define __ioremap_mode(offset, size, prot) ((void __iomem *)(offset))
#define __iounmap(addr) do { } while (0)
#endif /* CONFIG_MMU */
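The flag-based macros below are replaced by typed static inlines: cacheability is now carried in a pgprot_t end to end, which is what ioremap_fixed() and ioremap_page_range() ultimately consume, while ioremap_prot() folds its legacy flags argument in via __pgprot().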
-#define ioremap(offset, size) \
- __ioremap_mode((offset), (size), 0)
-#define ioremap_nocache(offset, size) \
- __ioremap_mode((offset), (size), 0)
-#define ioremap_cache(offset, size) \
- __ioremap_mode((offset), (size), _PAGE_CACHABLE)
-#define p3_ioremap(offset, size, flags) \
- __ioremap((offset), (size), (flags))
-#define ioremap_prot(offset, size, flags) \
- __ioremap_mode((offset), (size), (flags))
-#define iounmap(addr) \
- __iounmap((addr))
+static inline void __iomem *
+ioremap(unsigned long offset, unsigned long size)
+{
+ return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+}
+
+static inline void __iomem *
+ioremap_cache(unsigned long offset, unsigned long size)
+{
+ return __ioremap_mode(offset, size, PAGE_KERNEL);
+}
+
+static inline void __iomem *
+ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
+{
+ return __ioremap_mode(offset, size, __pgprot(flags));
+}
+
+#define ioremap_nocache ioremap
+#define p3_ioremap __ioremap
+#define iounmap __iounmap
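For illustration, a minimal sketch of how a driver would use the typed helpers above; DEV_BASE, DEV_SIZE, and REG_CTRL are made-up values, not a real device:

#include <linux/errno.h>
#include <linux/io.h>

#define DEV_BASE 0xfe200000UL /* hypothetical register window */
#define DEV_SIZE 0x100UL
#define REG_CTRL 0x00 /* hypothetical control register */

static void __iomem *regs;

static int dev_init(void)
{
	/* uncached: ends up as PAGE_KERNEL_NOCACHE via __ioremap_mode() */
	regs = ioremap(DEV_BASE, DEV_SIZE);
	if (!regs)
		return -ENOMEM;

	writel(1, regs + REG_CTRL); /* kick the (made-up) control register */
	return 0;
}

static void dev_exit(void)
{
	iounmap(regs);
	regs = NULL;
}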
#define maybebadio(port) \
printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
__func__, __LINE__, (port), (u32)__builtin_return_address(0))
*/
void __iomem * __init_refok
__ioremap_caller(unsigned long phys_addr, unsigned long size,
- unsigned long flags, void *caller)
+ pgprot_t pgprot, void *caller)
{
struct vm_struct *area;
unsigned long offset, last_addr, addr, orig_addr;
- pgprot_t pgprot;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;

/*
 * If we can't yet use the regular approach, go the fixmap route.
 */
if (!mem_init_done)
- return ioremap_fixed(phys_addr, size, __pgprot(flags));
+ return ioremap_fixed(phys_addr, size, pgprot);
/*
* Ok, go for it..
* PMB entries are all pre-faulted.
*/
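/*
 * pmb_remap() returns the number of bytes it actually mapped; the
 * remainder, if any, falls through to the page-table mapping below.
 */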
if (unlikely(phys_addr >= P1SEG)) {
- unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+ unsigned long mapped;
+
+ mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
if (likely(mapped)) {
addr += mapped;
phys_addr += mapped;
}
#endif
- pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
if (likely(size))
if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
vunmap((void *)orig_addr);