* doesn't exist, so everything must go through page tables.
*/
#ifdef CONFIG_MMU
-void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
+void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);
static inline void __iomem *
-__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
+__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}
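
On configurations where the physical address space is wider than unsigned long, passing the address through an unsigned long parameter silently drops the upper bits; phys_addr_t tracks the configured physical address width, which is what the prototype changes above are about. A minimal userspace sketch (not SH-specific, uint32_t standing in for a 32-bit unsigned long) of the failure mode:

/* Minimal sketch: why a physical address wider than 'unsigned long'
 * must not be passed through a 32-bit parameter. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;	/* wide physical address type */

static void map_narrow(uint32_t phys)	/* models the old 'unsigned long' parameter */
{
	printf("narrow parameter sees 0x%08x\n", phys);
}

static void map_wide(phys_addr_t phys)	/* models the new 'phys_addr_t' parameter */
{
	printf("wide parameter sees   0x%llx\n", (unsigned long long)phys);
}

int main(void)
{
	phys_addr_t phys = 0x10000f000ULL;	/* bit 32 set: beyond 32 bits */

	map_narrow((uint32_t)phys);	/* upper bits silently dropped */
	map_wide(phys);			/* full address preserved */
	return 0;
}
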
static inline void __iomem *
-__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
- unsigned long last_addr = offset + size - 1;
+ phys_addr_t last_addr = offset + size - 1;
/*
* For P1 and P2 space this is trivial, as everything is already
}
static inline void __iomem *
-__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
+__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
void __iomem *ret;
#define __iounmap(addr) do { } while (0)
#endif /* CONFIG_MMU */
-static inline void __iomem *
-ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}
static inline void __iomem *
-ioremap_cache(unsigned long offset, unsigned long size)
+ioremap_cache(phys_addr_t offset, unsigned long size)
{
return __ioremap_mode(offset, size, PAGE_KERNEL);
}
#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
-ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
+ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif
#ifdef CONFIG_IOREMAP_FIXED
-extern void __iomem *ioremap_fixed(resource_size_t, unsigned long,
- unsigned long, pgprot_t);
+extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
-ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
- unsigned long size, pgprot_t prot)
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
BUG();
return NULL;
* caller shouldn't need to know that small detail.
*/
void __iomem * __init_refok
-__ioremap_caller(unsigned long phys_addr, unsigned long size,
+__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
pgprot_t pgprot, void *caller)
{
struct vm_struct *area;
unsigned long offset, last_addr, addr, orig_addr;
+ void __iomem *mapped;
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
return NULL;
+ /*
+ * If we can't yet use the regular approach, go the fixmap route.
+ */
+ if (!mem_init_done)
+ return ioremap_fixed(phys_addr, size, pgprot);
+
+ /*
+ * First try to remap through the PMB.
+ * PMB entries are all pre-faulted.
+ */
+ mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
+ if (mapped && !IS_ERR(mapped))
+ return mapped;
+
/*
* Mappings have to be page-aligned
*/
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
- /*
- * If we can't yet use the regular approach, go the fixmap route.
- */
- if (!mem_init_done)
- return ioremap_fixed(phys_addr, offset, size, pgprot);
-
/*
* Ok, go for it..
*/
area->phys_addr = phys_addr;
orig_addr = addr = (unsigned long)area->addr;
-#ifdef CONFIG_PMB
- /*
- * First try to remap through the PMB once a valid VMA has been
- * established. Smaller allocations (or the rest of the size
- * remaining after a PMB mapping due to the size not being
- * perfectly aligned on a PMB size boundary) are then mapped
- * through the UTLB using conventional page tables.
- *
- * PMB entries are all pre-faulted.
- */
- if (unlikely(phys_addr >= P1SEG)) {
- unsigned long mapped;
-
- mapped = pmb_remap(addr, phys_addr, size, pgprot);
- if (likely(mapped)) {
- addr += mapped;
- phys_addr += mapped;
- size -= mapped;
- }
+ if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+ vunmap((void *)orig_addr);
+ return NULL;
}
-#endif
-
- if (likely(size))
- if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
- vunmap((void *)orig_addr);
- return NULL;
- }
return (void __iomem *)(offset + (char *)orig_addr);
}
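
With this change __ioremap_caller tries the fixmap route before mem_init_done, then the PMB, and only then falls back to page tables, which must be page-granular. A standalone sketch of the page-alignment arithmetic above (the sub-page offset is what the final "offset +" term adds back to the returned pointer), assuming 4 KiB pages:

/* Standalone sketch of the page-alignment step, with PAGE_MASK and
 * PAGE_ALIGN redefined locally for illustration. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys_addr = 0x1f000123UL;	/* arbitrary unaligned example */
	unsigned long size      = 0x300UL;
	unsigned long last_addr = phys_addr + size - 1;
	unsigned long offset    = phys_addr & ~PAGE_MASK;

	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/* Expect: phys 0x1f000000, size 0x1000 (one page), offset 0x123 */
	printf("phys=0x%lx size=0x%lx offset=0x%lx\n", phys_addr, size, offset);
	return 0;
}
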
if (iounmap_fixed(addr) == 0)
return;
-#ifdef CONFIG_PMB
/*
- * Purge any PMB entries that may have been established for this
- * mapping, then proceed with conventional VMA teardown.
- *
- * XXX: Note that due to the way that remove_vm_area() does
- * matching of the resultant VMA, we aren't able to fast-forward
- * the address past the PMB space until the end of the VMA where
- * the page tables reside. As such, unmap_vm_area() will be
- * forced to linearly scan over the area until it finds the page
- * tables where PTEs that need to be unmapped actually reside,
- * which is far from optimal. Perhaps we need to use a separate
- * VMA for the PMB mappings?
- * -- PFM.
+ * If the PMB handled it, there's nothing else to do.
*/
- pmb_unmap(vaddr);
-#endif
+ if (pmb_unmap(addr) == 0)
+ return;
p = remove_vm_area((void *)(vaddr & PAGE_MASK));
if (!p) {
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
struct pmb_entry *link;
};
+static struct {
+ unsigned long size;
+ int flag;
+} pmb_sizes[] = {
+ { .size = SZ_512M, .flag = PMB_SZ_512M, },
+ { .size = SZ_128M, .flag = PMB_SZ_128M, },
+ { .size = SZ_64M, .flag = PMB_SZ_64M, },
+ { .size = SZ_16M, .flag = PMB_SZ_16M, },
+};
+
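+
The pmb_sizes table moves ahead of the helpers that walk it; the scan in pmb_remap_caller below relies on the table being ordered largest-to-smallest so that the first entry that fits is also the best one. A minimal sketch of that selection, reusing the same section sizes:

/* Minimal sketch: the "largest section that still fits" scan only works
 * because the table is sorted in descending order. */
#include <stdio.h>

#define SZ_16M	0x01000000UL
#define SZ_64M	0x04000000UL
#define SZ_128M	0x08000000UL
#define SZ_512M	0x20000000UL

static const unsigned long pmb_sizes[] = { SZ_512M, SZ_128M, SZ_64M, SZ_16M };

static unsigned long pmb_best_size(unsigned long size)
{
	unsigned int i;

	for (i = 0; i < sizeof(pmb_sizes) / sizeof(pmb_sizes[0]); i++)
		if (size >= pmb_sizes[i])
			return pmb_sizes[i];

	return 0;	/* smaller than 16M: not a PMB candidate */
}

int main(void)
{
	/* 48 MiB request: best single section is 16M, not 64M */
	printf("0x%lx -> 0x%lx\n", 0x03000000UL, pmb_best_size(0x03000000UL));
	/* 192 MiB request: best single section is 128M */
	printf("0x%lx -> 0x%lx\n", 0x0c000000UL, pmb_best_size(0x0c000000UL));
	return 0;
}
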
static void pmb_unmap_entry(struct pmb_entry *, int depth);
static DEFINE_RWLOCK(pmb_rwlock);
return mk_pmb_entry(entry) | PMB_DATA;
}
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+{
+ return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
+}
+
+/*
+ * Ensure that the PMB entries match our cache configuration.
+ *
+ * When we are in 32-bit address extended mode, CCR.CB becomes
+ * invalid, so care must be taken to manually adjust cacheable
+ * translations.
+ */
+static __always_inline unsigned long pmb_cache_flags(void)
+{
+ unsigned long flags = 0;
+
+#if defined(CONFIG_CACHE_OFF)
+ flags |= PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITETHROUGH)
+ flags |= PMB_C | PMB_WT | PMB_UB;
+#elif defined(CONFIG_CACHE_WRITEBACK)
+ flags |= PMB_C;
+#endif
+
+ return flags;
+}
+
+/*
+ * Convert typical pgprot value to the PMB equivalent
+ */
+static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
+{
+ unsigned long pmb_flags = 0;
+ u64 flags = pgprot_val(prot);
+
+ if (flags & _PAGE_CACHABLE)
+ pmb_flags |= PMB_C;
+ if (flags & _PAGE_WT)
+ pmb_flags |= PMB_WT | PMB_UB;
+
+ return pmb_flags;
+}
+
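+
pmb_cache_flags and pgprot_to_pmb_flags together decide the PMB cache bits, since CCR.CB no longer applies to PMB translations in 32-bit mode. A small model of the pgprot conversion; the bit values here are placeholders for illustration only, the real PMB_* and _PAGE_* definitions live in the SH headers:

/* Model of pgprot -> PMB flag conversion with placeholder bit values. */
#include <stdio.h>

#define PMB_C		(1 << 0)	/* placeholder: cacheable */
#define PMB_WT		(1 << 1)	/* placeholder: write-through */
#define PMB_UB		(1 << 2)	/* placeholder: unbuffered writes */

#define _PAGE_CACHABLE	(1 << 3)	/* placeholder pgprot bit */
#define _PAGE_WT	(1 << 4)	/* placeholder pgprot bit */

static unsigned long pgprot_to_pmb_flags(unsigned long prot)
{
	unsigned long pmb_flags = 0;

	if (prot & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (prot & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

int main(void)
{
	/* A cacheable write-back mapping keeps only PMB_C set. */
	printf("cacheable:     %#lx\n", pgprot_to_pmb_flags(_PAGE_CACHABLE));
	/* A write-through mapping also forces unbuffered writes. */
	printf("write-through: %#lx\n",
	       pgprot_to_pmb_flags(_PAGE_CACHABLE | _PAGE_WT));
	/* An uncached mapping ends up with no cache bits at all. */
	printf("uncached:      %#lx\n", pgprot_to_pmb_flags(0));
	return 0;
}
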
+static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
+{
+ return (b->vpn == (a->vpn + a->size)) &&
+ (b->ppn == (a->ppn + a->size)) &&
+ (b->flags == a->flags);
+}
+
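+
pmb_can_merge only coalesces entries that are contiguous in both the virtual and the physical address space and carry identical flags. A self-contained check with a stripped-down pmb_entry holding just the fields the predicate looks at:

/* Self-contained sketch of the adjacency test. */
#include <stdbool.h>
#include <stdio.h>

struct pmb_entry {
	unsigned long vpn;	/* virtual start */
	unsigned long ppn;	/* physical start */
	unsigned long size;
	unsigned long flags;
};

static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

int main(void)
{
	struct pmb_entry a = { 0xa0000000UL, 0x40000000UL, 0x04000000UL, 0x1 };
	struct pmb_entry b = { 0xa4000000UL, 0x44000000UL, 0x04000000UL, 0x1 };
	struct pmb_entry c = { 0xa8000000UL, 0x50000000UL, 0x04000000UL, 0x1 };

	printf("a+b: %d\n", pmb_can_merge(&a, &b));	/* contiguous: 1 */
	printf("b+c: %d\n", pmb_can_merge(&b, &c));	/* physical gap: 0 */
	return 0;
}
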
+static bool pmb_size_valid(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (pmb_sizes[i].size == size)
+ return true;
+
+ return false;
+}
+
+static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
+{
+ return (addr >= P1SEG && (addr + size - 1) < P3SEG);
+}
+
+static inline bool pmb_prot_valid(pgprot_t prot)
+{
+ return (pgprot_val(prot) & _PAGE_USER) == 0;
+}
+
+static int pmb_size_to_flags(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (pmb_sizes[i].size == size)
+ return pmb_sizes[i].flag;
+
+ return 0;
+}
+
static int pmb_alloc_entry(void)
{
int pos;
pmbe->link = NULL;
}
-/*
- * Ensure that the PMB entries match our cache configuration.
- *
- * When we are in 32-bit address extended mode, CCR.CB becomes
- * invalid, so care must be taken to manually adjust cacheable
- * translations.
- */
-static __always_inline unsigned long pmb_cache_flags(void)
-{
- unsigned long flags = 0;
-
-#if defined(CONFIG_CACHE_WRITETHROUGH)
- flags |= PMB_C | PMB_WT | PMB_UB;
-#elif defined(CONFIG_CACHE_WRITEBACK)
- flags |= PMB_C;
-#endif
-
- return flags;
-}
-
/*
* Must be run uncached.
*/
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
- writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
- writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
- mk_pmb_data(pmbe->entry));
+ /* Set V-bit */
+ __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
+ __raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}
static void __clear_pmb_entry(struct pmb_entry *pmbe)
spin_unlock_irqrestore(&pmbe->lock, flags);
}
-static struct {
- unsigned long size;
- int flag;
-} pmb_sizes[] = {
- { .size = SZ_512M, .flag = PMB_SZ_512M, },
- { .size = SZ_128M, .flag = PMB_SZ_128M, },
- { .size = SZ_64M, .flag = PMB_SZ_64M, },
- { .size = SZ_16M, .flag = PMB_SZ_16M, },
-};
+int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
+ unsigned long size, pgprot_t prot)
+{
+ return 0;
+}
-long pmb_remap(unsigned long vaddr, unsigned long phys,
- unsigned long size, pgprot_t prot)
+void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
+ pgprot_t prot, void *caller)
{
struct pmb_entry *pmbp, *pmbe;
- unsigned long wanted;
- int pmb_flags, i;
- long err;
- u64 flags;
+ unsigned long pmb_flags;
+ int i, mapped;
+ unsigned long orig_addr, vaddr;
+ phys_addr_t offset, last_addr;
+ phys_addr_t align_mask;
+ unsigned long aligned;
+ struct vm_struct *area;
- flags = pgprot_val(prot);
+ /*
+ * Small mappings need to go through the TLB.
+ */
+ if (size < SZ_16M)
+ return ERR_PTR(-EINVAL);
+ if (!pmb_prot_valid(prot))
+ return ERR_PTR(-EINVAL);
- pmb_flags = PMB_WT | PMB_UB;
+ pmbp = NULL;
+ pmb_flags = pgprot_to_pmb_flags(prot);
+ mapped = 0;
- /* Convert typical pgprot value to the PMB equivalent */
- if (flags & _PAGE_CACHABLE) {
- pmb_flags |= PMB_C;
+ for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
+ if (size >= pmb_sizes[i].size)
+ break;
- if ((flags & _PAGE_WT) == 0)
- pmb_flags &= ~(PMB_WT | PMB_UB);
- }
+ last_addr = phys + size;
+ align_mask = ~(pmb_sizes[i].size - 1);
+ offset = phys & ~align_mask;
+ phys &= align_mask;
+ aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
- pmbp = NULL;
- wanted = size;
+ area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
+ P3SEG, caller);
+ if (!area)
+ return NULL;
+
+ area->phys_addr = phys;
+ orig_addr = vaddr = (unsigned long)area->addr;
+
+ if (!pmb_addr_valid(vaddr, aligned))
+ return ERR_PTR(-EFAULT);
again:
for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
PMB_NO_ENTRY);
if (IS_ERR(pmbe)) {
- err = PTR_ERR(pmbe);
- goto out;
+ pmb_unmap_entry(pmbp, mapped);
+ return pmbe;
}
spin_lock_irqsave(&pmbe->lock, flags);
+ pmbe->size = pmb_sizes[i].size;
+
__set_pmb_entry(pmbe);
- phys += pmb_sizes[i].size;
- vaddr += pmb_sizes[i].size;
- size -= pmb_sizes[i].size;
-
- pmbe->size = pmb_sizes[i].size;
+ phys += pmbe->size;
+ vaddr += pmbe->size;
+ size -= pmbe->size;
/*
* Link adjacent entries that span multiple PMB entries
* for easier tear-down, then retry with pmb_sizes[i].size again.
*/
i--;
+ mapped++;
spin_unlock_irqrestore(&pmbe->lock, flags);
}
if (size >= SZ_16M)
goto again;
- return wanted - size;
-
-out:
- pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);
-
- return err;
+ return (void __iomem *)(offset + (char *)orig_addr);
}
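
pmb_remap_caller picks the largest section that fits the request, aligns the physical base down to that section (keeping the discarded low bits in offset so they can be added back to the returned cookie), rounds the span up for the VMA, and then the again loop carves the request into 512M/128M/64M/16M entries. A worked standalone example of that arithmetic for a hypothetical 144 MiB request; the while loop stands in for the kernel's "i--; goto again" retry of the same section size:

/* Worked example: section selection, alignment, and greedy carving. */
#include <stdio.h>

#define SZ_16M	0x01000000UL
#define SZ_64M	0x04000000UL
#define SZ_128M	0x08000000UL
#define SZ_512M	0x20000000UL

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static const unsigned long pmb_sizes[] = { SZ_512M, SZ_128M, SZ_64M, SZ_16M };
#define NR_SIZES	(sizeof(pmb_sizes) / sizeof(pmb_sizes[0]))

int main(void)
{
	unsigned long phys = 0x40000000UL;	/* example: 128M-aligned base */
	unsigned long size = 0x09000000UL;	/* 144 MiB request */
	unsigned long last_addr, align_mask, offset, aligned;
	unsigned int i;

	for (i = 0; i < NR_SIZES; i++)		/* largest section <= size */
		if (size >= pmb_sizes[i])
			break;

	last_addr  = phys + size;
	align_mask = ~(pmb_sizes[i] - 1);
	offset     = phys & ~align_mask;	/* added back to the returned pointer */
	phys      &= align_mask;
	aligned    = ALIGN(last_addr, pmb_sizes[i]) - phys;

	/* Expect: offset 0x0, VMA span 0x10000000 (two whole 128M sections) */
	printf("offset=0x%lx vma_span=0x%lx\n", offset, aligned);

	for (i = 0; i < NR_SIZES; i++)
		while (size >= pmb_sizes[i]) {
			/* Expect: 0x08000000 @ 0x40000000, 0x01000000 @ 0x48000000 */
			printf("  entry: 0x%lx @ 0x%lx\n", pmb_sizes[i], phys);
			phys += pmb_sizes[i];
			size -= pmb_sizes[i];
		}

	return 0;
}

Note that the VMA span is rounded up to whole sections while only the requested size is actually backed by PMB entries.
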
-void pmb_unmap(unsigned long addr)
+int pmb_unmap(void __iomem *addr)
{
struct pmb_entry *pmbe = NULL;
- int i;
+ unsigned long vaddr = (unsigned long __force)addr;
+ int i, found = 0;
read_lock(&pmb_rwlock);
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
if (test_bit(i, pmb_map)) {
pmbe = &pmb_entry_list[i];
- if (pmbe->vpn == addr)
+ if (pmbe->vpn == vaddr) {
+ found = 1;
break;
+ }
}
}
read_unlock(&pmb_rwlock);
- pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
-}
-
-static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
-{
- return (b->vpn == (a->vpn + a->size)) &&
- (b->ppn == (a->ppn + a->size)) &&
- (b->flags == a->flags);
-}
-
-static bool pmb_size_valid(unsigned long size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
- if (pmb_sizes[i].size == size)
- return true;
-
- return false;
-}
-
-static int pmb_size_to_flags(unsigned long size)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
- if (pmb_sizes[i].size == size)
- return pmb_sizes[i].flag;
+ if (found) {
+ pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
+ return 0;
+ }
- return 0;
+ return -EINVAL;
}
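
pmb_unmap now reports whether the PMB owned the mapping (0) or not (-EINVAL), which is what enables the early return added to __iounmap above. A tiny model of that fall-through chain; the handlers here are stand-in stubs, not kernel API:

/* Model of the teardown ordering in __iounmap: each stage returns 0
 * when it owned the mapping, so later stages are skipped. */
#include <stdio.h>

#define EINVAL	22

static int fixed_unmap_stub(void *addr)	{ (void)addr; return -EINVAL; }	/* not a fixmap mapping */
static int pmb_unmap_stub(void *addr)	{ (void)addr; return 0; }	/* PMB owned it */

static void iounmap_model(void *addr)
{
	if (fixed_unmap_stub(addr) == 0)
		return;
	if (pmb_unmap_stub(addr) == 0) {
		printf("torn down by the PMB, no vunmap needed\n");
		return;
	}
	printf("falling back to page-table teardown\n");
}

int main(void)
{
	iounmap_model((void *)0xa4000000UL);
	return 0;
}
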
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
write_unlock_irqrestore(&pmb_rwlock, flags);
}
-static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
-{
- return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
-}
-
static void __init pmb_notify(void)
{
int i;