[ARM] mmu: add option to map lowmem with page mappings
author    Gary King <gking@nvidia.com>            Thu, 7 Oct 2010 19:44:06 +0000 (12:44 -0700)
committer Rebecca Schultz Zavin <rebecca@android.com>    Fri, 8 Oct 2010 22:58:59 +0000 (15:58 -0700)
Add a kernel configuration option to map the kernel's lowmem pages
using PTE mappings, rather than the default 1MiB section mappings.
On ARMv7 processors, supporting page allocations with DMA-coherent
cache attributes requires that the cache attributes in the kernel's
direct mapping match the attributes used by every other mapping of
the same memory; to guarantee this, the kernel's attributes must be
specified on a per-page basis.
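
As a minimal sketch of what per-page attributes make possible (a
hypothetical helper, modelled on the 2.6.36-era ARM mm interfaces and
not part of this change), a DMA-coherent allocator could rewrite the
kernel's lowmem PTE for a single page to uncached once lowmem is
PTE-mapped:

#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/* Hypothetical helper: retarget the kernel alias of one lowmem page. */
static int lowmem_page_set_uncached(struct page *page)
{
	unsigned long addr = (unsigned long)page_address(page);
	pmd_t *pmd = pmd_offset(pgd_offset_k(addr), addr);
	pgprot_t prot;
	pte_t *pte;

	/* Only possible once lowmem is mapped with page tables. */
	if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_TABLE)
		return -EINVAL;

	/* A real implementation would also clean/invalidate the D-cache here. */
	prot = __pgprot((pgprot_val(pgprot_kernel) & ~L_PTE_MT_MASK) |
			L_PTE_MT_UNCACHED);
	pte = pte_offset_kernel(pmd, addr);
	set_pte_ext(pte, pfn_pte(page_to_pfn(page), prot), 0);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}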

To avoid problems caused by the init_mm page table allocations
exceeding the memory available early in boot, lowmem is still
initially mapped using sections when this option is enabled (matching
the current behavior), and is then remapped using pages after bootmem
is initialized.
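
When the option is enabled, the result can be checked after boot with
a sketch along these lines (an assumed debug initcall, not part of
this change):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

/* Hypothetical debug check: how did lowmem end up mapped? */
static int __init report_lowmem_mapping(void)
{
	unsigned long addr = PAGE_OFFSET;
	pmd_t *pmd = pmd_offset(pgd_offset_k(addr), addr);

	pr_info("lowmem at %#lx mapped by %s\n", addr,
		(pmd_val(*pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT ?
		"1MiB sections" : "page tables");
	return 0;
}
late_initcall(report_lowmem_mapping);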

Change-Id: I8a6feba1d6806d007e17d9d4616525b0446c0fb1
Signed-off-by: Gary King <gking@nvidia.com>
arch/arm/mm/Kconfig
arch/arm/mm/mmu.c

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index cc6f9d6193dd130d9e069ba55b7912478ba1ff70..f4a4e42a4a1983cca2d20b093d306e1ca0b784f2 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -834,3 +834,12 @@ config ARCH_HAS_BARRIERS
        help
          This option allows the use of custom mandatory barriers
          included via the mach/barriers.h file.
+
+config ARCH_LOWMEM_IN_PTES
+       bool
+       help
+         This option will map the kernel direct-mapped lowmem region
+         using page table mappings rather than section mappings.
+
+config ARCH_USES_PG_UNCACHED
+       bool
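
Both new symbols are promptless bools, so a platform enables them by
selecting them from its machine Kconfig entry (e.g. select
ARCH_LOWMEM_IN_PTES). ARCH_USES_PG_UNCACHED additionally makes the
PG_uncached page flag available (see include/linux/page-flags.h),
presumably so a coherent allocator can track which lowmem pages
currently carry uncached attributes.
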
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6a3a2d0cd6db15806342a7357c2c154b6ab5970d..6e65f186905c32d82d88b7fb7399dda4e9a3a2d0 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -251,6 +251,7 @@ static struct mem_type mem_types[] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE |
                                L_PTE_USER | L_PTE_EXEC,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
@@ -555,6 +556,30 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
+#ifdef CONFIG_ARCH_LOWMEM_IN_PTES
+static void __init realloc_init_pte(pmd_t *pmd, unsigned long addr,
+                                   unsigned long end, unsigned long pfn,
+                                   const struct mem_type *type)
+{
+       pte_t *pte, *ptep;
+
+       if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_SECT)
+               return;
+
+       pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
+       if (WARN_ON(!pte))
+               return;
+
+       ptep = pte + PTRS_PER_PTE + __pte_index(addr);
+       do {
+               set_pte_ext(ptep, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
+               pfn++;
+       } while (ptep++, addr += PAGE_SIZE, addr != end);
+
+       __pmd_populate(pmd, __pa(pte) | type->prot_l1);
+}
+#endif
+
 static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
                                      unsigned long end, unsigned long phys,
                                      const struct mem_type *type)
@@ -1017,6 +1042,40 @@ static void __init map_lowmem(void)
        }
 }
 
+static void __init remap_lowmem(void)
+{
+#ifdef CONFIG_ARCH_LOWMEM_IN_PTES
+       struct meminfo *mi = &meminfo;
+       const struct mem_type *type = &mem_types[MT_MEMORY];
+       int i;
+
+       for (i = 0; i < mi->nr_banks; i++) {
+               pgd_t *pgd;
+               unsigned long phys, addr, end;
+               struct membank *bank = &mi->bank[i];
+
+               if (bank->highmem)
+                       continue;
+
+               phys = __pfn_to_phys(bank_pfn_start(bank));
+               addr = __phys_to_virt(bank_phys_start(bank));
+               end = addr + bank_phys_size(bank);
+
+               pgd = pgd_offset_k(addr);
+               do {
+                       unsigned long next = pgd_addr_end(addr, end);
+                       pmd_t *pmd = pmd_offset(pgd, addr);
+
+                       realloc_init_pte(pmd, addr, next,
+                                        __phys_to_pfn(phys), type);
+
+                       phys += next - addr;
+                       addr = next;
+               } while (pgd++, addr != end);
+       }
+#endif
+}
+
 static int __init meminfo_cmp(const void *_a, const void *_b)
 {
        const struct membank *a = _a, *b = _b;
@@ -1038,6 +1097,7 @@ void __init paging_init(struct machine_desc *mdesc)
        sanity_check_meminfo();
        prepare_page_table();
        map_lowmem();
+       remap_lowmem();
        devicemaps_init(mdesc);
        kmap_init();