ARM: implement basic NX support for kernel lowmem mappings
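
Add a new memory type, MT_MEMORY_RW, which is identical to the existing
MT_MEMORY_RWX except that it is marked execute-never (L_PTE_XN for page
mappings and, on CPUs which support it, PMD_SECT_XN for section
mappings).  map_lowmem() then maps the section-rounded region covering
the kernel text and init code with MT_MEMORY_RWX, and the remainder of
any memory bank overlapping the kernel image with MT_MEMORY_RW, so
lowmem around the kernel can no longer be executed directly.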
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index fce2e73880981644f5eb99e7051e21f5899bf6e2..9ec715f1222475c407f3e5070989bafc7c9967b2 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -293,6 +293,15 @@ static struct mem_type mem_types[] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
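+       /* Writable kernel lowmem, never executable: L_PTE_XN covers page
+        * mappings; PMD_SECT_XN is added below for section mappings. */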
+       [MT_MEMORY_RW] = {
+               .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                            L_PTE_XN,
+               .prot_l1   = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .domain    = DOMAIN_KERNEL,
+       },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
@@ -410,6 +419,9 @@ static void __init build_mem_type_table(void)
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+
+                       /* Also make writable lowmem non-executable */
+                       mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
                }
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /*
@@ -489,6 +501,8 @@ static void __init build_mem_type_table(void)
                        mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
                        mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+                       mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
                        mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
@@ -545,6 +559,8 @@ static void __init build_mem_type_table(void)
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+       mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+       mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
        mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
        mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -1296,6 +1312,10 @@ static void __init kmap_init(void)
 static void __init map_lowmem(void)
 {
        struct memblock_region *reg;
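+       /* Section-rounded span of the kernel text and init code; phys_addr_t
+        * so LPAE physical addresses above 4GiB are not truncated. */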
+       phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+       phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 
        /* Map all the lowmem memory banks. */
        for_each_memblock(memory, reg) {
@@ -1308,12 +1328,41 @@ static void __init map_lowmem(void)
                if (start >= end)
                        break;
 
-               map.pfn = __phys_to_pfn(start);
-               map.virtual = __phys_to_virt(start);
-               map.length = end - start;
-               map.type = MT_MEMORY_RWX;
+               if (end < kernel_x_start || start >= kernel_x_end) {
+                       map.pfn = __phys_to_pfn(start);
+                       map.virtual = __phys_to_virt(start);
+                       map.length = end - start;
+                       map.type = MT_MEMORY_RWX;
 
-               create_mapping(&map);
+                       create_mapping(&map);
+               } else {
+                       /* This region must cover the entire kernel image:
+                        * only its text/init window stays executable. */
+                       if (start < kernel_x_start) {
+                               map.pfn = __phys_to_pfn(start);
+                               map.virtual = __phys_to_virt(start);
+                               map.length = kernel_x_start - start;
+                               map.type = MT_MEMORY_RW;
+
+                               create_mapping(&map);
+                       }
+
+                       map.pfn = __phys_to_pfn(kernel_x_start);
+                       map.virtual = __phys_to_virt(kernel_x_start);
+                       map.length = kernel_x_end - kernel_x_start;
+                       map.type = MT_MEMORY_RWX;
+
+                       create_mapping(&map);
+
+                       if (kernel_x_end < end) {
+                               map.pfn = __phys_to_pfn(kernel_x_end);
+                               map.virtual = __phys_to_virt(kernel_x_end);
+                               map.length = end - kernel_x_end;
+                               map.type = MT_MEMORY_RW;
+
+                               create_mapping(&map);
+                       }
+               }
        }
 }
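
For reference, a minimal userspace sketch of the splitting logic in
map_lowmem() above. The helpers (map_region, map_range) and the example
addresses are illustrative stand-ins, not kernel API; the RWX/RW
decisions mirror the patch.

#include <stdio.h>

#define SECTION_SIZE	0x100000ULL		/* 1MB sections on 32-bit ARM */
#define round_down(x, a)	((x) & ~((a) - 1))
#define round_up(x, a)		round_down((x) + (a) - 1, (a))

typedef unsigned long long paddr_t;		/* stand-in for phys_addr_t */

/* Stand-in for create_mapping(): report the mapping it would create. */
static void map_range(paddr_t start, paddr_t end, const char *type)
{
	printf("  %#011llx-%#011llx  MT_MEMORY_%s\n", start, end, type);
}

/* One memory bank [start, end) against the executable window
 * [x_start, x_end): the same three-way split as the patch. */
static void map_region(paddr_t start, paddr_t end,
		       paddr_t x_start, paddr_t x_end)
{
	if (end < x_start || start >= x_end) {
		map_range(start, end, "RWX");
		return;
	}
	if (start < x_start)			/* RW below the kernel */
		map_range(start, x_start, "RW");
	map_range(x_start, x_end, "RWX");	/* kernel text + init */
	if (x_end < end)			/* RW above the kernel */
		map_range(x_end, end, "RW");
}

int main(void)
{
	/* Hypothetical layout: one bank at 0x80000000, kernel text+init
	 * at 0x80008000..0x80c41000. */
	paddr_t x_start = round_down(0x80008000ULL, SECTION_SIZE);
	paddr_t x_end   = round_up(0x80c41000ULL, SECTION_SIZE);

	map_region(0x80000000ULL, 0xa0000000ULL, x_start, x_end);
	return 0;
}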