/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

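/*
 * Choose the pgprot for userspace mappings of physical memory (e.g. via
 * /dev/mem): addresses that are not RAM are mapped non-cacheable, O_SYNC
 * mappings get write-combine, and everything else keeps the caller's
 * attributes.
 */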
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

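/*
 * Allocate a zeroed page from memblock for use as an early page table.
 * The new table is written through the fixmap, since at this point it may
 * not yet be covered by the linear mapping.
 */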
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

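/*
 * Populate the PTE level for [addr, end) with mappings of the given pfn
 * range. If the PMD entry is empty or currently a block mapping, a new PTE
 * table is allocated via pgtable_alloc() (splitting the block if needed).
 */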
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

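/*
 * Break a 1GB block mapping down into PMD-level blocks, preserving the
 * original output address and attributes.
 */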
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

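/*
 * Populate the PMD level for [addr, end), using section (block) mappings
 * where the range and physical address allow it and falling back to PTEs
 * otherwise.
 */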
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

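/*
 * A 1GB block can only be used with the 4K granule (PAGE_SHIFT == 12) and
 * when the virtual range and the physical address are all 1GB aligned.
 */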
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				phys_addr_t phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = pud_page_paddr(old_pud);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping from 'phys' to 'virt' of the given size.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
			     phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     phys_addr_t (*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

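/*
 * Once the core MM is up, intermediate tables come from the page allocator
 * rather than from memblock.
 */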
static phys_addr_t late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

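/*
 * Early (boot-time) variant: map [phys, phys + size) at 'virt' in the
 * kernel page tables, allocating any intermediate tables from memblock.
 */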
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
			 size, prot, early_pgtable_alloc);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
			 late_pgtable_alloc);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	return __create_mapping(&init_mm, pgd_offset_k(virt),
				phys, virt, size, prot, late_pgtable_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped.
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				kernel_x_start - start,
				PAGE_KERNEL);
		create_mapping(kernel_x_start,
				__phys_to_virt(kernel_x_start),
				kernel_x_end - kernel_x_start,
				PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				__phys_to_virt(kernel_x_end),
				end - kernel_x_end,
				PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

		__map_memblock(start, end);
	}
}

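/*
 * With CONFIG_DEBUG_RODATA the kernel text was mapped at SWAPPER_BLOCK_SIZE
 * granularity above; remap the partial blocks at either end of the
 * executable region with PAGE_KERNEL so that only the kernel text itself
 * remains executable.
 */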
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SWAPPER_BLOCK_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
							  SWAPPER_BLOCK_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
	}
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
				PAGE_KERNEL_ROX);
}
#endif

void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			(unsigned long)__init_end - (unsigned long)__init_begin,
			PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	map_mem();
	fixup_executable();

	bootmem_init();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

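/*
 * Sparse vmemmap support: when the swapper uses section maps, back the
 * struct page array with PMD-sized blocks; otherwise fall back to base
 * pages.
 */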
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

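/*
 * Statically allocated translation tables used to map the fixmap region
 * before the normal page table allocators are available.
 */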
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

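/*
 * Wire the statically allocated bm_pud/bm_pmd/bm_pte tables into the
 * swapper page tables so that __set_fixmap() can be used before the
 * normal page table allocators are up.
 */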
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}

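/*
 * Map the device tree blob at the FIX_FDT fixmap slot and reserve its
 * memory in memblock. Returns the virtual address of the FDT, or NULL if
 * the blob fails the alignment, header or size checks.
 */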
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}