/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __read_mostly = -1;
phys_addr_t arm64_dma_phys_limit __read_mostly;

#ifdef CONFIG_BLK_DEV_INITRD
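/*
 * Parse the "initrd=<start>,<size>" early parameter: a physical start
 * address followed by a size, both in any form memparse() accepts
 * (e.g. "0x82000000,16M").
 */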
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		initrd_start = start;
		initrd_end = start + size;
	}
	return 0;
}
early_param("initrd", early_initrd);
#endif

#ifdef CONFIG_KEXEC_CORE
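/*
 * Note: parse_crashkernel() understands "crashkernel=<size>[@<base>]";
 * when no base is given it reports crash_base == 0 and the region is
 * allocated below ARCH_LOW_ADDRESS_LIMIT further down.
 */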
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by the dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
						    crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}

static void __init kexec_reserve_crashkres_pages(void)
{
#ifdef CONFIG_HIBERNATION
	phys_addr_t addr;
	struct page *page;

	if (!crashk_res.end)
		return;

	/*
	 * To reduce the size of the hibernation image, all the pages are
	 * marked as Reserved initially.
	 */
	for (addr = crashk_res.start; addr < (crashk_res.end + 1);
			addr += PAGE_SIZE) {
		page = phys_to_page(addr);
		SetPageReserved(page);
	}
#endif
}
#else
static void __init reserve_crashkernel(void)
{
}

static void __init kexec_reserve_crashkres_pages(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a
 * dump capture kernel to access the system memory of the primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
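/*
 * Example: with DRAM starting at 0x80_0000_0000, the offset below is
 * 0x80_0000_0000 and the returned limit is 0x81_0000_0000 (or the end
 * of DRAM if that is lower), i.e. the first 4GB of the DRAM window.
 */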
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);

	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));
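
	/*
	 * zhole_size starts out as a copy of each zone's full span; the
	 * loop below subtracts every memblock region that intersects a
	 * zone, so what remains is the size of the holes in each zone.
	 */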
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
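
/*
 * PFN_MASK covers every page frame number whose physical address is
 * representable in 64 bits; a pfn with bits set outside the mask can
 * never correspond to real memory.
 */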
int pfn_valid(unsigned long pfn)
{
	return (pfn & PFN_MASK) == pfn &&
	       memblock_is_map_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;
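
	/*
	 * PAGE_OFFSET marks the start of the linear map in the upper half
	 * of the VA space, so negating it as a signed quantity gives the
	 * distance from PAGE_OFFSET to 2^64, i.e. the linear region size.
	 */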

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		memblock_enforce_memory_limit(memory_limit);
		memblock_add(__pa(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
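		/*
		 * memstart_offset_seed is a 16-bit value, so the shift by
		 * 16 below scales the number of candidate slots by
		 * seed / 65536, picking one ARM64_MEMSTART_ALIGN-aligned
		 * offset within the spare range.
		 */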
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range = range / ARM64_MEMSTART_ALIGN + 1;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);

		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(initrd_start);
		initrd_end = __phys_to_virt(initrd_end);
	}
#endif

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	reserve_elfcorehdr();

	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
	max_pfn = max_low_pfn = max;
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;
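
	/*
	 * pfn_to_page() is only guaranteed to work on valid PFNs, so both
	 * pointers are derived from the last valid PFN below each bound
	 * and then advanced by one page.
	 */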

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	kexec_reserve_crashkres_pages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLG(b, t) b, t, ((t) - (b)) >> 30
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
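
	/*
	 * Each ML* macro expands to three pr_notice() arguments: the start
	 * and end of a region plus its size in KB, MB or GB respectively.
	 */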

	pr_notice("Virtual kernel memory layout:\n"
#ifdef CONFIG_KASAN
		  "    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n"
#endif
		  "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
		  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
		  "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
		  "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
		  "    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n"
		  "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
		  "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
#endif
		  "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
		  "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
		  "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
#ifdef CONFIG_KASAN
		  MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
#endif
		  MLM(MODULES_VADDR, MODULES_END),
		  MLG(VMALLOC_START, VMALLOC_END),
		  MLK_ROUNDUP(__init_begin, __init_end),
		  MLK_ROUNDUP(_text, _etext),
		  MLK_ROUNDUP(__start_rodata, __init_begin),
		  MLK_ROUNDUP(_sdata, _edata),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		  MLG(VMEMMAP_START,
		      VMEMMAP_START + VMEMMAP_SIZE),
		  MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
		      (unsigned long)virt_to_page(high_memory)),
#endif
		  MLK(FIXADDR_START, FIXADDR_TOP),
		  MLM(PCI_IO_START, PCI_IO_END),
		  MLM(__phys_to_virt(memblock_start_of_DRAM()),
		      (unsigned long)high_memory));

#undef MLK
#undef MLM
#undef MLG
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
	BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
	BUG_ON(TASK_SIZE_64 > MODULES_VADDR);

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_initmem_default(0);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);