/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/dma-mapping.h>
#include <asm/swiotlb.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

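/* Dump a per-node summary of memory usage to the console. */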
void show_mem(void)
{
        long i, total = 0, reserved = 0;
        long shared = 0, cached = 0;
        pg_data_t *pgdat;
        struct page *page;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

        for_each_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pfn_to_page(pgdat->node_start_pfn + i);
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
        }
        printk(KERN_INFO "%lu pages of RAM\n", total);
        printk(KERN_INFO "%lu reserved pages\n", reserved);
        printk(KERN_INFO "%lu pages shared\n", shared);
        printk(KERN_INFO "%lu pages swap cached\n", cached);
}

/* References to section boundaries */

int after_bootmem;

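/*
 * Allocate a page for a low-level page table. Before mem_init() runs
 * the page must come from the bootmem allocator; afterwards the normal
 * page allocator is available (after_bootmem is set in mem_init()).
 */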
static void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
                panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : "");

        Dprintk("spp_getpage %p\n", ptr);
        return ptr;
}

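/*
 * Install a kernel mapping for a single fixmap address: walk the
 * kernel page tables for vaddr, allocating the intermediate pud/pmd/pte
 * levels as needed, and point the final pte at phys with the given
 * protection bits.
 */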
static void set_pte_phys(unsigned long vaddr,
                         unsigned long phys, pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, new_pte;

        Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                pmd = (pmd_t *) spp_getpage();
                set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
                if (pmd != pmd_offset(pud, 0)) {
                        printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
                        return;
                }
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                pte = (pte_t *) spp_getpage();
                set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
                if (pte != pte_offset_kernel(pmd, 0)) {
                        printk("PAGETABLE BUG #02!\n");
                        return;
                }
        }
        new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

        pte = pte_offset_kernel(pmd, vaddr);
        if (!pte_none(*pte) &&
            pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
                pte_ERROR(*pte);
        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/* NOTE: this is meant to be run only at boot */
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                printk("Invalid __set_fixmap\n");
                return;
        }
        set_pte_phys(address, phys, prot);
}

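/*
 * While the direct mapping is being built it cannot itself be used to
 * reach freshly allocated page table pages. Instead those pages are
 * accessed through two temporary 2MB windows whose PMDs (set up in
 * head.S) are repointed as needed.
 */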
unsigned long __initdata table_start, table_end;

extern pmd_t temp_boot_pmds[];

static struct temp_map {
        pmd_t *pmd;
        void *address;
        int allocated;
} temp_mappings[] __initdata = {
        { &temp_boot_pmds[0], (void *)(40UL * 1024 * 1024) },
        { &temp_boot_pmds[1], (void *)(42UL * 1024 * 1024) },
        {}
};

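/*
 * Grab the next free page below end_pfn for an early page table and
 * map it through a free temporary window. Returns the virtual address
 * inside the window; the window index and the physical address are
 * passed back through *index and *phys so the caller can unmap later.
 */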
static __init void *alloc_low_page(int *index, unsigned long *phys)
{
        struct temp_map *ti;
        int i;
        unsigned long pfn = table_end++, paddr;
        void *adr;

        if (pfn >= end_pfn)
                panic("alloc_low_page: ran out of memory");

        for (i = 0; temp_mappings[i].allocated; i++) {
                if (!temp_mappings[i].pmd)
                        panic("alloc_low_page: ran out of temp mappings");
        }
        ti = &temp_mappings[i];
        paddr = (pfn << PAGE_SHIFT) & PMD_MASK;
        set_pmd(ti->pmd, __pmd(paddr | _KERNPG_TABLE | _PAGE_PSE));
        ti->allocated = 1;
        __flush_tlb();

        adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
        *index = i;
        *phys = pfn * PAGE_SIZE;
        return adr;
}

static __init void unmap_low_page(int i)
{
        struct temp_map *ti = &temp_mappings[i];
        set_pmd(ti->pmd, __pmd(0));
        ti->allocated = 0;
}

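/*
 * Fill one PUD level of the direct mapping: for every 1GB slot from
 * address to end that the e820 map marks usable, allocate a PMD page
 * and populate it with 2MB global pages.
 */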
static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
        long i, j;

        i = pud_index(address);
        pud = pud + i;
        for (; i < PTRS_PER_PUD; pud++, i++) {
                int map;
                unsigned long paddr, pmd_phys;
                pmd_t *pmd;

                paddr = address + i*PUD_SIZE;
                if (paddr >= end) {
                        for (; i < PTRS_PER_PUD; i++, pud++)
                                set_pud(pud, __pud(0));
                        break;
                }

                if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
                        set_pud(pud, __pud(0));
                        continue;
                }

                pmd = alloc_low_page(&map, &pmd_phys);
                set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
                for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
                        unsigned long pe;

                        if (paddr >= end) {
                                for (; j < PTRS_PER_PMD; j++, pmd++)
                                        set_pmd(pmd, __pmd(0));
                                break;
                        }
                        pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
                        pe &= __supported_pte_mask;
                        set_pmd(pmd, __pmd(pe));
                }
                unmap_low_page(map);
        }
        __flush_tlb();
}

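/*
 * Upper-bound the space the direct-mapping tables will need (one pud_t
 * per 1GB, one pmd_t per 2MB, rounded up to whole pages) and claim a
 * matching e820 region for them; table_start/table_end then serve as a
 * simple bump allocator for alloc_low_page().
 */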
static void __init find_early_table_space(unsigned long end)
{
        unsigned long puds, pmds, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
                 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

        /* RED-PEN putting page tables only on node 0 could
           cause a hotspot and fill up ZONE_DMA. The page tables
           need roughly 0.5KB per GB. */
        start = 0x8000;
        table_start = find_e820_area(start, end, tables);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
}

/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __init init_memory_mapping(unsigned long start, unsigned long end)
{
        unsigned long next;

        Dprintk("init_memory_mapping\n");

        /*
         * Find space for the kernel direct mapping tables.
         * Later we should allocate these tables in the local node of the memory
         * mapped. Unfortunately this is done currently before the nodes are
         * discovered.
         */
        find_early_table_space(end);

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);

        for (; start < end; start = next) {
                int map;
                unsigned long pud_phys;
                pud_t *pud = alloc_low_page(&map, &pud_phys);
                next = start + PGDIR_SIZE;
                if (next > end)
                        next = end;
                phys_pud_init(pud, __pa(start), __pa(next));
                set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
                unmap_low_page(map);
        }

        asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
        __flush_tlb_all();
        early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n", end,
                table_start<<PAGE_SHIFT,
                table_end<<PAGE_SHIFT);
}

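/*
 * Remove the boot-time identity mapping of low memory. The BSP clears
 * its pgd entry directly; APs just switch cr3 to init_level4_pgt, from
 * which the low mappings have already been zapped.
 */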
void __cpuinit zap_low_mappings(int cpu)
{
        if (cpu == 0) {
                pgd_t *pgd = pgd_offset_k(0UL);
                pgd_clear(pgd);
        } else {
                /*
                 * For AP's, zap the low identity mappings by changing the cr3
                 * to init_level4_pgt and doing local flush tlb all
                 */
                asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
        }
        __flush_tlb_all();
}

/* Compute zone sizes for the DMA and DMA32 zones in a node. */
__init void
size_zones(unsigned long *z, unsigned long *h,
           unsigned long start_pfn, unsigned long end_pfn)
{
        int i;
        unsigned long w;

        for (i = 0; i < MAX_NR_ZONES; i++)
                z[i] = 0;

        if (start_pfn < MAX_DMA_PFN)
                z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
        if (start_pfn < MAX_DMA32_PFN) {
                unsigned long dma32_pfn = MAX_DMA32_PFN;
                if (dma32_pfn > end_pfn)
                        dma32_pfn = end_pfn;
                z[ZONE_DMA32] = dma32_pfn - start_pfn;
        }
        z[ZONE_NORMAL] = end_pfn - start_pfn;

        /* Remove lower zones from higher ones. */
        w = 0;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (z[i])
                        z[i] -= w;
                w += z[i];
        }

        /* Compute holes */
        w = start_pfn;
        for (i = 0; i < MAX_NR_ZONES; i++) {
                unsigned long s = w;
                w += z[i];
                h[i] = e820_hole_size(s, w);
        }

        /* Add the space needed for mem_map to the holes too. */
        for (i = 0; i < MAX_NR_ZONES; i++)
                h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;

        /* The 16MB DMA zone has the kernel and other misc mappings.
           Account them too */
        if (h[ZONE_DMA]) {
                h[ZONE_DMA] += dma_reserve;
                if (h[ZONE_DMA] >= z[ZONE_DMA]) {
                        printk(KERN_WARNING
                                "Kernel too large and filling up ZONE_DMA?\n");
                        h[ZONE_DMA] = z[ZONE_DMA];
                }
        }
}

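/* Set up the node 0 free lists from the zone/hole sizes computed above. */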
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
        unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];

        size_zones(zones, holes, 0, end_pfn);
        free_area_init_node(0, NODE_DATA(0), zones,
                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
}
#endif

/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        BUG_ON(address & ~LARGE_PAGE_MASK);
        BUG_ON(size & ~LARGE_PAGE_MASK);

        for (; address < end; address += LARGE_PAGE_SIZE) {
                pgd_t *pgd = pgd_offset_k(address);
                pud_t *pud;
                pmd_t *pmd;

                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, address);
                if (pud_none(*pud))
                        continue;
                pmd = pmd_offset(pud, address);
                if (!pmd || pmd_none(*pmd))
                        continue;
                if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
                        /* Could handle this, but it should not happen currently. */
                        printk(KERN_ERR
                               "clear_kernel_mapping: mapping has been split. will leak memory\n");
                        pmd_ERROR(*pmd);
                }
                set_pmd(pmd, __pmd(0));
        }
        __flush_tlb_all();
}

static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
                         kcore_vsyscall;

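/*
 * Final memory setup: hand all bootmem pages over to the buddy
 * allocator, account reserved pages, and report the memory layout.
 */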
void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif

        /* How many end-of-memory variables you have, grandma! */
        max_low_pfn = end_pfn;
        max_pfn = end_pfn;
        num_physpages = end_pfn;
        high_memory = (void *) __va(end_pfn * PAGE_SIZE);

        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif
        reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);

        after_bootmem = 1;

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);
        kclist_add(&kcore_kernel, &_stext, _end - _stext);
        kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                   VSYSCALL_END - VSYSCALL_START);

        printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                end_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);

#ifdef CONFIG_SMP
        /*
         * Sync boot_level4_pgt mappings with the init_level4_pgt
         * except for the low identity mappings which are already zapped
         * in init_level4_pgt. This sync-up is essential for AP's bringup
         */
        memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}

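/*
 * Return the .init sections to the page allocator, poisoning the freed
 * pages (0xcc, and 0xba for .init.data) to catch stale references.
 */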
void free_initmem(void)
{
        unsigned long addr;

        addr = (unsigned long)(&__init_begin);
        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                set_page_count(virt_to_page(addr), 1);
                memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
        printk("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}

#ifdef CONFIG_DEBUG_RODATA

extern char __start_rodata, __end_rodata;

void mark_rodata_ro(void)
{
        unsigned long addr = (unsigned long)&__start_rodata;

        for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
                change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

        printk("Write protecting the kernel read-only data: %luk\n",
               (&__end_rodata - &__start_rodata) >> 10);

        /*
         * change_page_attr_addr() requires a global_flush_tlb() call after it.
         * We do this after the printk so that if something went wrong in the
         * change, the printk gets out at least to give a better debug hint
         * of who is the culprit.
         */
        global_flush_tlb();
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < (unsigned long)&_end)
                return;
        printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
#endif

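/*
 * Reserve a boot-time memory range on the right node and remember how
 * much of it falls in ZONE_DMA, so size_zones() can account for it.
 */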
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
        /* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
        int nid = phys_to_nid(phys);
        reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
        reserve_bootmem(phys, len);
#endif
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
                dma_reserve += len / PAGE_SIZE;
}

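/*
 * Check whether addr is a canonical kernel virtual address backed by a
 * valid page table entry (handling 2MB large pages as well).
 */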
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
        return pfn_valid(pte_pfn(*pte));
}

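/* Expose the exception-trace debug flag under /proc/sys/debug. */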
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
        { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
          proc_dointvec },
        { 0, }
};

static ctl_table debug_root_table2[] = {
        { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
          .child = debug_table2 },
        { 0 },
};

static __init int x8664_sysctl_init(void)
{
        register_sysctl_table(debug_root_table2, 1);
        return 0;
}
__initcall(x8664_sysctl_init);
#endif

/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
        .vm_start = VSYSCALL_START,
        .vm_end = VSYSCALL_END,
        .vm_page_prot = PAGE_READONLY
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(tsk, TIF_IA32))
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(task);

        if (!vma)
                return 0;
        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context. It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}