/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#ifndef Dprintk
#define Dprintk(x...)
#endif

const struct dma_mapping_ops* dma_ops;
EXPORT_SYMBOL(dma_ops);

static unsigned long dma_reserve __initdata;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the
 * physical space so we can cache the place of the first one and move around
 * without checking the pgd every time.
 */

void show_mem(void)
{
	long i, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	pg_data_t *pgdat;
	struct page *page;
	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pfn_to_page(pgdat->node_start_pfn + i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);
}
/* Allocate a page-table page: from the buddy allocator once it is up,
   from bootmem before that. */
static __init void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);
	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");

	Dprintk("spp_getpage %p\n", ptr);
	return ptr;
}
static __init void set_pte_phys(unsigned long vaddr,
				unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, new_pte;

	Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		pmd = (pmd_t *) spp_getpage();
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
		if (pmd != pmd_offset(pud, 0)) {
			printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0));
			return;
		}
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = (pte_t *) spp_getpage();
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
		if (pte != pte_offset_kernel(pmd, 0)) {
			printk("PAGETABLE BUG #02!\n");
			return;
		}
	}
	new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);

	pte = pte_offset_kernel(pmd, vaddr);
	if (!pte_none(*pte) &&
	    pte_val(*pte) != (pte_val(new_pte) & __supported_pte_mask))
		pte_ERROR(*pte);
	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
/* NOTE: this is meant to be run only at boot */
void __init
__set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, prot);
}
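
/*
 * Illustrative usage sketch (not part of the original file; FIX_EXAMPLE
 * and the physical address are hypothetical). A boot-time caller picks a
 * slot from enum fixed_addresses and maps a single page into it:
 *
 *	__set_fixmap(FIX_EXAMPLE, 0xfee00000, PAGE_KERNEL_NOCACHE);
 *	void __iomem *regs = (void __iomem *)fix_to_virt(FIX_EXAMPLE);
 */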
unsigned long __initdata table_start, table_end;

static __meminit void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC);
		*phys = __pa(adr);
		return adr;
	}

	if (pfn >= end_pfn)
		panic("alloc_low_page: ran out of memory");

	adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __meminit void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap(adr, PAGE_SIZE);
}
/* Must run before zap_low_mappings */
__init void *early_ioremap(unsigned long addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd, *last_pmd;
	int i, pmds;

	pmds = ((addr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	vaddr = __START_KERNEL_map;
	pmd = level2_kernel_pgt;
	last_pmd = level2_kernel_pgt + PTRS_PER_PMD - 1;
	/* Find a run of 'pmds' consecutive unused 2MB slots in the kernel
	   mapping and point them at the requested physical range. */
	for (; pmd <= last_pmd; pmd++, vaddr += PMD_SIZE) {
		for (i = 0; i < pmds; i++) {
			if (pmd_present(pmd[i]))
				goto next;
		}
		vaddr += addr & ~PMD_MASK;
		addr &= PMD_MASK;
		for (i = 0; i < pmds; i++, addr += PMD_SIZE)
			set_pmd(pmd + i, __pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
		__flush_tlb();
		return (void *)vaddr;
	next:
		;
	}
	printk("early_ioremap(0x%lx, %lu) failed\n", addr, size);
	return NULL;
}
/* To avoid virtual aliases later */
__init void early_iounmap(void *addr, unsigned long size)
{
	unsigned long vaddr;
	pmd_t *pmd;
	int i, pmds;

	vaddr = (unsigned long)addr;
	pmds = ((vaddr & ~PMD_MASK) + size + ~PMD_MASK) / PMD_SIZE;
	pmd = level2_kernel_pgt + pmd_index(vaddr);
	for (i = 0; i < pmds; i++)
		pmd_clear(pmd + i);
	__flush_tlb();
}
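
/*
 * Sketch of the intended pairing (illustrative; alloc_low_page() and
 * unmap_low_page() above are the real in-file users): map a physical page
 * while bootmem is not up yet, touch it, then unmap it so no virtual
 * alias survives into the final kernel mappings.
 *
 *	void *adr = early_ioremap(phys, PAGE_SIZE);
 *	if (adr) {
 *		memset(adr, 0, PAGE_SIZE);
 *		early_iounmap(adr, PAGE_SIZE);
 *	}
 */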
static void __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
{
	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		unsigned long entry;
		pmd_t *pmd = pmd_page + pmd_index(address);

		if (address >= end) {
			if (!after_bootmem)
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			break;
		}

		if (pmd_val(*pmd))
			continue;

		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
		entry &= __supported_pte_mask;
		set_pmd(pmd, __pmd(entry));
	}
}
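
/*
 * Worked example of the entry built above (illustrative arithmetic using
 * the standard x86 PDE bits): for address = 0x40000000, _KERNPG_TABLE
 * (0x063) | _PAGE_PSE (0x080) | _PAGE_GLOBAL (0x100) yields
 * entry = 0x400001e3; on a CPU with NX enabled, __supported_pte_mask also
 * keeps _PAGE_NX (bit 63), giving 0x80000000400001e3: a global,
 * non-executable 2MB mapping of the physical range at 1GB.
 */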
static void __meminit
phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	spin_lock(&init_mm.page_table_lock);
	phys_pmd_init(pmd, address, end);
	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
}
static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
{
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		if (addr >= end)
			break;

		if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			phys_pmd_update(pud, addr, end);
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		spin_lock(&init_mm.page_table_lock);
		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
		phys_pmd_init(pmd, addr, end);
		spin_unlock(&init_mm.page_table_lock);
		unmap_low_page(pmd);
	}
	__flush_tlb();
}
static void __init find_early_table_space(unsigned long end)
{
	unsigned long puds, pmds, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
		 round_up(pmds * sizeof(pmd_t), PAGE_SIZE);

	/* RED-PEN putting page tables only on node 0 could
	   cause a hotspot and fill up ZONE_DMA. The page tables
	   need roughly 0.5KB per GB. */
	start = 0x8000;
	table_start = find_e820_area(start, end, tables);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;

	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}
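
/*
 * Illustrative sizing (assuming 4KB pages, PUD_SHIFT == 30 and
 * PMD_SHIFT == 21): for end = 4GB, puds = 4 and pmds = 2048, so
 * tables = round_up(4 * 8, 4096) + round_up(2048 * 8, 4096)
 *        = 4KB + 16KB = 20KB carved out of e820 for the early tables.
 */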
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
   This runs before bootmem is initialized and gets pages directly from the
   physical memory. To access them they are temporarily mapped. */
void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
	unsigned long next;

	Dprintk("init_memory_mapping\n");

	/*
	 * Find space for the kernel direct mapping tables.
	 * Later we should allocate these tables in the local node of the memory
	 * mapped. Unfortunately this is done currently before the nodes are
	 * discovered.
	 */
	if (!after_bootmem)
		find_early_table_space(end);

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	for (; start < end; start = next) {
		unsigned long pud_phys;
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		if (after_bootmem)
			pud = pud_offset(pgd, start & PGDIR_MASK);
		else
			pud = alloc_low_page(&pud_phys);

		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		phys_pud_init(pud, __pa(start), __pa(next));
		if (!after_bootmem)
			set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
		unmap_low_page(pud);
	}

	if (!after_bootmem)
		asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
	__flush_tlb_all();
}
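
/*
 * Sketch of how this is driven (hedged; the exact call sites live outside
 * this file): setup_arch() is expected to map all of RAM once at boot,
 * and the memory hotplug path below reuses it for newly added ranges:
 *
 *	init_memory_mapping(0, end_pfn_map << PAGE_SHIFT);	boot, once
 *	init_memory_mapping(start, start + size - 1);		hotplug
 */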
void __cpuinit zap_low_mappings(int cpu)
{
	if (cpu == 0) {
		pgd_t *pgd = pgd_offset_k(0UL);
		pgd_clear(pgd);
	} else {
		/*
		 * For AP's, zap the low identity mappings by changing the cr3
		 * to init_level4_pgt and doing local flush tlb all
		 */
		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
	}
	__flush_tlb_all();
}
#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	memory_present(0, 0, end_pfn);
	sparse_init();
	free_area_init_nodes(max_zone_pfns);
}
#endif
/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
   from the CPU leading to inconsistent cache lines. address and size
   must be aligned to 2MB boundaries.
   Does nothing when the mapping doesn't exist. */
void __init clear_kernel_mapping(unsigned long address, unsigned long size)
{
	unsigned long end = address + size;

	BUG_ON(address & ~LARGE_PAGE_MASK);
	BUG_ON(size & ~LARGE_PAGE_MASK);

	for (; address < end; address += LARGE_PAGE_SIZE) {
		pgd_t *pgd = pgd_offset_k(address);
		pud_t *pud;
		pmd_t *pmd;
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, address);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, address);
		if (!pmd || pmd_none(*pmd))
			continue;
		if (0 == (pmd_val(*pmd) & _PAGE_PSE)) {
			/* Could handle this, but it should not happen currently. */
			printk(KERN_ERR
		"clear_kernel_mapping: mapping has been split. will leak memory\n");
			pmd_ERROR(*pmd);
		}
		set_pmd(pmd, __pmd(0));
	}
	__flush_tlb_all();
}
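
/*
 * Illustrative caller (hedged; the GART IOMMU setup does something along
 * these lines): after handing a 2MB-aligned window of RAM to a device,
 * drop it from the direct mapping so CPU prefetches cannot touch it.
 * bus_base and aperture_size are placeholder names.
 *
 *	clear_kernel_mapping((unsigned long)__va(bus_base), aperture_size);
 */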
/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Memory is added always to NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, (start + size - 1));

	ret = __add_pages(zone, start_pfn, nr_pages);
	if (ret)
		goto error;

	return ret;
error:
	printk("%s: Problem encountered in __add_pages!\n", __func__);
	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);

#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */
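
/*
 * Sketch of the add path implemented above (illustrative; the generic
 * add_memory() caller lives in mm/memory_hotplug.c): a hotplug driver
 * passes a node id and a physical range, which lands in arch_add_memory()
 * so the range gets mapped and its pages handed to the allocator:
 *
 *	arch_add_memory(nid, start, size);
 *	  -> init_memory_mapping(start, start + size - 1)  extend direct map
 *	  -> __add_pages(zone, start_pfn, nr_pages)         online struct pages
 */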
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
/*
 * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
 * just online the pages.
 */
int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
{
	int err = -EIO;
	unsigned long pfn;
	unsigned long total = 0, mem = 0;
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		if (pfn_valid(pfn)) {
			online_page(pfn_to_page(pfn));
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
		z->zone_pgdat->node_spanned_pages += total;
		z->zone_pgdat->node_present_pages += mem;
	}
	return err;
}
#endif
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
			 kcore_vsyscall;
void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;

	pci_iommu_alloc();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif
	reservedpages = end_pfn - totalram_pages -
					absent_pages_in_range(0, end_pfn);

	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);
	kclist_add(&kcore_kernel, &_stext, _end - _stext);
	kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
				 VSYSCALL_END - VSYSCALL_START);

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		end_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);

#ifdef CONFIG_SMP
	/*
	 * Sync boot_level4_pgt mappings with the init_level4_pgt
	 * except for the low identity mappings which are already zapped
	 * in init_level4_pgt. This sync-up is essential for AP's bringup
	 */
	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
#endif
}
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
}
void free_initmem(void)
{
	memset(__initdata_begin, POISON_FREE_INITDATA,
		__initdata_end - __initdata_begin);
	free_init_pages("unused kernel memory",
			(unsigned long)(&__init_begin),
			(unsigned long)(&__init_end));
}
#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
	unsigned long addr = (unsigned long)__start_rodata;

	for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
		change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);

	printk("Write protecting the kernel read-only data: %luk\n",
		(__end_rodata - __start_rodata) >> 10);

	/*
	 * change_page_attr_addr() requires a global_flush_tlb() call after it.
	 * We do this after the printk so that if something went wrong in the
	 * change, the printk gets out at least to give a better debug hint
	 * of who is the culprit.
	 */
	global_flush_tlb();
}
#endif
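
/*
 * Effect sketch (illustrative, deliberately buggy code): once
 * mark_rodata_ro() has run, a stray store like the one below takes a
 * protection fault instead of silently corrupting read-only data.
 *
 *	char *p = (char *)__start_rodata;
 *	*p = 0;		faults after mark_rodata_ro()
 */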
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
{
#ifdef CONFIG_NUMA
	int nid = phys_to_nid(phys);
#endif
	unsigned long pfn = phys >> PAGE_SHIFT;
	if (pfn >= end_pfn) {
		/* This can happen with kdump kernels when accessing firmware
		   tables. */
		if (pfn < end_pfn_map)
			return;
		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
				phys, len);
		return;
	}

	/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
	reserve_bootmem_node(NODE_DATA(nid), phys, len);
#else
	reserve_bootmem(phys, len);
#endif
	if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
		dma_reserve += len / PAGE_SIZE;
		set_dma_reserve(dma_reserve);
	}
}
int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Non-canonical addresses can never be mapped. */
	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	return pfn_valid(pte_pfn(*pte));
}
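
/*
 * Typical use (hedged sketch; readers such as /proc/kcore do something
 * similar): validate a candidate kernel virtual address before touching
 * it, since the direct map and vmalloc space may contain holes.
 *
 *	if (kern_addr_valid(addr))
 *		memcpy(buf, (void *)addr, len);
 */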
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

extern int exception_trace, page_fault_trace;

static ctl_table debug_table2[] = {
	{
		.ctl_name	= 99,
		.procname	= "exception-trace",
		.data		= &exception_trace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}
};

static ctl_table debug_root_table2[] = {
	{
		.ctl_name	= CTL_DEBUG,
		.procname	= "debug",
		.mode		= 0555,
		.child		= debug_table2
	},
	{}
};

static __init int x8664_sysctl_init(void)
{
	register_sysctl_table(debug_root_table2);
	return 0;
}
__initcall(x8664_sysctl_init);
#endif
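
/*
 * The table above surfaces the flag as /proc/sys/debug/exception-trace.
 * Illustrative use from a shell (not part of this file):
 *
 *	echo 1 > /proc/sys/debug/exception-trace
 */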
/* A pseudo VMA to allow ptrace access for the vsyscall page.  This only
   covers the 64bit vsyscall page now. 32bit has a real VMA now and does
   not need special handling anymore. */

static struct vm_area_struct gate_vma = {
	.vm_start = VSYSCALL_START,
	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
	.vm_page_prot = PAGE_READONLY_EXEC,
	.vm_flags = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_tsk_thread_flag(tsk, TIF_IA32))
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(task);
	if (!vma)
		return 0;
	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/* Use this when you have no reliable task/vma, typically from interrupt
 * context.  It is less reliable than using the task's vma and may give
 * false positives.
 */
int in_gate_area_no_task(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}