/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 *  Memory region support
 *	David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 *  Added E820 sanitization routine (removes overlapping memory regions);
 *	Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 *  Moved CPU detection code to cpu/${cpu}.c
 *	Patrick Mochel <mochel@osdl.org>, March 2002
 *
 *  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *	Alex Achenbach <xela@slit.de>, December 2002.
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
#include <linux/dma-contiguous.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/delay.h>

#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <linux/percpu.h>
#include <linux/crash_dump.h>
#include <linux/tboot.h>
#include <linux/jiffies.h>
#include <video/edid.h>

#include <asm/realmode.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/sections.h>
#include <asm/io_apic.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/vsyscall.h>
#include <asm/iommu.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/paravirt.h>
#include <asm/hypervisor.h>
#include <asm/olpc_ofw.h>
#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#include <asm/amd_nb.h>
#include <asm/alternative.h>
#include <asm/prom.h>
/*
 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
 * max_pfn_mapped:     highest direct mapped pfn over 4GB
 *
 * The direct mapping only covers E820_RAM regions, so the ranges and gaps are
 * represented by pfn_mapped
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;
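
/*
 * Example: a pfn ("page frame number") is a physical address shifted down
 * by PAGE_SHIFT, so with 4 KiB pages the 4 GiB boundary is pfn 0x100000.
 * A machine with 8 GiB of RAM fully direct-mapped would see max_pfn_mapped
 * around 0x200000 (a bit higher in practice, since the PCI hole below
 * 4 GiB pushes some RAM upwards), and max_low_pfn_mapped just below
 * 0x100000.
 */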
#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif

static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;
#ifdef CONFIG_X86_64
int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

int default_check_phys_apicid_present(int phys_apicid)
{
	return __default_check_phys_apicid_present(phys_apicid);
}
#endif
struct boot_params boot_params;
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __cpuinitdata = {0, 0, 0, 0, -1, 1, 0, 0, -1};
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {0, 0, 0, 0, -1, 1, 0, 0, -1};
EXPORT_SYMBOL(boot_cpu_data);

unsigned int def_to_bigsmp;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif
#else
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.x86_phys_bits = MAX_PHYSMEM_BITS,
};
EXPORT_SYMBOL(boot_cpu_data);
#endif
#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
unsigned long mmu_cr4_features;
#else
unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void __init copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif
void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);
	BUG_ON(align & mask);

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}
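
/*
 * The rounding in extend_brk() is the usual power-of-two align-up idiom:
 * for align == 64 (mask == 0x3f), a _brk_end of 0x1041 becomes
 * (0x1041 + 0x3f) & ~0x3f == 0x1080.  BUG_ON(align & mask) rejects
 * non-power-of-two alignments, for which the idiom would be wrong.
 */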
#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif
static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa(_brk_start),
				 __pa(_brk_end) - __pa(_brk_start));

	/* Mark brk area as locked down and no longer taking any
	   new allocations */
	_brk_start = 0;
}
#ifdef CONFIG_BLK_DEV_INITRD

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
	u64 area_size     = PAGE_ALIGN(ramdisk_size);
	u64 ramdisk_here;
	unsigned long slop, clen, mapaddr;
	char *p, *q;

	/* We need to move the initrd down into directly mapped mem */
	ramdisk_here = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
					      area_size, PAGE_SIZE);

	if (!ramdisk_here)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	/* Note: this includes all the mem currently occupied by
	   the initrd, we rely on that fact to keep the data intact. */
	memblock_reserve(ramdisk_here, area_size);
	initrd_start = ramdisk_here + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       ramdisk_here, ramdisk_here + ramdisk_size - 1);
	q = (char *)initrd_start;

	/* Copy the initrd */
	while (ramdisk_size) {
		slop = ramdisk_image & ~PAGE_MASK;
		clen = ramdisk_size;
		if (clen > MAX_MAP_CHUNK-slop)
			clen = MAX_MAP_CHUNK-slop;
		mapaddr = ramdisk_image & PAGE_MASK;
		p = early_memremap(mapaddr, clen+slop);
		memcpy(q, p+slop, clen);
		early_iounmap(p, clen+slop);
		q += clen;
		ramdisk_image += clen;
		ramdisk_size  -= clen;
	}

	ramdisk_image = boot_params.hdr.ramdisk_image;
	ramdisk_size  = boot_params.hdr.ramdisk_size;
	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		ramdisk_here, ramdisk_here + ramdisk_size - 1);
}
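
/*
 * The copy above runs in MAX_MAP_CHUNK pieces because early_memremap()
 * works out of a small, fixed pool of fixmap slots (NR_FIX_BTMAPS pages):
 * an initrd of tens of megabytes cannot be mapped in one go this early in
 * boot, so each iteration maps one chunk, copies it, and unmaps it again.
 */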
static u64 __init get_mem_size(unsigned long limit_pfn)
{
	int i;
	u64 mapped_pages = 0;
	unsigned long start_pfn, end_pfn;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		mapped_pages += end_pfn - start_pfn;
	}

	return mapped_pages << PAGE_SHIFT;
}
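
/*
 * Clamping both ends of every memblock range to limit_pfn means a range
 * lying entirely above the limit contributes end_pfn - start_pfn == 0,
 * while one straddling it contributes only its portion below the limit;
 * e.g. with limit_pfn = 0x100000, a range [0xf0000, 0x110000) counts as
 * 0x10000 pages.
 */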
static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;
	u64 ramdisk_size  = boot_params.hdr.ramdisk_size;
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	u64 mapped_size;

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	mapped_size = get_mem_size(max_pfn_mapped);
	if (ramdisk_size >= (mapped_size>>1))
		panic("initrd too large to handle, "
		      "disabling initrd (%lld needed, %lld available)\n",
		      ramdisk_size, mapped_size>>1);

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
	       ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		/*
		 * don't need to reserve again, already reserved early
		 * in i386_start_kernel
		 */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end   = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
#else
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	if (boot_params.hdr.version < 0x0209)
		return;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, map_len;

		map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
			      (u64)sizeof(struct setup_data));
		data = early_memremap(pa_data, map_len);
		data_len = data->len + sizeof(struct setup_data);
		if (data_len > map_len) {
			early_iounmap(data, map_len);
			data = early_memremap(pa_data, data_len);
			map_len = data_len;
		}

		switch (data->type) {
		case SETUP_E820_EXT:
			parse_e820_ext(data);
			break;
		default:
			break;
		}
		pa_data = data->next;
		early_iounmap(data, map_len);
	}
}
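
/*
 * setup_data entries form a singly linked list in physical memory: each
 * struct setup_data (see <asm/bootparam.h>) carries a 64-bit physical
 * "next" pointer, a type, a length and a payload, with the list head in
 * boot_params.hdr.setup_data.  That is why each node has to be
 * early_memremap()'d before it can even be sized, and remapped once its
 * real length is known.
 */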
static void __init e820_reserve_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;
	int found = 0;

	if (boot_params.hdr.version < 0x0209)
		return;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		e820_update_range(pa_data, sizeof(*data)+data->len,
				  E820_RAM, E820_RESERVED_KERN);
		found = 1;
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
	if (!found)
		return;

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("reserve setup_data");
}
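
/*
 * sanitize_e820_map() sorts the table and resolves overlapping entries,
 * so after the E820_RESERVED_KERN updates above the map is consistent
 * again before being copied into e820_saved and printed.
 */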
static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	if (boot_params.hdr.version < 0x0209)
		return;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		memblock_reserve(pa_data, sizeof(*data) + data->len);
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
}
/*
 * --------- Crashkernel reservation ------------------------------
 */

#ifdef CONFIG_KEXEC

/*
 * Keep the crash kernel below this limit.  On 32 bits earlier kernels
 * would limit the kernel to the low 512 MiB due to mapping restrictions.
 * On 64 bits, kexec-tools currently limits us to 896 MiB; increase this
 * limit once kexec-tools are fixed.
 */
#ifdef CONFIG_X86_32
# define CRASH_KERNEL_ADDR_MAX	(512 << 20)
#else
# define CRASH_KERNEL_ADDR_MAX	(896 << 20)
#endif
static void __init reserve_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = memblock_phys_mem_size();

	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	/* 0 means: find the address automatically */
	if (crash_base <= 0) {
		const unsigned long long alignment = 16<<20;	/* 16M */

		/*
		 * kexec wants bzImage below CRASH_KERNEL_ADDR_MAX
		 */
		crash_base = memblock_find_in_range(alignment,
			       CRASH_KERNEL_ADDR_MAX, crash_size, alignment);

		if (!crash_base) {
			pr_info("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
				 crash_base + crash_size, crash_size, 1<<20);
		if (start != crash_base) {
			pr_info("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	memblock_reserve(crash_base, crash_size);

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			"for crashkernel (System RAM: %ldMB)\n",
			(unsigned long)(crash_size >> 20),
			(unsigned long)(crash_base >> 20),
			(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end   = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
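
/*
 * Typical usage: "crashkernel=128M" on the command line makes
 * parse_crashkernel() return crash_size = 128 MiB with crash_base = 0,
 * taking the automatic-placement branch above, while "crashkernel=128M@16M"
 * pins crash_base to 16 MiB and takes the fixed-address branch instead.
 */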
#else
static void __init reserve_crashkernel(void)
{
}
#endif
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
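
/*
 * These are the fixed legacy PC/AT port ranges (8237 DMA controllers,
 * 8259 PICs, 8253/8254 timer, keyboard controller, FPU error ports);
 * claiming them as IORESOURCE_BUSY up front keeps them visible in
 * /proc/ioports and out of reach of PCI resource allocation.
 */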
void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource,
				 &standard_io_resources[i]);
}
static __init void reserve_ibft_region(void)
{
	unsigned long addr, size = 0;

	addr = find_ibft_region(&size);

	if (size)
		memblock_reserve(addr, size);
}
static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Assume no if something weird is going on with PCI */
	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}
/*
 * Sandy Bridge graphics has trouble with certain ranges, exclude
 * them from allocation.
 */
static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/*
	 * Reserve all memory below the 1 MB mark that has not
	 * already been reserved.
	 */
	memblock_reserve(0, 1<<20);

	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}
/*
 * Here we put platform-specific memory range workarounds, i.e.
 * memory known to be corrupt or otherwise in need to be reserved on
 * specific platforms.
 *
 * If this gets used more widely it could use a real dispatch mechanism.
 */
static void __init trim_platform_memory_ranges(void)
{
	trim_snb_memory();
}
static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4 KB of memory:
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 *
	 * This typically reserves additional memory (64 KiB by default)
	 * since some BIOSes are known to corrupt low memory. See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820_update_range(0, ALIGN(reserve_low, PAGE_SIZE),
			  E820_RAM, E820_RESERVED);

	/*
	 * special case: some BIOSes report the PC BIOS
	 * area (640 KB -> 1 MB) as RAM even though it is not.
	 * Take them out.
	 */
	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
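
/*
 * With the default CONFIG_X86_RESERVE_LOW=64, the update above turns the
 * first 64 KiB from E820_RAM into E820_RESERVED; "reservelow=640k" on the
 * command line (see parse_reservelow below) would extend that to all of
 * conventional memory.
 */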
static int __init parse_reservelow(char *p)
{
	unsigned long long size;

	if (!p)
		return -EINVAL;

	size = memparse(p, &p);

	/* clamp to something sensible: at least one page, at most 640 KB */
	if (size < 4096)
		size = 4096;
	if (size > 640*1024)
		size = 640*1024;

	reserve_low = size;

	return 0;
}

early_param("reservelow", parse_reservelow);
/*
 * Determine if we were loaded by an EFI loader.  If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization.  Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
	visws_early_detect();

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif
	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	early_ioremap_init();

	setup_olpc_ofw_pgd();
	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
	if (boot_params.sys_desc_table.length != 0) {
		machine_id = boot_params.sys_desc_table.table[0];
		machine_submodel_id = boot_params.sys_desc_table.table[1];
		BIOS_revision = boot_params.sys_desc_table.table[2];
	}
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
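
	/*
	 * Example of the packing above: a loader with ID 7 (GRUB) reporting
	 * minor version 2 would set type_of_loader to 0x72, giving
	 * bootloader_type 0x72 and bootloader_version 2.  A loader ID of
	 * 0xe means the real ID lives in ext_loader_type, and
	 * ext_loader_ver supplies the high bits of the version.
	 */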
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
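
	/*
	 * ram_size packs three legacy ramdisk fields into one 16-bit word:
	 * e.g. a value of 0xC000 sets both RAMDISK_PROMPT_FLAG (0x8000) and
	 * RAMDISK_LOAD_FLAG (0x4000) with an image start of 0, while the
	 * low 11 bits (RAMDISK_IMAGE_START_MASK) carry the start offset.
	 */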
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL32", 4)) {
		efi_enabled = 1;
		efi_64bit = false;
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4)) {
		efi_enabled = 1;
		efi_64bit = true;
	}

	if (efi_enabled && efi_memblock_x86_reserve_range())
		efi_enabled = 0;
#endif
	x86_init.oem.arch_setup();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	setup_memory_map();
	parse_setup_data();
	/* update the e820_saved too */
	e820_reserve_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;
	bss_resource.start = virt_to_phys(&__bss_start);
	bss_resource.end = virt_to_phys(&__bss_stop)-1;
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;
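
	/*
	 * With CONFIG_CMDLINE_BOOL but not CONFIG_CMDLINE_OVERRIDE, a
	 * builtin CONFIG_CMDLINE of "console=ttyS0" and a bootloader line
	 * of "quiet" yields "console=ttyS0 quiet" above; with OVERRIDE the
	 * bootloader line is discarded entirely.
	 */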
	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	x86_configure_nx();

	parse_early_param();

	x86_report_nx();

	/* after early param, so could get panic from serial */
	memblock_x86_reserve_range_setup_data();
	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}
#ifdef CONFIG_PCI
	if (pci_early_dump_regs)
		early_dump_pci_devices();
#endif
	finish_e820_parsing();

	if (efi_enabled)
		efi_init();

	dmi_scan_machine();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine, for the BP.
	 */
	init_hypervisor_platform();
	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);
	/*
	 * Complain if .text .data and .bss are not marked as E820_RAM and
	 * attempt to fix it by adding the range. We may have a confused BIOS,
	 * or the user may have incorrectly supplied it via memmap=exactmap. If
	 * we really are running on top of non-RAM, we will crash later anyways.
	 */
	if (!e820_all_mapped(code_resource.start, __pa(__brk_limit), E820_RAM)) {
		pr_warn(".text .data .bss are not marked as E820_RAM!\n");

		e820_add_region(code_resource.start,
				__pa(__brk_limit) - code_resource.start + 1,
				E820_RAM);
	}

	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
				  E820_RESERVED);
		sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820_print_map("bad_ppro");
	}
#endif
	early_gart_iommu_check();
	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820_end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end_of_ram_pfn();
#ifdef CONFIG_X86_32
	/* max_low_pfn gets updated here */
	find_low_pfn_range();
#else
	num_physpages = max_pfn;

	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

	reserve_ibft_region();
	early_alloc_pgt_buf();
	/*
	 * Need to conclude brk, before memblock_x86_fill():
	 * it could use memblock_find_in_range, could overlap with
	 * brk area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock.current_limit = ISA_END_ADDRESS;
	memblock_x86_fill();
	/*
	 * The EFI specification says that boot service code won't be called
	 * after ExitBootServices(). This is, in fact, a lie.
	 */
	if (efi_enabled)
		efi_reserve_boot_services();
	/* preallocate 4k for mptable mpc */
	early_reserve_e820_mpc_new();
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
			(max_pfn_mapped<<PAGE_SHIFT) - 1);
	trim_platform_memory_ranges();

	init_mem_mapping();

	memblock.current_limit = get_max_mapped();
	dma_contiguous_reserve(0);
	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

	reserve_initrd();
#if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD)
	acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
#endif
	reserve_crashkernel();
	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_table_init();

	early_acpi_boot_init();

	initmem_init();
	memblock_find_dma_reserve();
#ifdef CONFIG_KVM_GUEST
	kvmclock_init();
#endif

	x86_init.paging.pagetable_init();
	if (boot_cpu_data.cpuid_level >= 0) {
		/* A CPU has %cr4 if and only if it has CPUID */
		mmu_cr4_features = read_cr4();
		if (trampoline_cr4_features)
			*trampoline_cr4_features = mmu_cr4_features;
	}
#ifdef CONFIG_X86_32
	/* sync back kernel address range */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
#endif
	tboot_probe();

#ifdef CONFIG_X86_64
	map_vsyscall();
#endif
	generic_apic_probe();

	early_quirks();
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	sfi_init();
	x86_dtb_init();
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();

	prefill_possible_map();
	init_apic_mappings();
	if (x86_io_apic_ops.init)
		x86_io_apic_ops.init();
	kvm_guest_init();

	e820_reserve_resources();
	e820_mark_nosave_regions(max_low_pfn);

	x86_init.resources.reserve_resources();

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();
	arch_init_ideal_nops();

	register_refined_jiffies(CLOCK_TICK_RATE);
#ifdef CONFIG_EFI
	/* Once setup is done above, disable efi_enabled on mismatched
	 * firmware/kernel architectures since there is no support for
	 * runtime services.
	 */
	if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
		efi_unmap_memmap();
	}
#endif
}
#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.start	= 0xa0000,
	.end	= 0xbffff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}
#endif /* CONFIG_X86_32 */