/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * GART, crash reserved region, etc.
	 */
	unsigned int max_nr_ranges;
	unsigned long gart_start, gart_end;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

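/*
 * Sketch of the registration side, for reference: kvm_intel publishes its
 * handler with rcu_assign_pointer() at module load and clears it again on
 * unload (the exact call sites live in arch/x86/kvm/vmx.c; roughly):
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *			   crash_vmclear_local_loaded_vmcss);
 *	...
 *	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 *	synchronize_rcu();
 */
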
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	disable_local_APIC();
}

static void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

#else
static void kdump_nmi_shootdown_cpus(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	kdump_nmi_shootdown_cpus();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

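/*
 * Note: native_machine_crash_shutdown() is wired up as the
 * machine_ops.crash_shutdown hook (see arch/x86/kernel/reboot.c) and runs
 * from crash_kexec() with the machine already in a panicked state, so it
 * must avoid taking locks and do only the minimal teardown seen above.
 */
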
#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

static int get_gart_ranges_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;

	ced->gart_start = start;
	ced->gart_end = end;

	/* Not expecting more than 1 gart aperture */
	return 1;
}

/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/*
	 * We don't create ELF headers for GART aperture as an attempt
	 * to dump this memory in second kernel leads to hang/crash.
	 * If gart aperture is present, one needs to exclude that region
	 * and that could lead to need of extra phdr.
	 */
	walk_iomem_res("GART", IORESOURCE_MEM, 0, -1,
		       ced, get_gart_ranges_callback);

	/*
	 * If we have a gart region, excluding that could potentially split
	 * a memory range, resulting in an extra header. Account for that.
	 */
	if (ced->gart_end)
		ced->max_nr_ranges++;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split is possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}

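/*
 * Worst-case accounting example: with N system RAM ranges, the GART hole,
 * crashk_res and crashk_low_res can each split at most one range in two,
 * so at most max_nr_ranges = N + 3 PT_LOAD headers are needed for RAM.
 */
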
static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split range to the array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOMEM;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}

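/*
 * Worked example: starting from
 *
 *	ranges[] = { [0x0 - 0xfff], [0x1000 - 0x3fff] }, nr_ranges = 2,
 *
 * excluding 0x2000 - 0x2fff splits the second range:
 *
 *	ranges[] = { [0x0 - 0xfff], [0x1000 - 0x1fff], [0x3000 - 0x3fff] },
 *	nr_ranges = 3.
 *
 * Each exclusion grows the array by at most one entry, which is what the
 * max_nr_ranges accounting in fill_up_crash_elf_data() relies on.
 */
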
/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to splits, and split ranges are put in ced->mem.ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start,
					crashk_low_res.end);
		if (ret)
			return ret;
	}

	/* Exclude GART region */
	if (ced->gart_end) {
		ret = exclude_mem_range(cmem, ced->gart_start, ced->gart_end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches backup region, adjust offset to backup
		 * segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}

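/*
 * Note on p_offset: the second kernel's /proc/vmcore treats p_offset of
 * these PT_LOAD headers as the physical address to read the dump data
 * from, which is why a range matching the backup region is pointed at the
 * backup segment rather than at its original (overwritten) location.
 */
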
static int prepare_elf64_headers(struct crash_elf_data *ced,
				 void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two elf headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

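	/*
	 * The per-cpu crash_notes buffers referenced above are filled by
	 * crash_save_cpu() at crash time with an ELF prstatus note; the
	 * dump capture tools later merge one PT_NOTE per cpu into vmcore.
	 */
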
	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
				  prepare_elf64_ram_headers_callback);
	if (ret < 0) {
		vfree(buf);
		return ret;
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}

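/*
 * Resulting layout of the buffer built above (one vzalloc'd allocation,
 * padded to ELF_CORE_HEADER_ALIGN):
 *
 *	Elf64_Ehdr
 *	Elf64_Phdr  PT_NOTE, one per present cpu (crash notes)
 *	Elf64_Phdr  PT_NOTE (vmcoreinfo)
 *	Elf64_Phdr  PT_LOAD (kernel text mapping, x86_64 only)
 *	Elf64_Phdr  PT_LOAD, one per (possibly split) RAM range
 */
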
/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820MAX)
		return 1;

	memcpy(&params->e820_map[nr_e820_entries], entry,
	       sizeof(struct e820entry));
	params->e820_entries++;
	return 0;
}

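/*
 * Note: the boot_params e820 map holds at most E820MAX entries, and
 * memmap_entry_callback() below ignores the non-zero return value, so
 * entries past the limit are silently dropped.
 */
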
static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}

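/*
 * The backup and elf header segments are carved out of crashk_res here so
 * that they are not handed to the kdump kernel as usable RAM: both hold
 * dump state that must survive until /proc/vmcore has been read.
 */
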
/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res("ACPI Tables", flags, 0, -1, &cmd,
		       memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_NVS;
	walk_iomem_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
		       memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

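/*
 * The kdump kernel therefore boots with an e820 map consisting of: the
 * first 640K (backup source), the ACPI table/NVS regions, the optional
 * low crash region, and the crashkernel range minus the backup and elf
 * header segments.
 */
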
static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}

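/*
 * Returning a non-zero value from the callback makes walk_system_ram_res()
 * stop after the first matching range; this is the same convention used by
 * get_gart_ranges_callback() above.
 */
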
int crash_load_segments(struct kimage *image)
{
	unsigned long src_start, src_sz, elf_sz;
	void *elf_addr;
	int ret;

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source.
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	src_start = image->arch.backup_src_start;
	src_sz = image->arch.backup_src_sz;

	/* Add backup segment. */
	if (src_sz) {
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(image, (char *)&crash_zero_bytes,
				       sizeof(crash_zero_bytes), src_sz,
				       PAGE_SIZE, 0, -1, 0,
				       &image->arch.backup_load_addr);
		if (ret)
			return ret;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr, src_start, src_sz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &elf_addr, &elf_sz);
	if (ret)
		return ret;

	image->arch.elf_headers = elf_addr;
	image->arch.elf_headers_sz = elf_sz;

	ret = kexec_add_buffer(image, (char *)elf_addr, elf_sz, elf_sz,
			       ELF_CORE_HEADER_ALIGN, 0, -1, 0,
			       &image->arch.elf_load_addr);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, elf_sz, elf_sz);

	return ret;
}

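/*
 * crash_load_segments() is called from the kexec_file_load() path when a
 * crash kernel is being loaded (e.g. by the bzImage64 image loader); the
 * addresses recorded in image->arch are then used to set up the second
 * kernel's memory map and command line.
 */
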
#endif /* CONFIG_KEXEC_FILE */