/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>

#include <asm/init.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/debugreg.h>
#include <asm/kexec-bzimage64.h>

static struct kexec_file_ops *kexec_file_loaders[] = {
		&kexec_bzImage64_ops,
};
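
/*
 * Page-table pages allocated for the transition mapping are tracked in
 * image->arch so they can be freed on cleanup, or on a failure part-way
 * through init_transition_pgtable().
 */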
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}
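
/*
 * Map the virtual address of relocate_kernel() to the physical address of
 * its copy in the control page. Execution can then continue at the same
 * virtual address once CR3 is switched to the identity-mapped table.
 */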
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	vaddr = (unsigned long)relocate_kernel;
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	free_transition_pgtable(image);
	return result;
}
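
/*
 * Allocator callback handed to kernel_ident_mapping_init() through
 * struct x86_mapping_info: page-table pages come from the image's
 * control pages, which stay allocated for the lifetime of the image.
 */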
static void *alloc_pgt_page(void *data)
{
	struct kimage *image = (struct kimage *)data;
	struct page *page;
	void *p = NULL;

	page = kimage_alloc_control_pages(image, 0);
	if (page) {
		p = page_address(page);
		clear_page(p);
	}

	return p;
}
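
/*
 * Build an identity-mapped page table (virtual == physical) covering
 * every pfn range the kernel has mapped as well as every kexec segment,
 * then hook in the transition mapping for relocate_kernel().
 */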
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= image,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
	};
	unsigned long mstart, mend;
	pgd_t *level4p;
	int result;
	int i;

	level4p = (pgd_t *)__va(start_pgtable);

	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info,
						   level4p, mstart, mend);
		if (result)
			return result;
	}

	/*
	 * Segments' memory ranges could be outside 0 ~ max_pfn, for
	 * example when jumping back to the original kernel from a
	 * kexec'ed kernel, or when the first kernel was booted with a
	 * user-supplied memory map and the second kernel is loaded
	 * outside of that range.
	 */
	for (i = 0; i < image->nr_segments; i++) {
		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;

		result = kernel_ident_mapping_init(&info,
						   level4p, mstart, mend);
		if (result)
			return result;
	}

	return init_transition_pgtable(image, level4p);
}
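
/*
 * set_idt()/set_gdt() load a caller-chosen descriptor table.
 * machine_kexec() points both at physical address 0 with a zero limit,
 * deliberately invalidating them before control passes to the new kernel.
 */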
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}

static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}

static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}
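
/*
 * Called at image load time, where failure is still an option: all page
 * tables the relocation code will need are allocated here, so that
 * machine_kexec() itself never has to allocate memory.
 */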
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in the second kernel. kexec/kdump
		 * paths already have calls to disable_IO_APIC() in one
		 * form or another. The kexec jump path also needs one.
		 */
		disable_IO_APIC();
#endif
	}

	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	if (image->type == KEXEC_TYPE_DEFAULT)
		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
						<< PAGE_SHIFT);
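
	/*
	 * page_list is consumed by the assembly in relocate_kernel_64.S:
	 * it passes down the physical and virtual addresses of the
	 * control page, the identity-mapped page table and, for a regular
	 * (non-crash) kexec, the swap page used while pages are shuffled
	 * into place.
	 */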

	/*
	 * The segment registers are funny things, they have both a
	 * visible and an invisible part. Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a descriptor table in memory. At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}
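
/*
 * Export the symbols and the KERNELOFFSET value that post-mortem tools
 * (e.g. makedumpfile) use to interpret a vmcore from this kernel.
 */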
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
			      (unsigned long)&_text - __START_KERNEL);
}

/* arch-dependent functionality related to kexec file-based syscall */

int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int i, ret = -ENOEXEC;
	struct kexec_file_ops *fops;

	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
		fops = kexec_file_loaders[i];
		if (!fops || !fops->probe)
			continue;

		ret = fops->probe(buf, buf_len);
		if (!ret) {
			image->fops = fops;
			return ret;
		}
	}

	return ret;
}
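
/*
 * Once probe() has selected a loader and cached it in image->fops, the
 * remaining file-based kexec hooks simply dispatch through that ops table.
 */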
void *arch_kexec_kernel_image_load(struct kimage *image)
{
	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}

int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
		return 0;

	return image->fops->cleanup(image);
}

/*
 * Apply purgatory relocations.
 *
 * ehdr: Pointer to elf headers
 * sechdrs: Pointer to section headers.
 * relsec: section index of SHT_RELA section.
 *
 * TODO: Some of the code belongs to generic code. Move that in kexec.c.
 */
int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
				     Elf64_Shdr *sechdrs, unsigned int relsec)
{
	unsigned int i;
	Elf64_Rela *rel;
	Elf64_Sym *sym;
	void *location;
	Elf64_Shdr *section, *symtabsec;
	unsigned long address, sec_base, value;
	const char *strtab, *name, *shstrtab;

	/*
	 * ->sh_offset has been modified to keep the pointer to section
	 * contents in memory
	 */
	rel = (void *)sechdrs[relsec].sh_offset;

	/* Section to which relocations apply */
	section = &sechdrs[sechdrs[relsec].sh_info];

	pr_debug("Applying relocate section %u to %u\n", relsec,
		 sechdrs[relsec].sh_info);

	/* Associated symbol table */
	symtabsec = &sechdrs[sechdrs[relsec].sh_link];

	/* String table */
	if (symtabsec->sh_link >= ehdr->e_shnum) {
		/* Invalid strtab section number */
		pr_err("Invalid string table section index %d\n",
		       symtabsec->sh_link);
		return -ENOEXEC;
	}

	strtab = (char *)sechdrs[symtabsec->sh_link].sh_offset;

	/* section header string table */
	shstrtab = (char *)sechdrs[ehdr->e_shstrndx].sh_offset;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {

		/*
		 * rel[i].r_offset contains the byte offset from the
		 * beginning of the section to the storage unit affected.
		 *
		 * This is the location to update (->sh_offset), i.e. the
		 * temporary buffer where the section is currently loaded.
		 * It will finally be loaded to a different address,
		 * pointed to by ->sh_addr. kexec takes care of moving it
		 * (kexec_load_segment()).
		 */
		location = (void *)(section->sh_offset + rel[i].r_offset);

		/* Final address of the location */
		address = section->sh_addr + rel[i].r_offset;

		/*
		 * rel[i].r_info contains the symbol table index w.r.t.
		 * which the relocation must be made and the type of
		 * relocation to apply. ELF64_R_SYM() and ELF64_R_TYPE()
		 * extract these respectively.
		 */
		sym = (Elf64_Sym *)symtabsec->sh_offset +
				ELF64_R_SYM(rel[i].r_info);

		if (sym->st_name)
			name = strtab + sym->st_name;
		else
			name = shstrtab + sechdrs[sym->st_shndx].sh_name;

		pr_debug("Symbol: %s info: %02x shndx: %02x value=%llx size: %llx\n",
			 name, sym->st_info, sym->st_shndx, sym->st_value,
			 sym->st_size);

		if (sym->st_shndx == SHN_UNDEF) {
			pr_err("Undefined symbol: %s\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_COMMON) {
			pr_err("symbol '%s' in common section\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_ABS)
			sec_base = 0;
		else if (sym->st_shndx >= ehdr->e_shnum) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		} else
			sec_base = sechdrs[sym->st_shndx].sh_addr;

		value = sym->st_value;
		value += sec_base;
		value += rel[i].r_addend;
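
		/*
		 * With S = symbol value plus its section base, A = addend
		 * and P = address of the patched location, "value" now
		 * holds S + A; R_X86_64_PC32 below converts it to
		 * S + A - P.
		 */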
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*(u64 *)location = value;
			break;
		case R_X86_64_32:
			*(u32 *)location = value;
			if (value != *(u32 *)location)
				goto overflow;
			break;
		case R_X86_64_32S:
			*(s32 *)location = value;
			if ((s64)value != *(s32 *)location)
				goto overflow;
			break;
		case R_X86_64_PC32:
			value -= (u64)address;
			*(u32 *)location = value;
			break;
		default:
			pr_err("Unknown rela relocation: %llu\n",
			       ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

overflow:
	pr_err("Overflow in relocation type %d value 0x%lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), value);
	return -ENOEXEC;
}