/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
8 #include <linux/sched.h>
9 #include <linux/slab.h>
10 #include <linux/init.h>
11 #include <linux/random.h>
12 #include <linux/elf.h>
13 #include <asm/vsyscall.h>
14 #include <asm/vgtod.h>
15 #include <asm/proto.h>
18 #include "vdso_image.h"
20 #if defined(CONFIG_X86_64)
21 unsigned int __read_mostly vdso_enabled = 1;
23 DECLARE_VDSO_IMAGE(vdso);
24 extern unsigned short vdso_sync_cpuid;
25 static unsigned vdso_size;
27 #ifdef CONFIG_X86_X32_ABI
28 DECLARE_VDSO_IMAGE(vdsox32);
29 static unsigned vdsox32_size;
#if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
	defined(CONFIG_COMPAT)
/*
 * Apply alternative-instruction patching to a 32-bit (or x32) vDSO image.
 *
 * @vdso: start of the in-kernel copy of the vDSO image
 * @len:  size of the image in bytes
 *
 * Walks the ELF section headers looking for ".altinstructions" and feeds
 * that section to apply_alternatives().  Called once at boot, before the
 * image is mapped into any process.
 */
void __init patch_vdso32(void *vdso, size_t len)
{
	Elf32_Ehdr *hdr = vdso;
	Elf32_Shdr *sechdrs, *alt_sec = NULL;	/* NULL, not 0, for pointers */
	char *secstrings;
	void *alt_data;
	int i;

	/* The image is built into the kernel; a bad header is a build bug. */
	BUG_ON(len < sizeof(Elf32_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	/* Section header table and the section-name string table. */
	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	/* Section index 0 is the reserved null section; start at 1. */
	for (i = 1; i < hdr->e_shnum; i++) {
		Elf32_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			break;
		}
	}

	if (!alt_sec) {
		/* If we get here, it's probably a bug. */
		pr_warn("patch_vdso32: .altinstructions not found\n");
		return; /* nothing to patch */
	}

	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif
67 #if defined(CONFIG_X86_64)
68 static void __init patch_vdso64(void *vdso, size_t len)
70 Elf64_Ehdr *hdr = vdso;
71 Elf64_Shdr *sechdrs, *alt_sec = 0;
76 BUG_ON(len < sizeof(Elf64_Ehdr));
77 BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
79 sechdrs = (void *)hdr + hdr->e_shoff;
80 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
82 for (i = 1; i < hdr->e_shnum; i++) {
83 Elf64_Shdr *shdr = &sechdrs[i];
84 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
90 /* If we get here, it's probably a bug. */
91 pr_warning("patch_vdso64: .altinstructions not found\n");
92 return; /* nothing to patch */
95 alt_data = (void *)hdr + alt_sec->sh_offset;
96 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
99 static int __init init_vdso(void)
101 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
104 patch_vdso64(vdso_start, vdso_end - vdso_start);
106 vdso_size = npages << PAGE_SHIFT;
107 for (i = 0; i < npages; i++)
108 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
110 #ifdef CONFIG_X86_X32_ABI
111 patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
112 npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
113 vdsox32_size = npages << PAGE_SHIFT;
114 for (i = 0; i < npages; i++)
115 vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
120 subsys_initcall(init_vdso);
124 /* Put the vdso above the (randomized) stack with another randomized offset.
125 This way there is no hole in the middle of address space.
126 To save memory make sure it is still in the same PTE as the stack top.
127 This doesn't give that many random bits */
128 static unsigned long vdso_addr(unsigned long start, unsigned len)
130 unsigned long addr, end;
132 end = (start + PMD_SIZE - 1) & PMD_MASK;
133 if (end >= TASK_SIZE_MAX)
136 /* This loses some more bits than a modulo, but is cheaper */
137 offset = get_random_int() & (PTRS_PER_PTE - 1);
138 addr = start + (offset << PAGE_SHIFT);
143 * page-align it here so that get_unmapped_area doesn't
144 * align it wrongfully again to the next page. addr can come in 4K
145 * unaligned here as a result of stack start randomization.
147 addr = PAGE_ALIGN(addr);
148 addr = align_vdso_addr(addr);
153 /* Setup a VMA at program startup for the vsyscall page.
154 Not called for compat tasks */
155 static int setup_additional_pages(struct linux_binprm *bprm,
160 struct mm_struct *mm = current->mm;
167 down_write(&mm->mmap_sem);
168 addr = vdso_addr(mm->start_stack, size);
169 addr = get_unmapped_area(NULL, addr, size, 0, 0);
170 if (IS_ERR_VALUE(addr)) {
175 current->mm->context.vdso = (void *)addr;
177 ret = install_special_mapping(mm, addr, size,
179 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
182 current->mm->context.vdso = NULL;
187 up_write(&mm->mmap_sem);
191 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
193 return setup_additional_pages(bprm, uses_interp, vdso_pages,
197 #ifdef CONFIG_X86_X32_ABI
198 int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
200 return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
205 static __init int vdso_setup(char *s)
207 vdso_enabled = simple_strtoul(s, NULL, 0);
210 __setup("vdso=", vdso_setup);