x86: Load the 32-bit vdso in place, just like the 64-bit vdsos
[firefly-linux-kernel-4.4.55.git] arch/x86/vdso/vma.c
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>
#include "vdso_image.h"

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso_enabled = 1;

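/*
 * DECLARE_VDSO_IMAGE(x) is assumed to expand to extern declarations of
 * the x##_start[]/x##_end[] image markers and the x##_pages[] array
 * that init_vdso() fills in below.
 */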
DECLARE_VDSO_IMAGE(vdso);
extern unsigned short vdso_sync_cpuid;
static unsigned vdso_size;

#ifdef CONFIG_X86_X32_ABI
DECLARE_VDSO_IMAGE(vdsox32);
static unsigned vdsox32_size;
#endif
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_X86_X32_ABI) || \
        defined(CONFIG_COMPAT)
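/*
 * Apply alternative-instruction patching to the kernel's built-in copy
 * of a 32-bit vDSO image before it is mapped into any process, so that
 * every user of the image sees instructions suited to the running CPU.
 */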
void __init patch_vdso32(void *vdso, size_t len)
{
        Elf32_Ehdr *hdr = vdso;
        Elf32_Shdr *sechdrs, *alt_sec = NULL;
        char *secstrings;
        void *alt_data;
        int i;

        BUG_ON(len < sizeof(Elf32_Ehdr));
        BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

        sechdrs = (void *)hdr + hdr->e_shoff;
        secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

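        /*
         * Walk the section headers looking for .altinstructions; index 0
         * is the reserved null section, hence the loop starts at 1.
         */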
        for (i = 1; i < hdr->e_shnum; i++) {
                Elf32_Shdr *shdr = &sechdrs[i];
                if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
                        alt_sec = shdr;
                        goto found;
                }
        }

        /* If we get here, it's probably a bug. */
        pr_warning("patch_vdso32: .altinstructions not found\n");
        return;  /* nothing to patch */

found:
        alt_data = (void *)hdr + alt_sec->sh_offset;
        apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif

#if defined(CONFIG_X86_64)
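/* 64-bit counterpart of patch_vdso32() above, for Elf64 section headers. */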
static void __init patch_vdso64(void *vdso, size_t len)
{
        Elf64_Ehdr *hdr = vdso;
        Elf64_Shdr *sechdrs, *alt_sec = NULL;
        char *secstrings;
        void *alt_data;
        int i;

        BUG_ON(len < sizeof(Elf64_Ehdr));
        BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

        sechdrs = (void *)hdr + hdr->e_shoff;
        secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

        for (i = 1; i < hdr->e_shnum; i++) {
                Elf64_Shdr *shdr = &sechdrs[i];
                if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
                        alt_sec = shdr;
                        goto found;
                }
        }

        /* If we get here, it's probably a bug. */
        pr_warning("patch_vdso64: .altinstructions not found\n");
        return;  /* nothing to patch */

found:
        alt_data = (void *)hdr + alt_sec->sh_offset;
        apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}

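/*
 * Boot-time setup: patch the built-in vDSO image(s) and build the page
 * arrays that setup_additional_pages() later maps into each new process.
 */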
static int __init init_vdso(void)
{
        int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
        int i;

        patch_vdso64(vdso_start, vdso_end - vdso_start);

        vdso_size = npages << PAGE_SHIFT;
        for (i = 0; i < npages; i++)
                vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);

#ifdef CONFIG_X86_X32_ABI
        patch_vdso32(vdsox32_start, vdsox32_end - vdsox32_start);
        npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
        vdsox32_size = npages << PAGE_SHIFT;
        for (i = 0; i < npages; i++)
                vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
#endif

        return 0;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still in the same PTE as the stack top.
 * This doesn't give that many random bits.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
        unsigned long addr, end;
        unsigned offset;
        end = (start + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;
        /* This loses some more bits than a modulo, but is cheaper */
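        /* With PTRS_PER_PTE == 512 on x86-64, this is at most 9 bits of
           entropy (0..511 pages within the 2MB PMD region). */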
        offset = get_random_int() & (PTRS_PER_PTE - 1);
        addr = start + (offset << PAGE_SHIFT);
        if (addr >= end)
                addr = end;

        /*
         * page-align it here so that get_unmapped_area doesn't
         * align it wrongfully again to the next page. addr can come in 4K
         * unaligned here as a result of stack start randomization.
         */
        addr = PAGE_ALIGN(addr);
        addr = align_vdso_addr(addr);

        return addr;
}

/*
 * Set up a VMA at program startup for the vDSO image.
 * Not called for compat tasks.
 */
static int setup_additional_pages(struct linux_binprm *bprm,
                                  int uses_interp,
                                  struct page **pages,
                                  unsigned size)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret;

        if (!vdso_enabled)
                return 0;

        down_write(&mm->mmap_sem);
        addr = vdso_addr(mm->start_stack, size);
        addr = get_unmapped_area(NULL, addr, size, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        current->mm->context.vdso = (void *)addr;

        ret = install_special_mapping(mm, addr, size,
                                      VM_READ|VM_EXEC|
                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                      pages);
        if (ret) {
                current->mm->context.vdso = NULL;
                goto up_fail;
        }

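        /* Success also falls through here; the label just drops mmap_sem. */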
up_fail:
        up_write(&mm->mmap_sem);
        return ret;
}

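/* ELF loader hook for 64-bit binaries: map the standard 64-bit vDSO. */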
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return setup_additional_pages(bprm, uses_interp, vdso_pages,
                                      vdso_size);
}

#ifdef CONFIG_X86_X32_ABI
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
                                      vdsox32_size);
}
#endif

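/*
 * "vdso=" boot parameter: booting with vdso=0 clears vdso_enabled, so
 * setup_additional_pages() maps nothing into new processes.
 */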
static __init int vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("vdso=", vdso_setup);
#endif