arch/x86/vdso/vdso32-setup.c
/*
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the needed initializations to support sysenter.
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/elf.h>
#include <asm/tlbflush.h>
#include <asm/vdso.h>
#include <asm/proto.h>
#include <asm/fixmap.h>
#include <asm/hpet.h>
#include <asm/vvar.h>

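/*
 * Default for the vdso32_enabled knob below: CONFIG_COMPAT_VDSO turns the
 * 32-bit vDSO off by default (for old userspace that cannot cope with it);
 * all other configurations enable it.  Either way the default can still be
 * overridden with the vdso32= boot parameter or the abi.vsyscall32 sysctl.
 */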
#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT	0
#else
#define VDSO_DEFAULT	1
#endif

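/*
 * On 64-bit kernels the native arch_setup_additional_pages() is provided
 * elsewhere; rename the function defined below so that it is only reached
 * for 32-bit (compat) processes, as syscall32_setup_pages().
 */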
#ifdef CONFIG_X86_64
#define arch_setup_additional_pages	syscall32_setup_pages
#endif

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso32_enabled = VDSO_DEFAULT;

static int __init vdso32_setup(char *s)
{
	vdso32_enabled = simple_strtoul(s, NULL, 0);

	if (vdso32_enabled > 1) {
		pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
		vdso32_enabled = 0;
	}

	return 1;
}

/*
 * For consistency, the argument vdso32=[0|1] affects the 32-bit vDSO
 * behavior on both 64-bit and 32-bit kernels; any other value prints a
 * warning and disables the vDSO.
 * On 32-bit kernels, vdso=[0|1] means the same thing.
 */
__setup("vdso32=", vdso32_setup);

#ifdef CONFIG_X86_32
__setup_param("vdso=", vdso_setup, vdso32_setup, 0);
#endif
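/*
 * Example: booting with "vdso32=0" (or "vdso=0" on a 32-bit kernel) leaves
 * the vDSO unmapped, so 32-bit userspace falls back to int $0x80 syscalls.
 */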

static struct page **vdso32_pages;
static unsigned vdso32_size;

#ifdef CONFIG_X86_64

#define vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SYSENTER32))
#define vdso32_syscall()	(boot_cpu_has(X86_FEATURE_SYSCALL32))

#else  /* CONFIG_X86_32 */

#define vdso32_sysenter()	(boot_cpu_has(X86_FEATURE_SEP))
#define vdso32_syscall()	(0)

#endif  /* CONFIG_X86_64 */

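/*
 * Pick the vDSO image that matches the best 32-bit syscall instruction the
 * CPU offers (SYSCALL for compat mode on 64-bit kernels, then SYSENTER,
 * falling back to int $0x80), record its pages, and let patch_vdso32() fix
 * up the chosen image in place.
 */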
int __init sysenter_setup(void)
{
	char *vdso32_start, *vdso32_end;
	int npages, i;

#ifdef CONFIG_COMPAT
	if (vdso32_syscall()) {
		vdso32_start = vdso32_syscall_start;
		vdso32_end = vdso32_syscall_end;
		vdso32_pages = vdso32_syscall_pages;
	} else
#endif
	if (vdso32_sysenter()) {
		vdso32_start = vdso32_sysenter_start;
		vdso32_end = vdso32_sysenter_end;
		vdso32_pages = vdso32_sysenter_pages;
	} else {
		vdso32_start = vdso32_int80_start;
		vdso32_end = vdso32_int80_end;
		vdso32_pages = vdso32_int80_pages;
	}

	npages = ((vdso32_end - vdso32_start) + PAGE_SIZE - 1) / PAGE_SIZE;
	vdso32_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdso32_pages[i] = virt_to_page(vdso32_start + i*PAGE_SIZE);

	patch_vdso32(vdso32_start, vdso32_size);

	return 0;
}

/* Set up VMAs at program startup for the vDSO and the pages that precede it */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;
	struct vm_area_struct *vma;

#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32))
		return x32_setup_additional_pages(bprm, uses_interp);
#endif

	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	down_write(&mm->mmap_sem);

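	/*
	 * Reserve room for the vDSO image plus VDSO_PREV_PAGES pages just
	 * below it (the vvar page and, when enabled, the HPET page), then
	 * advance addr past those companion pages so it points at the vDSO
	 * proper.
	 */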
	addr = get_unmapped_area(NULL, 0, vdso32_size + VDSO_OFFSET(VDSO_PREV_PAGES), 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	addr += VDSO_OFFSET(VDSO_PREV_PAGES);

	current->mm->context.vdso = (void *)addr;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	ret = install_special_mapping(mm,
			addr,
			vdso32_size,
			VM_READ|VM_EXEC|
			VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
			vdso32_pages);

	if (ret)
		goto up_fail;

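	/*
	 * Cover the companion pages below the vDSO with a read-only VMA;
	 * the remap calls below fill in the actual physical pages.
	 */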
	vma = _install_special_mapping(mm,
			addr - VDSO_OFFSET(VDSO_PREV_PAGES),
			VDSO_OFFSET(VDSO_PREV_PAGES),
			VM_READ,
			NULL);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	ret = remap_pfn_range(vma,
		addr - VDSO_OFFSET(VDSO_VVAR_PAGE),
		__pa_symbol(&__vvar_page) >> PAGE_SHIFT,
		PAGE_SIZE,
		PAGE_READONLY);

	if (ret)
		goto up_fail;

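	/*
	 * If an HPET is present, expose its MMIO page read-only and uncached
	 * so the vDSO clock code can read the counter without a syscall.
	 */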
#ifdef CONFIG_HPET_TIMER
	if (hpet_address) {
		ret = io_remap_pfn_range(vma,
			addr - VDSO_OFFSET(VDSO_HPET_PAGE),
			hpet_address >> PAGE_SHIFT,
			PAGE_SIZE,
			pgprot_noncached(PAGE_READONLY));

		if (ret)
			goto up_fail;
	}
#endif

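	/*
	 * Remember where the SYSENTER/SYSCALL entry code should return to
	 * in the freshly mapped vDSO for this thread.
	 */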
	current_thread_info()->sysenter_return =
		VDSO32_SYMBOL(addr, SYSENTER_RETURN);

  up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);

	return ret;
}

#ifdef CONFIG_X86_64

subsys_initcall(sysenter_setup);

#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>

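/*
 * This appears as /proc/sys/abi/vsyscall32; for example
 *	echo 0 > /proc/sys/abi/vsyscall32
 * stops newly exec'd 32-bit processes from getting a vDSO mapping.
 */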
static struct ctl_table abi_table2[] = {
	{
		.procname	= "vsyscall32",
		.data		= &vdso32_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}
};

static struct ctl_table abi_root_table2[] = {
	{
		.procname = "abi",
		.mode = 0555,
		.child = abi_table2
	},
	{}
};

static __init int ia32_binfmt_init(void)
{
	register_sysctl_table(abi_root_table2);
	return 0;
}
__initcall(ia32_binfmt_init);
#endif

#else  /* CONFIG_X86_32 */

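/*
 * A 32-bit kernel has no separate vsyscall/gate area, so there is nothing
 * to report beyond naming the vDSO mapping in /proc/<pid>/maps.
 */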
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	return NULL;
}

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

#endif  /* CONFIG_X86_64 */