arch/x86/vdso/vdso32-setup.c  (firefly-linux-kernel-4.4.55.git, commit 5b4aaefb6b423dc19fa2a717b582f3127b9cc4cb)
/*
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the needed initializations to support sysenter.
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/elf.h>
#include <asm/tlbflush.h>
#include <asm/vdso.h>
#include <asm/proto.h>
#include <asm/fixmap.h>
#include <asm/hpet.h>
#include <asm/vvar.h>

#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT    0
#else
#define VDSO_DEFAULT    1
#endif

#ifdef CONFIG_X86_64
#define vdso_enabled                    sysctl_vsyscall32
#define arch_setup_additional_pages     syscall32_setup_pages
#endif
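
/*
 * On 64-bit kernels this same code builds and maps the 32-bit (compat)
 * vDSO: vdso_enabled is backed by sysctl_vsyscall32 (exposed as the
 * abi.vsyscall32 sysctl registered at the bottom of this file), and the
 * mapping routine below is reached under its compat name,
 * syscall32_setup_pages.
 */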

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;

static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);

        if (vdso_enabled > 1)
                pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");

        return 1;
}

/*
 * For consistency, the argument vdso32=[012] affects the 32-bit vDSO
 * behavior on both 64-bit and 32-bit kernels.
 * On 32-bit kernels, vdso=[012] means the same thing.
 */
__setup("vdso32=", vdso_setup);

#ifdef CONFIG_X86_32
__setup_param("vdso=", vdso32_setup, vdso_setup, 0);

EXPORT_SYMBOL_GPL(vdso_enabled);
#endif
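
/*
 * Example: booting with "vdso32=0" (or "vdso=0" on a 32-bit kernel)
 * disables the 32-bit vDSO, so 32-bit libc typically falls back to the
 * int $0x80 system call path; "vdso32=1" enables it even when
 * CONFIG_COMPAT_VDSO makes 0 the default.
 */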

static struct page **vdso32_pages;
static unsigned int vdso32_size;

#ifdef CONFIG_X86_64

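/*
 * These report whether the CPU can execute SYSENTER / SYSCALL from
 * 32-bit compat mode; sysenter_setup() uses them to pick which vDSO
 * image (syscall, sysenter or int80) gets installed.
 */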
#define vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SYSENTER32))
#define vdso32_syscall()        (boot_cpu_has(X86_FEATURE_SYSCALL32))

/* May not be __init: called during resume */
void syscall32_cpu_init(void)
{
        /*
         * Load these always in case some future AMD CPU supports
         * SYSENTER from compat mode too.
         */
        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
        wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
        wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

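        /*
         * MSR_CSTAR holds the kernel entry point used when 32-bit user
         * code issues SYSCALL from compat mode (the AMD fast path).
         */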
        wrmsrl(MSR_CSTAR, ia32_cstar_target);
}

#else  /* CONFIG_X86_32 */

#define vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SEP))
#define vdso32_syscall()        (0)

void enable_sep_cpu(void)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        if (!boot_cpu_has(X86_FEATURE_SEP)) {
                put_cpu();
                return;
        }

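        /*
         * SYSENTER enters the kernel at the CS:EIP programmed below, with
         * the stack pointer taken from SYSENTER_ESP; point that at the
         * small per-CPU stack embedded at the end of this CPU's TSS.
         */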
        tss->x86_tss.ss1 = __KERNEL_CS;
        tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
        wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
        wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
        put_cpu();
}

#endif  /* CONFIG_X86_64 */

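/*
 * Pick the vDSO image that matches the fastest system call mechanism the
 * CPU offers (syscall, sysenter, or the int $0x80 fallback), copy it into
 * freshly allocated kernel pages, and remember those pages so they can be
 * mapped into every 32-bit process by arch_setup_additional_pages().
 */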
int __init sysenter_setup(void)
{
        void *vdso_pages;
        const void *vdso;
        size_t vdso_len;
        unsigned int i;

        if (vdso32_syscall()) {
                vdso = &vdso32_syscall_start;
                vdso_len = &vdso32_syscall_end - &vdso32_syscall_start;
        } else if (vdso32_sysenter()) {
                vdso = &vdso32_sysenter_start;
                vdso_len = &vdso32_sysenter_end - &vdso32_sysenter_start;
        } else {
                vdso = &vdso32_int80_start;
                vdso_len = &vdso32_int80_end - &vdso32_int80_start;
        }

        vdso32_size = (vdso_len + PAGE_SIZE - 1) / PAGE_SIZE;
        vdso32_pages = kmalloc(sizeof(*vdso32_pages) * vdso32_size, GFP_ATOMIC);
        vdso_pages = kmalloc(VDSO_OFFSET(vdso32_size), GFP_ATOMIC);

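        /* Record the backing struct page of every vDSO page for mapping. */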
        for (i = 0; i != vdso32_size; ++i)
                vdso32_pages[i] = virt_to_page(vdso_pages + VDSO_OFFSET(i));

        memcpy(vdso_pages, vdso, vdso_len);
        patch_vdso32(vdso_pages, vdso_len);

        return 0;
}

/* Set up a VMA at program startup for the vsyscall page */
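/*
 * Layout: the vDSO text is mapped at 'addr', and VDSO_PREV_PAGES extra
 * pages sit immediately below it: a read-only view of the kernel's
 * __vvar_page and, when an HPET is present, its MMIO page.
 */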
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;
        struct vm_area_struct *vma;

#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32))
                return x32_setup_additional_pages(bprm, uses_interp);
#endif

        if (vdso_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        down_write(&mm->mmap_sem);

        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        current->mm->context.vdso = (void *)addr;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        ret = install_special_mapping(mm,
                        addr,
                        VDSO_OFFSET(vdso32_size),
                        VM_READ|VM_EXEC|
                        VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                        vdso32_pages);

        if (ret)
                goto up_fail;

        vma = _install_special_mapping(mm,
                        addr - VDSO_OFFSET(VDSO_PREV_PAGES),
                        VDSO_OFFSET(VDSO_PREV_PAGES),
                        VM_READ,
                        NULL);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        ret = remap_pfn_range(vma,
                addr - VDSO_OFFSET(VDSO_VVAR_PAGE),
                __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
                PAGE_SIZE,
                PAGE_READONLY);

        if (ret)
                goto up_fail;

#ifdef CONFIG_HPET_TIMER
        if (hpet_address) {
                ret = io_remap_pfn_range(vma,
                        addr - VDSO_OFFSET(VDSO_HPET_PAGE),
                        hpet_address >> PAGE_SHIFT,
                        PAGE_SIZE,
                        pgprot_noncached(PAGE_READONLY));

                if (ret)
                        goto up_fail;
        }
#endif

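        /*
         * Remember where in the vDSO the SYSENTER return path lands; the
         * kernel resumes 32-bit user code at this address after a
         * sysenter-based system call.
         */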
        current_thread_info()->sysenter_return =
                VDSO32_SYMBOL(addr, SYSENTER_RETURN);

up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);

        return ret;
}

#ifdef CONFIG_X86_64

subsys_initcall(sysenter_setup);

#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>

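/*
 * Appears as /proc/sys/abi/vsyscall32; writing 0 there disables the
 * 32-bit vDSO for processes exec()ed afterwards.
 */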
static struct ctl_table abi_table2[] = {
        {
                .procname       = "vsyscall32",
                .data           = &sysctl_vsyscall32,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {}
};

static struct ctl_table abi_root_table2[] = {
        {
                .procname = "abi",
                .mode = 0555,
                .child = abi_table2
        },
        {}
};

static __init int ia32_binfmt_init(void)
{
        register_sysctl_table(abi_root_table2);
        return 0;
}
__initcall(ia32_binfmt_init);
#endif

#else  /* CONFIG_X86_32 */

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        return NULL;
}

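/*
 * With the vDSO installed as an ordinary VMA there is no fixmap "gate"
 * area on 32-bit kernels, so the gate-area helpers below report none.
 */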
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return NULL;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return 0;
}

int in_gate_area_no_mm(unsigned long addr)
{
        return 0;
}

#endif  /* CONFIG_X86_64 */