/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <trace/events/kvm.h>

#include "trace.h"

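/*
 * Partition-wide Hyper-V MSRs are backed by state shared across the whole
 * VM (struct kvm_hv in kvm->arch), while the remaining synthetic MSRs are
 * per-vcpu (struct kvm_vcpu_hv).  The split decides which lock protects a
 * given access; see kvm_hv_{get,set}_msr_common() below.
 */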
static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                r = true;
                break;
        }

        return r;
}

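/*
 * HV_X64_MSR_CRASH_P0..P4 map onto the hv_crash_param[] array; the index
 * is bounds-checked because it is derived from a guest-chosen MSR number.
 */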
static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        *pdata = hv->hv_crash_param[index];
        return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}

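/*
 * CRASH_CTL writes are asymmetric: only the host may change the stored
 * value (to advertise HV_X64_MSR_CRASH_CTL_NOTIFY), while a guest write
 * with the NOTIFY bit set reports a guest crash.  The crash parameters
 * are logged and user space is notified via a KVM_REQ_HV_CRASH request.
 */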
static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

        if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                          hv->hv_crash_param[0],
                          hv->hv_crash_param[1],
                          hv->hv_crash_param[2],
                          hv->hv_crash_param[3],
                          hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        hv->hv_crash_param[index] = data;
        return 0;
}

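/*
 * Handle writes to the partition-wide MSRs.  Enabling HV_X64_MSR_HYPERCALL
 * installs a hypercall page in guest memory: the 4-byte sequence is the
 * vendor-specific VMCALL/VMMCALL emitted by ->patch_hypercall() followed
 * by a RET (0xc3), so a guest CALL into the page performs one hypercall.
 * A non-zero return is reported to the guest as a failed MSR access
 * (typically #GP).
 */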
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                instructions[3] = 0xc3; /* ret */
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC: {
                u64 gfn;
                HV_REFERENCE_TSC_PAGE tsc_ref;

                memset(&tsc_ref, 0, sizeof(tsc_ref));
                hv->hv_tsc_page = data;
                if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                        break;
                gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
                if (kvm_write_guest(
                                kvm,
                                gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
                                &tsc_ref, sizeof(tsc_ref)))
                        return 1;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}

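/*
 * Handle writes to the per-vcpu MSRs.  Enabling the APIC assist page
 * zeroes the backing guest page and wires it up as the PV EOI area;
 * the EOI/ICR/TPR MSRs are thin aliases for the corresponding APIC
 * registers.
 */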
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        hv->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                hv->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}

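/*
 * Handle reads of the partition-wide MSRs.  HV_X64_MSR_TIME_REF_COUNT is
 * the partition reference counter, which the spec defines in 100ns units;
 * hence the div_u64(..., 100) of the nanosecond-resolution kvmclock time.
 */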
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = hv->hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = hv->hv_hypercall;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                data = div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
                break;
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_get_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 pdata);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}

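/*
 * Handle reads of the per-vcpu MSRs.  No VP index is stored, so
 * HV_X64_MSR_VP_INDEX is derived by locating this vcpu's slot in the
 * kvm_for_each_vcpu() iteration order.
 */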
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX: {
                int r;
                struct kvm_vcpu *v;

                kvm_for_each_vcpu(r, v, vcpu->kvm) {
                        if (v == vcpu) {
                                data = r;
                                break;
                        }
                }
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_APIC_ASSIST_PAGE:
                data = hv->hv_vapic;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}

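/*
 * Common entry points: partition-wide MSRs touch VM-global state, so they
 * are serialized under kvm->lock; per-vcpu MSRs only touch this vcpu's
 * state and need no extra locking.
 */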
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

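/*
 * Hypercall calling convention (see the register reads below): in long
 * mode the input value arrives in RCX with the input/output parameter
 * GPAs in RDX and R8; in 32-bit protected mode each of the three values
 * is split across an EDX:EAX / EBX:ECX / EDI:ESI register pair.  The
 * result value is returned the same way (RAX, or EDX:EAX).
 */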
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret;
        uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
        bool fast, longmode;

        /*
         * A hypercall raises #UD from non-zero CPL or from real mode,
         * per the Hyper-V spec.
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 0;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif

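        /*
         * Unpack the hypercall input value:
         *   bits 15:0   call code
         *   bit  16     fast (register-based) calling convention
         *   bits 43:32  rep count
         *   bits 59:48  rep start index
         */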
        code = param & 0xffff;
        fast = (param >> 16) & 0x1;
        rep_cnt = (param >> 32) & 0xfff;
        rep_idx = (param >> 48) & 0xfff;

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        switch (code) {
        case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
                kvm_vcpu_on_spin(vcpu);
                break;
        default:
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

        ret = res | (((u64)rep_done & 0xfff) << 32);
        if (longmode) {
                kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
        } else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
        }

        return 1;
}