KVM: Be more careful restoring fs on lightweight vmexit
[firefly-linux-kernel-4.4.55.git] / drivers / kvm / vmx.c
index c1ac106ace8c1c99193c9b87667396f4adbfb2c5..49cadd31120b243a4fb70cd26e401b6bc1826723 100644 (file)
@@ -34,6 +34,9 @@ MODULE_LICENSE("GPL");
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
+static struct page *vmx_io_bitmap_a;
+static struct page *vmx_io_bitmap_b;
+
 #ifdef CONFIG_X86_64
 #define HOST_IS_64 1
 #else
@@ -480,6 +483,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
        case MSR_GS_BASE:
                vmcs_writel(GUEST_GS_BASE, data);
                break;
+       case MSR_LSTAR:
+       case MSR_SYSCALL_MASK:
+               msr = find_msr_entry(vcpu, msr_index);
+               if (msr)
+                       msr->data = data;
+               load_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
+               break;
 #endif
        case MSR_IA32_SYSENTER_CS:
                vmcs_write32(GUEST_SYSENTER_CS, data);
@@ -1129,8 +1139,8 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
 
        /* I/O */
-       vmcs_write64(IO_BITMAP_A, 0);
-       vmcs_write64(IO_BITMAP_B, 0);
+       vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
+       vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
 
        guest_write_tsc(0);
 
@@ -1150,7 +1160,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
                               CPU_BASED_HLT_EXITING         /* 20.6.2 */
                               | CPU_BASED_CR8_LOAD_EXITING    /* 20.6.2 */
                               | CPU_BASED_CR8_STORE_EXITING   /* 20.6.2 */
-                              | CPU_BASED_UNCOND_IO_EXITING   /* 20.6.2 */
+                              | CPU_BASED_ACTIVATE_IO_BITMAP  /* 20.6.2 */
                               | CPU_BASED_MOV_DR_EXITING
                               | CPU_BASED_USE_TSC_OFFSETING   /* 21.3 */
                        );
@@ -1817,21 +1827,26 @@ static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        int fs_gs_ldt_reload_needed;
        int r;
 
-again:
+preempted:
        /*
         * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
         * allow segment selectors with cpl > 0 or ti == 1.
         */
-       fs_sel = read_fs();
-       gs_sel = read_gs();
        ldt_sel = read_ldt();
-       fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
-       if (!fs_gs_ldt_reload_needed) {
+       fs_gs_ldt_reload_needed = ldt_sel;
+       fs_sel = read_fs();
+       if (!(fs_sel & 7))
                vmcs_write16(HOST_FS_SELECTOR, fs_sel);
-               vmcs_write16(HOST_GS_SELECTOR, gs_sel);
-       } else {
+       else {
                vmcs_write16(HOST_FS_SELECTOR, 0);
+               fs_gs_ldt_reload_needed = 1;
+       }
+       gs_sel = read_gs();
+       if (!(gs_sel & 7))
+               vmcs_write16(HOST_GS_SELECTOR, gs_sel);
+       else {
                vmcs_write16(HOST_GS_SELECTOR, 0);
+               fs_gs_ldt_reload_needed = 1;
        }
 
 #ifdef CONFIG_X86_64
@@ -1848,13 +1863,6 @@ again:
        if (vcpu->guest_debug.enabled)
                kvm_guest_debug_pre(vcpu);
 
-       kvm_load_guest_fpu(vcpu);
-
-       /*
-        * Loading guest fpu may have cleared host cr0.ts
-        */
-       vmcs_writel(HOST_CR0, read_cr0());
-
 #ifdef CONFIG_X86_64
        if (is_long_mode(vcpu)) {
                save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1);
@@ -1862,6 +1870,14 @@ again:
        }
 #endif
 
+again:
+       kvm_load_guest_fpu(vcpu);
+
+       /*
+        * Loading guest fpu may have cleared host cr0.ts
+        */
+       vmcs_writel(HOST_CR0, read_cr0());
+
        asm (
                /* Store host registers */
                "pushf \n\t"
@@ -1981,11 +1997,49 @@ again:
                [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
              : "cc", "memory" );
 
+       ++vcpu->stat.exits;
+
+       vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
+
+       asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
+
+       if (unlikely(fail)) {
+               kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+               kvm_run->fail_entry.hardware_entry_failure_reason
+                       = vmcs_read32(VM_INSTRUCTION_ERROR);
+               r = 0;
+               goto out;
+       }
        /*
-        * Reload segment selectors ASAP. (it's needed for a functional
-        * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
-        * relies on having 0 in %gs for the CPU PDA to work.)
+        * Profile KVM exit RIPs:
         */
+       if (unlikely(prof_on == KVM_PROFILING))
+               profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
+
+       vcpu->launched = 1;
+       r = kvm_handle_exit(kvm_run, vcpu);
+       if (r > 0) {
+               /* Give scheduler a chance to reschedule. */
+               if (signal_pending(current)) {
+                       r = -EINTR;
+                       kvm_run->exit_reason = KVM_EXIT_INTR;
+                       ++vcpu->stat.signal_exits;
+                       goto out;
+               }
+
+               if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                       r = -EINTR;
+                       kvm_run->exit_reason = KVM_EXIT_INTR;
+                       ++vcpu->stat.request_irq_exits;
+                       goto out;
+               }
+               if (!need_resched()) {
+                       ++vcpu->stat.light_exits;
+                       goto again;
+               }
+       }
+
+out:
        if (fs_gs_ldt_reload_needed) {
                load_ldt(ldt_sel);
                load_fs(fs_sel);
@@ -2002,8 +2056,6 @@ again:
 
                reload_tss();
        }
-       ++vcpu->stat.exits;
-
 #ifdef CONFIG_X86_64
        if (is_long_mode(vcpu)) {
                save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
@@ -2011,43 +2063,9 @@ again:
        }
 #endif
 
-       vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
-
-       asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
-
-       if (fail) {
-               kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-               kvm_run->fail_entry.hardware_entry_failure_reason
-                       = vmcs_read32(VM_INSTRUCTION_ERROR);
-               r = 0;
-       } else {
-               /*
-                * Profile KVM exit RIPs:
-                */
-               if (unlikely(prof_on == KVM_PROFILING))
-                       profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
-
-               vcpu->launched = 1;
-               r = kvm_handle_exit(kvm_run, vcpu);
-               if (r > 0) {
-                       /* Give scheduler a change to reschedule. */
-                       if (signal_pending(current)) {
-                               ++vcpu->stat.signal_exits;
-                               post_kvm_run_save(vcpu, kvm_run);
-                               kvm_run->exit_reason = KVM_EXIT_INTR;
-                               return -EINTR;
-                       }
-
-                       if (dm_request_for_irq_injection(vcpu, kvm_run)) {
-                               ++vcpu->stat.request_irq_exits;
-                               post_kvm_run_save(vcpu, kvm_run);
-                               kvm_run->exit_reason = KVM_EXIT_INTR;
-                               return -EINTR;
-                       }
-
-                       kvm_resched(vcpu);
-                       goto again;
-               }
+       if (r > 0) {
+               kvm_resched(vcpu);
+               goto preempted;
        }
 
        post_kvm_run_save(vcpu, kvm_run);
@@ -2188,11 +2206,50 @@ static struct kvm_arch_ops vmx_arch_ops = {
 
 static int __init vmx_init(void)
 {
-       return kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+       void *iova;
+       int r;
+
+       vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+       if (!vmx_io_bitmap_a)
+               return -ENOMEM;
+
+       vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+       if (!vmx_io_bitmap_b) {
+               r = -ENOMEM;
+               goto out;
+       }
+
+       /*
+        * Allow direct access to the PC debug port (it is often used for I/O
+        * delays, but the vmexits simply slow things down).
+        */
+       iova = kmap(vmx_io_bitmap_a);
+       memset(iova, 0xff, PAGE_SIZE);
+       clear_bit(0x80, iova);
+       kunmap(vmx_io_bitmap_a);
+
+       iova = kmap(vmx_io_bitmap_b);
+       memset(iova, 0xff, PAGE_SIZE);
+       kunmap(vmx_io_bitmap_b);
+
+       r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
+       if (r)
+               goto out1;
+
+       return 0;
+
+out1:
+       __free_page(vmx_io_bitmap_b);
+out:
+       __free_page(vmx_io_bitmap_a);
+       return r;
 }
 
 static void __exit vmx_exit(void)
 {
+       __free_page(vmx_io_bitmap_b);
+       __free_page(vmx_io_bitmap_a);
+
        kvm_exit_arch();
 }