kvm: vmx: Implement set_apic_access_page_addr
author     Tang Chen <tangchen@cn.fujitsu.com>
           Wed, 24 Sep 2014 07:57:54 +0000 (15:57 +0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Wed, 24 Sep 2014 12:08:01 +0000 (14:08 +0200)
Currently, the APIC access page is pinned by KVM for the entire life
of the guest.  We want to make it migratable in order to make memory
hot-unplug available for machines that run KVM.

This patch prepares to handle this for the case where there is no nested
virtualization, or where the nested guest does not have an APIC page of
its own.  All accesses to kvm->arch.apic_access_page are changed to go
through kvm_vcpu_reload_apic_access_page.
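
For orientation, the common-x86 helper that these call sites now use is
added by a companion patch of this series (arch/x86/kvm/x86.c); the body
below is only a sketch of that helper, not part of this diff:

    void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
    {
            struct page *page;

            /*
             * hardware_setup() clears the hook when the processor has no
             * APIC_ACCESS_ADDR field (see the flexpriority hunk below),
             * so there is nothing to reload in that case.
             */
            if (!kvm_x86_ops->set_apic_access_page_addr)
                    return;

            /* Translate the fixed APIC gfn to whatever page backs it now. */
            page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
            kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));

            /*
             * Do not keep a reference: the page stays unpinned, and the MMU
             * notifier will request another reload if it is migrated or
             * swapped out.
             */
            put_page(page);
    }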

If the APIC access page is invalidated when the host is running, we update
the VMCS in the next guest entry.
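
Concretely, the "next guest entry" update is driven by a request bit
processed in vcpu_enter_guest().  A minimal sketch, assuming the
KVM_REQ_APIC_PAGE_RELOAD bit introduced elsewhere in this series:

    /* In vcpu_enter_guest(), alongside the other KVM_REQ_* checks. */
    if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
            kvm_vcpu_reload_apic_access_page(vcpu);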

If it is invalidated when the guest is running, the MMU notifier will force
an exit, after which we will handle everything as in the previous case.
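
The forced exit comes from the arch MMU notifier hook; a sketch, with the
hook name and request plumbing taken from the companion patches of this
series rather than from this diff:

    void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                               unsigned long address)
    {
            /*
             * The APIC access page's hpa is cached in the VMCS.  If the hva
             * being invalidated backs that page, ask every VCPU to reload
             * it; VCPUs in guest mode are kicked out by the request IPI and
             * pick up the new hpa before re-entering the guest.
             */
            if (address == gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT))
                    kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
    }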

If it is invalidated while nested virtualization is in use, the request
updates whichever VMCS is loaded when it is processed.  If that is the
VMCS01, prepare_vmcs02 propagates the new address to the VMCS02 at the
next entry into L2; if it is the VMCS02, the VMCS01 is refreshed at the
next L2->L1 exit.

Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bfe3022e60f1aea07813204b695551c8c75419af..881d266eb3ae0cb6c426167346d04b7e4a51ee71 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3134,9 +3134,17 @@ static __init int hardware_setup(void)
        if (!cpu_has_vmx_unrestricted_guest())
                enable_unrestricted_guest = 0;
 
-       if (!cpu_has_vmx_flexpriority())
+       if (!cpu_has_vmx_flexpriority()) {
                flexpriority_enabled = 0;
 
+               /*
+                * set_apic_access_page_addr() is used to reload the APIC
+                * access page upon invalidation.  No need to do anything if
+                * the processor does not have the APIC_ACCESS_ADDR VMCS field.
+                */
+               kvm_x86_ops->set_apic_access_page_addr = NULL;
+       }
+
        if (!cpu_has_vmx_tpr_shadow())
                kvm_x86_ops->update_cr8_intercept = NULL;
 
@@ -4557,9 +4565,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
                vmcs_write32(TPR_THRESHOLD, 0);
        }
 
-       if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
-               vmcs_write64(APIC_ACCESS_ADDR,
-                            page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
+       kvm_vcpu_reload_apic_access_page(vcpu);
 
        if (vmx_vm_has_apicv(vcpu->kvm))
                memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
@@ -7198,6 +7204,29 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
        vmx_set_msr_bitmap(vcpu);
 }
 
+static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       /*
+        * Currently we do not handle the nested case where L2 has an
+        * APIC access page of its own; that page is still pinned.
+        * Hence, we skip the case where the VCPU is in guest mode _and_
+        * L1 prepared an APIC access page for L2.
+        *
+        * For the case where L1 and L2 share the same APIC access page
+        * (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear
+        * in the vmcs12), this function will only update either the vmcs01
+        * or the vmcs02.  If the former, the vmcs02 will be updated by
+        * prepare_vmcs02.  If the latter, the vmcs01 will be updated in
+        * the next L2->L1 exit.
+        */
+       if (!is_guest_mode(vcpu) ||
+           !nested_cpu_has2(vmx->nested.current_vmcs12,
+                            SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+               vmcs_write64(APIC_ACCESS_ADDR, hpa);
+}
+
 static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
 {
        u16 status;
@@ -8140,8 +8169,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
                        exec_control |=
                                SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-                       vmcs_write64(APIC_ACCESS_ADDR,
-                               page_to_phys(vcpu->kvm->arch.apic_access_page));
+                       kvm_vcpu_reload_apic_access_page(vcpu);
                }
 
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
@@ -8949,6 +8977,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                vmx->nested.virtual_apic_page = NULL;
        }
 
+       /*
+        * While L2 was running, the mmu_notifier could only have reloaded the
+        * page's hpa into the L2 vmcs.  Reload it for L1 before entering L1.
+        */
+       kvm_vcpu_reload_apic_access_page(vcpu);
+
        /*
         * Exiting from L2 to L1, we're now back to L1 which thinks it just
         * finished a VMLAUNCH or VMRESUME instruction, so we need to set the
@@ -9074,6 +9108,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,
        .set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
+       .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
        .vm_has_apicv = vmx_vm_has_apicv,
        .load_eoi_exitmap = vmx_load_eoi_exitmap,
        .hwapic_irr_update = vmx_hwapic_irr_update,