KVM: MMU: remove prefault from invlpg handler
author: Marcelo Tosatti <mtosatti@redhat.com>
Sat, 5 Dec 2009 14:34:11 +0000 (12:34 -0200)
committer: Greg Kroah-Hartman <gregkh@suse.de>
Wed, 6 Jan 2010 23:04:00 +0000 (15:04 -0800)
commit fb341f572d26e0786167cd96b90cc4febed830cf upstream.

The invlpg prefault optimization breaks Windows 2008 R2 occasionally.

The visible effect is that the invlpg handler instantiates a pte which
is, microseconds later, written with a different gfn by another vcpu.

The OS could have other mechanisms to prevent a present translation from
being used, which the hypervisor is unaware of.

While the documentation states that the cpu is at liberty to prefetch tlb
entries, it looks like this is not heeded, so remove tlb prefetch from
invlpg.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
arch/x86/kvm/paging_tmpl.h

index 72558f8ff3f5a999a3f7192dd3117fe667d72098..85e12cd6fb48b9226052106c9b7c83d97b4bf480 100644 (file)
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct kvm_shadow_walk_iterator iterator;
-       pt_element_t gpte;
-       gpa_t pte_gpa = -1;
        int level;
        u64 *sptep;
        int need_flush = 0;
@@ -471,10 +469,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                if (level == PT_PAGE_TABLE_LEVEL  ||
                    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
                    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-                       struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-                       pte_gpa = (sp->gfn << PAGE_SHIFT);
-                       pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
                        if (is_shadow_present_pte(*sptep)) {
                                rmap_remove(vcpu->kvm, sptep);
@@ -493,18 +487,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
        if (need_flush)
                kvm_flush_remote_tlbs(vcpu->kvm);
        spin_unlock(&vcpu->kvm->mmu_lock);
-
-       if (pte_gpa == -1)
-               return;
-       if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-                                 sizeof(pt_element_t)))
-               return;
-       if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-               if (mmu_topup_memory_caches(vcpu))
-                       return;
-               kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-                                 sizeof(pt_element_t), 0);
-       }
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)