KVM: MMU: reduce remote tlb flush in kvm_mmu_pte_write()
author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Fri, 4 Jun 2010 13:56:59 +0000 (21:56 +0800)
committer	Avi Kivity <avi@redhat.com>
Sun, 1 Aug 2010 07:39:28 +0000 (10:39 +0300)
Collect the TLB flushes needed on the kvm_mmu_pte_write() path and issue a single flush
after all sptes have been updated, instead of flushing once per modified spte. If a shadow
page was zapped (misaligned write), no flush is issued here at all, since
kvm_mmu_commit_zap_page() already flushes remote TLBs; otherwise a remote flush is done
only when need_remote_flush() reports that some updated spte requires it, and a local
flush when any spte was written.
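
The idea, in isolation: the spte-update loop only records whether a local or remote flush
became necessary, and a single flush call is made after the loop (or none, when a shadow
page was zapped and the zap-commit path flushes for us). The fragment below is a minimal
standalone sketch of that pattern, not the kernel code: flush_remote_tlbs(),
flush_local_tlb() and spte_needs_remote_flush() are hypothetical stubs standing in for
kvm_flush_remote_tlbs(), kvm_mmu_flush_tlb() and need_remote_flush().

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the KVM flush helpers. */
	static void flush_remote_tlbs(void) { puts("flush remote TLBs"); }
	static void flush_local_tlb(void)   { puts("flush local TLB"); }

	/* Stand-in predicate: a bit was cleared in a previously valid entry. */
	static bool spte_needs_remote_flush(unsigned long old, unsigned long new)
	{
		return old != 0 && (old & ~new) != 0;
	}

	/* One flush for the whole write, chosen by the flags collected below;
	 * when a page was zapped, the zap-commit path is assumed to flush. */
	static void pte_write_flush_tlb(bool zap_page, bool remote_flush,
					bool local_flush)
	{
		if (zap_page)
			return;
		if (remote_flush)
			flush_remote_tlbs();
		else if (local_flush)
			flush_local_tlb();
	}

	int main(void)
	{
		unsigned long old_sptes[4] = { 0x7, 0x7, 0x3, 0x7 };
		unsigned long new_sptes[4] = { 0x7, 0x5, 0x3, 0x7 };
		bool zap_page = false, remote_flush = false, local_flush = false;

		for (int i = 0; i < 4; i++) {
			local_flush = true;	/* at least one spte was touched */
			if (!remote_flush &&
			    spte_needs_remote_flush(old_sptes[i], new_sptes[i]))
				remote_flush = true;
			old_sptes[i] = new_sptes[i];
		}

		/* Single flush after the loop instead of one per iteration. */
		pte_write_flush_tlb(zap_page, remote_flush, local_flush);
		return 0;
	}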

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c

index 3b75689eda95f9bb4fc583580b01f5b2b68e0bc7..b285449e82b065c509f47693de2b3e4aaddaf5c0 100644
@@ -2666,11 +2666,15 @@ static bool need_remote_flush(u64 old, u64 new)
        return (old & ~new & PT64_PERM_MASK) != 0;
 }
 
-static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
+static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
+                                   bool remote_flush, bool local_flush)
 {
-       if (need_remote_flush(old, new))
+       if (zap_page)
+               return;
+
+       if (remote_flush)
                kvm_flush_remote_tlbs(vcpu->kvm);
-       else
+       else if (local_flush)
                kvm_mmu_flush_tlb(vcpu);
 }
 
@@ -2735,6 +2739,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        int npte;
        int r;
        int invlpg_counter;
+       bool remote_flush, local_flush, zap_page;
+
+       zap_page = remote_flush = local_flush = false;
 
        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
@@ -2808,7 +2815,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, sp->role.word);
-                       kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+                       zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
                                                     &invalid_list);
                        ++vcpu->kvm->stat.mmu_flooded;
                        continue;
@@ -2833,16 +2840,19 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                        if (quadrant != sp->role.quadrant)
                                continue;
                }
+               local_flush = true;
                spte = &sp->spt[page_offset / sizeof(*spte)];
                while (npte--) {
                        entry = *spte;
                        mmu_pte_write_zap_pte(vcpu, sp, spte);
                        if (gentry)
                                mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
-                       mmu_pte_write_flush_tlb(vcpu, entry, *spte);
+                       if (!remote_flush && need_remote_flush(entry, *spte))
+                               remote_flush = true;
                        ++spte;
                }
        }
+       mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
        kvm_mmu_audit(vcpu, "post pte write");
        spin_unlock(&vcpu->kvm->mmu_lock);