KVM: Recalculate mmu pages needed for every memory region change
author    Zhang Xiantao <xiantao.zhang@intel.com>
          Tue, 20 Nov 2007 05:11:38 +0000 (13:11 +0800)
committer Avi Kivity <avi@qumranet.com>
          Wed, 30 Jan 2008 15:53:09 +0000 (17:53 +0200)
Instead of incrementally adjusting the mmu cache size on every memory slot
operation, recalculate it from scratch by summing the pages of all slots.
This is simpler and safer.  Note that *memslot = new is now assigned before
the sizing check, since the new helper derives the total from kvm->memslots
and must see the updated slot.

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
drivers/kvm/kvm_main.c
drivers/kvm/mmu.c
drivers/kvm/x86.h

diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 729573b844e5e860d4d108f68c5889f2d849ebc6..93ecafbfb1b6bfdeb147f671ebecd3b5988e244c 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -333,26 +333,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;
 
+       *memslot = new;
+
        if (!kvm->n_requested_mmu_pages) {
-               unsigned int n_pages;
-
-               if (npages) {
-                       n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
-                       kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
-                                                n_pages);
-               } else {
-                       unsigned int nr_mmu_pages;
-
-                       n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
-                       nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
-                       nr_mmu_pages = max(nr_mmu_pages,
-                                       (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
-                       kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
-               }
+               unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
+               kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        }
 
-       *memslot = new;
-
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        kvm_flush_remote_tlbs(kvm);
 
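A subtlety in the hunk above: *memslot = new is now published before the
sizing check.  The helper added to mmu.c below sums kvm->memslots rather
than using the old/new locals of the removed code, so the slot table must
be up to date when it runs.  A pared-down, stand-alone sketch of that
dependency (the field names mirror this tree; everything else is
illustrative, not kernel code):

	/* Illustration only: the recount walks the stored slot table, so a
	 * slot update must be committed before recounting, or the stale
	 * npages would still be summed.
	 */
	#include <stdio.h>

	struct memslot { unsigned long npages; };

	struct vm {
		struct memslot memslots[8];
		int nmemslots;
	};

	static unsigned long total_guest_pages(struct vm *vm)
	{
		unsigned long sum = 0;
		int i;

		for (i = 0; i < vm->nmemslots; i++)
			sum += vm->memslots[i].npages; /* committed slots only */
		return sum;
	}

	int main(void)
	{
		struct vm vm = { .nmemslots = 1 };
		struct memslot new = { .npages = 4096 };

		vm.memslots[0] = new;            /* publish first ... */
		printf("%lu\n", total_guest_pages(&vm)); /* ... then recount: 4096 */
		return 0;
	}
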
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 4624f3789b9a8df1d6ad490fee317dd564c44455..101cd5377a89716377021baebe68733b14e5ca85 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -1535,6 +1535,25 @@ nomem:
        return -ENOMEM;
 }
 
+/*
+ * Calculate mmu pages needed for kvm.
+ */
+unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
+{
+       int i;
+       unsigned int nr_mmu_pages;
+       unsigned int nr_pages = 0;
+
+       for (i = 0; i < kvm->nmemslots; i++)
+               nr_pages += kvm->memslots[i].npages;
+
+       nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
+       nr_mmu_pages = max(nr_mmu_pages,
+                       (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+
+       return nr_mmu_pages;
+}
+
 #ifdef AUDIT
 
 static const char *audit_msg;
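
To see what kvm_mmu_calculate_mmu_pages() yields in practice, the same
permille expression can be exercised stand-alone.  The two constants are
assumptions matching this tree's defaults (KVM_PERMILLE_MMU_PAGES = 20,
KVM_MIN_ALLOC_MMU_PAGES = 64); the harness is illustrative, not kernel
code:

	#include <stdio.h>

	/* Assumed values; taken from the tree's defaults, not this diff. */
	#define KVM_PERMILLE_MMU_PAGES	20	/* mmu pages per 1000 guest pages */
	#define KVM_MIN_ALLOC_MMU_PAGES	64	/* floor on the cache size */

	static unsigned int calc_mmu_pages(unsigned long guest_pages)
	{
		unsigned long nr = guest_pages * KVM_PERMILLE_MMU_PAGES / 1000;

		return nr < KVM_MIN_ALLOC_MMU_PAGES ? KVM_MIN_ALLOC_MMU_PAGES : nr;
	}

	int main(void)
	{
		printf("%u\n", calc_mmu_pages(262144)); /* 1 GiB guest -> 5242 */
		printf("%u\n", calc_mmu_pages(1024));   /* 4 MiB guest -> 64 (floor) */
		return 0;
	}

Because the total is recounted rather than tracked incrementally, shrinking
or deleting a slot now shrinks the cache back toward the floor through the
same max() clamp the removed decrement path used.
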
diff --git a/drivers/kvm/x86.h b/drivers/kvm/x86.h
index 90b791bf62879a64471902c33f06878cadcb5295..71f2477d03fd18c9ce258c62ccaaa0bf3c52197d 100644
--- a/drivers/kvm/x86.h
+++ b/drivers/kvm/x86.h
@@ -236,6 +236,7 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
 void kvm_mmu_zap_all(struct kvm *kvm);
+unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 enum emulation_result {