KVM: abstract kvm x86 mmu->n_free_mmu_pages
author     Dave Hansen <dave@linux.vnet.ibm.com>
           Fri, 20 Aug 2010 01:11:05 +0000 (18:11 -0700)
committer  Avi Kivity <avi@redhat.com>
           Sun, 24 Oct 2010 08:51:17 +0000 (10:51 +0200)
"free" is a poor name for this value.  In this context, it means,
"the number of mmu pages which this kvm instance should be able to
allocate."  But "free" implies much more: that the objects are there
and ready for use.  "available" is a much better description, especially
when you see how it is calculated.

In this patch, we abstract reads of the value behind a helper
function.  We'll soon replace the function's contents with a
different way of calculating the value.

All of the reads of n_free_mmu_pages are taken care of in this
patch.  The modification sites will be handled in a patch
later in the series.
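
As a sketch of where this is headed (illustration only, not part of
this patch): once all reads go through the helper, its body can later
be rewritten to derive "available" from counters of used and maximum
pages.  The field names n_used_mmu_pages and n_max_mmu_pages below
are hypothetical:

	static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
	{
		/*
		 * Hypothetical follow-up: compute "available" from a
		 * used-pages count instead of maintaining a free count.
		 */
		return kvm->arch.n_max_mmu_pages - kvm->arch.n_used_mmu_pages;
	}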

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Tim Pepper <lnxninja@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ff95d418750d7e7347cf0a9175d32829086369b1..625b178946613f8661d5ef12e27189ff5272b5c6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1696,7 +1696,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
        int used_pages;
        LIST_HEAD(invalid_list);
 
-       used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
+       used_pages = kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm);
        used_pages = max(0, used_pages);
 
        /*
@@ -2959,18 +2959,15 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-       int free_pages;
        LIST_HEAD(invalid_list);
 
-       free_pages = vcpu->kvm->arch.n_free_mmu_pages;
-       while (free_pages < KVM_REFILL_PAGES &&
+       while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
               !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
                struct kvm_mmu_page *sp;
 
                sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
                                  struct kvm_mmu_page, link);
-               free_pages += kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-                                                      &invalid_list);
+               kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
                ++vcpu->kvm->stat.mmu_recycled;
        }
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3145,7 +3142,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
                npages = kvm->arch.n_alloc_mmu_pages -
-                        kvm->arch.n_free_mmu_pages;
+                        kvm_mmu_available_pages(kvm);
                cache_count += npages;
                if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
                        freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index be66759321a546aeee68a37cdc84c179ef39266e..c3a689ae7df03bdb09d535e993ee4bb215529edc 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 
+static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+{
+       return kvm->arch.n_free_mmu_pages;
+}
+
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-       if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+       if (unlikely(kvm_mmu_available_pages(vcpu->kvm) < KVM_MIN_FREE_MMU_PAGES))
                __kvm_mmu_free_some_pages(vcpu);
 }
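
Side note (hypothetical, not introduced by this patch): both remaining
read sites in mmu.c compute the number of in-use pages the same way,
as n_alloc_mmu_pages minus the new accessor.  A helper capturing that
pattern could look like:

	/* Hypothetical helper; both mmu.c call sites repeat this calculation. */
	static inline unsigned int kvm_mmu_used_pages(struct kvm *kvm)
	{
		return kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm);
	}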