KVM: Make kvm_mmu_change_mmu_pages() take mmu_lock by itself
authorTakuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Tue, 8 Jan 2013 10:46:07 +0000 (19:46 +0900)
committerGleb Natapov <gleb@redhat.com>
Mon, 14 Jan 2013 09:14:09 +0000 (11:14 +0200)
There is no reason to make callers take mmu_lock:
kvm_mmu_change_mmu_pages() and kvm_mmu_slot_remove_write_access() do
not need to be protected together by mmu_lock in
kvm_arch_commit_memory_region(), because the former calls
kvm_mmu_commit_zap_page() and flushes TLBs by itself.

Note: we do not need to protect kvm->arch.n_requested_mmu_pages with
mmu_lock, as shown by the fact that it is already read locklessly.
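The pattern here is simply to move lock acquisition from the callers
into the callee, so each operation in kvm_arch_commit_memory_region()
holds mmu_lock only for as long as it needs it. Below is a minimal
stand-alone sketch of that pattern using plain pthreads with
hypothetical names; it is illustrative only and not KVM code.

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int used_pages, max_pages;

	/* After the change: the function takes the lock by itself,
	 * so callers must no longer hold it when calling in. */
	static void change_pages(unsigned int goal)
	{
		pthread_mutex_lock(&lock);
		while (used_pages > goal)
			used_pages--;	/* stands in for zapping shadow pages */
		max_pages = goal;
		pthread_mutex_unlock(&lock);
	}

	/* Caller: no surrounding lock; each step does its own locking. */
	static void commit_region(unsigned int goal, int log_dirty)
	{
		change_pages(goal);
		if (log_dirty) {
			pthread_mutex_lock(&lock);
			/* stands in for kvm_mmu_slot_remove_write_access() */
			pthread_mutex_unlock(&lock);
		}
	}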

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c

index 9c1b2d6158bf24ab378064185e4c37aa4a335691..f5572804f5948fac4753b5e0ec76b882b2cd1b90 100644 (file)
@@ -2143,6 +2143,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
         * change the value
         */
 
+       spin_lock(&kvm->mmu_lock);
+
        if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
                while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
                        !list_empty(&kvm->arch.active_mmu_pages)) {
@@ -2157,6 +2159,8 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
        }
 
        kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
+
+       spin_unlock(&kvm->mmu_lock);
 }
 
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
index add5e48019680b08af48f4b4b59ebea73cef7e26..080bbdcbf2eec3877eb123e4bbd7fb4b5a32415f 100644 (file)
@@ -3270,12 +3270,10 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                return -EINVAL;
 
        mutex_lock(&kvm->slots_lock);
-       spin_lock(&kvm->mmu_lock);
 
        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
-       spin_unlock(&kvm->mmu_lock);
        mutex_unlock(&kvm->slots_lock);
        return 0;
 }
@@ -6894,7 +6892,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
        if (!kvm->arch.n_requested_mmu_pages)
                nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 
-       spin_lock(&kvm->mmu_lock);
        if (nr_mmu_pages)
                kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
        /*
@@ -6902,9 +6899,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
         * Existing largepage mappings are destroyed here and new ones will
         * not be created until the end of the logging.
         */
-       if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+       if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+               spin_lock(&kvm->mmu_lock);
                kvm_mmu_slot_remove_write_access(kvm, mem->slot);
-       spin_unlock(&kvm->mmu_lock);
+               spin_unlock(&kvm->mmu_lock);
+       }
        /*
         * If memory slot is created, or moved, we need to clear all
         * mmio sptes.