KVM: MMU: move lost-bits judgement into a separate function
authorXiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Mon, 2 Aug 2010 08:14:04 +0000 (16:14 +0800)
committerAvi Kivity <avi@redhat.com>
Sun, 24 Oct 2010 08:50:31 +0000 (10:50 +0200)
Introduce a spte_has_volatile_bits() function to judge whether spte
bits may be lost; this is more readable and will help us clean up the
code later.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
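
For background, the bits at risk are the ones the MMU hardware can set
in an spte behind the kernel's back (notably the Accessed bit): a plain
store can silently overwrite them, while an atomic exchange hands them
back to the caller. Below is a minimal userspace sketch of that pattern
using C11 atomics; only the accessed-mask idea comes from the patch,
and ACCESSED_BIT, set_spte_racy() and set_spte_atomic() are illustrative
stand-ins, not kernel code.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ACCESSED_BIT (1ull << 5)        /* stand-in for shadow_accessed_mask */

static _Atomic uint64_t spte = 0x1;     /* present, Accessed bit clear */

/* Plain store: an Accessed bit set by "hardware" in between can be lost. */
static uint64_t set_spte_racy(uint64_t new_spte)
{
        uint64_t old = atomic_load(&spte);

        atomic_store(&spte, new_spte);
        return old;                     /* may miss a concurrent A-bit update */
}

/* Atomic exchange: the returned value includes any bits hardware set. */
static uint64_t set_spte_atomic(uint64_t new_spte)
{
        return atomic_exchange(&spte, new_spte);
}

int main(void)
{
        if (set_spte_atomic(0) & ACCESSED_BIT)
                puts("page was accessed; cf. kvm_set_pfn_accessed()");
        return 0;
}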
arch/x86/kvm/mmu.c

index e430a383ad1515332b7a0127f532366394d436cb..c07b9a200bc8381a513b5b1a52a80ea8aca9bba5 100644 (file)
@@ -299,6 +299,20 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
 #endif
 }
 
+static bool spte_has_volatile_bits(u64 spte)
+{
+       if (!shadow_accessed_mask)
+               return false;
+
+       if (!is_shadow_present_pte(spte))
+               return false;
+
+       if (spte & shadow_accessed_mask)
+               return false;
+
+       return true;
+}
+
 static void update_spte(u64 *sptep, u64 new_spte)
 {
        u64 old_spte;
@@ -679,14 +693,14 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
        pfn_t pfn;
        u64 old_spte = *sptep;
 
-       if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) ||
-             old_spte & shadow_accessed_mask)
+       if (!spte_has_volatile_bits(old_spte))
                __set_spte(sptep, new_spte);
        else
                old_spte = __xchg_spte(sptep, new_spte);
 
        if (!is_rmap_spte(old_spte))
                return;
+
        pfn = spte_to_pfn(old_spte);
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
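
The "cleanup later" in the message refers to paths that still open-code
the same decision, such as update_spte(), visible as context in the
first hunk. A hypothetical sketch of how a caller can use the helper
(the function name set_spte_safely() is made up; __set_spte(),
__xchg_spte(), is_rmap_spte(), spte_to_pfn() and kvm_set_pfn_accessed()
are the kernel-internal helpers visible in the hunks):

static void set_spte_safely(u64 *sptep, u64 new_spte)
{
        u64 old_spte = *sptep;

        if (!spte_has_volatile_bits(old_spte))
                __set_spte(sptep, new_spte);     /* no hardware-set bits to lose */
        else
                old_spte = __xchg_spte(sptep, new_spte); /* capture them atomically */

        if (!is_rmap_spte(old_spte))
                return;

        /* Without an accessed mask the A bit is assumed; otherwise check it. */
        if (!shadow_accessed_mask || (old_spte & shadow_accessed_mask))
                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}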