KVM: VMX: Use EPT Access bit in response to memory notifiers
author: Xudong Hao <xudong.hao@intel.com>
Tue, 22 May 2012 03:23:15 +0000 (11:23 +0800)
committer: Avi Kivity <avi@redhat.com>
Tue, 5 Jun 2012 13:31:05 +0000 (16:31 +0300)
Signed-off-by: Haitao Shan <haitao.shan@intel.com>
Signed-off-by: Xudong Hao <xudong.hao@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c

index be3cea4407ffad63824c068332079baf16336012..d07e436b7a42b5910b2795a96a8591a195cb75d7 100644 (file)
@@ -1242,7 +1242,8 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
        int young = 0;
 
        /*
-        * Emulate the accessed bit for EPT, by checking if this page has
+        * In case of absence of EPT Access and Dirty Bits supports,
+        * emulate the accessed bit for EPT, by checking if this page has
         * an EPT mapping, and clearing it if it does. On the next access,
         * a new EPT mapping will be established.
         * This has some overhead, but not as much as the cost of swapping
@@ -1253,11 +1254,12 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
        for (sptep = rmap_get_first(*rmapp, &iter); sptep;
             sptep = rmap_get_next(&iter)) {
-               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+               BUG_ON(!is_shadow_present_pte(*sptep));
 
-               if (*sptep & PT_ACCESSED_MASK) {
+               if (*sptep & shadow_accessed_mask) {
                        young = 1;
-                       clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)sptep);
+                       clear_bit((ffs(shadow_accessed_mask) - 1),
+                                (unsigned long *)sptep);
                }
        }
 
@@ -1281,9 +1283,9 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
        for (sptep = rmap_get_first(*rmapp, &iter); sptep;
             sptep = rmap_get_next(&iter)) {
-               BUG_ON(!(*sptep & PT_PRESENT_MASK));
+               BUG_ON(!is_shadow_present_pte(*sptep));
 
-               if (*sptep & PT_ACCESSED_MASK) {
+               if (*sptep & shadow_accessed_mask) {
                        young = 1;
                        break;
                }
index d392e5427ca0573ab10e880129d0b09961e541ae..396148ab089b3dc4d546645f4251d34b4d31c172 100644 (file)
@@ -7289,8 +7289,10 @@ static int __init vmx_init(void)
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
 
        if (enable_ept) {
-               kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
-                               VMX_EPT_EXECUTABLE_MASK);
+               kvm_mmu_set_mask_ptes(0ull,
+                       (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+                       (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+                       0ull, VMX_EPT_EXECUTABLE_MASK);
                ept_set_mmio_spte_mask();
                kvm_enable_tdp();
        } else