KVM: x86: Move TSC scaling logic out of call-back adjust_tsc_offset()
author Haozhong Zhang <haozhong.zhang@intel.com>
Tue, 20 Oct 2015 07:39:06 +0000 (15:39 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 10 Nov 2015 11:06:17 +0000 (12:06 +0100)
For both VMX and SVM, if the adjustment passed to the adjust_tsc_offset()
callback is a host TSC value (signalled by its 'host' argument), the
callback scales it first. This patch moves that common scaling logic into
the caller, adjust_tsc_offset_host(), and renames the callback from
adjust_tsc_offset() to adjust_tsc_offset_guest().

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
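
The per-file hunks follow. As a rough orientation for the new call shape, here is a
small, self-contained user-space sketch (compilable with a plain C compiler). Everything
prefixed mock_, plus MOCK_DEFAULT_RATIO and the plain-multiply scaling, is a stand-in
invented for illustration; the kernel code uses kvm_x86_ops, kvm_scale_tsc(),
kvm_default_tsc_scaling_ratio and WARN_ON() instead, as the diff itself shows.

/*
 * Illustrative sketch only, not kernel code: the vendor callback no longer
 * takes a "bool host" flag; a host-TSC adjustment is scaled once in the
 * common adjust_tsc_offset_host() wrapper before the callback is invoked,
 * while adjust_tsc_offset_guest() passes the value straight through.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_DEFAULT_RATIO 1ULL	/* stand-in for kvm_default_tsc_scaling_ratio */

struct mock_vcpu {
	uint64_t tsc_scaling_ratio;	/* stand-in for vcpu->arch.tsc_scaling_ratio */
	int64_t tsc_offset;		/* stand-in for the vendor TSC offset field */
};

/* Stand-in for kvm_scale_tsc(); the real one is a fixed-point multiply. */
static int64_t mock_scale_tsc(struct mock_vcpu *vcpu, uint64_t tsc)
{
	return (int64_t)(tsc * vcpu->tsc_scaling_ratio);
}

/* Stand-in for the per-vendor .adjust_tsc_offset_guest callback. */
static void mock_adjust_tsc_offset_guest(struct mock_vcpu *vcpu, int64_t adjustment)
{
	vcpu->tsc_offset += adjustment;
}

struct mock_x86_ops {
	void (*adjust_tsc_offset_guest)(struct mock_vcpu *vcpu, int64_t adjustment);
};

static struct mock_x86_ops ops = {
	.adjust_tsc_offset_guest = mock_adjust_tsc_offset_guest,
};

/* Guest-TSC adjustment: handed to the callback unmodified. */
static void adjust_tsc_offset_guest(struct mock_vcpu *vcpu, int64_t adjustment)
{
	ops.adjust_tsc_offset_guest(vcpu, adjustment);
}

/* Host-TSC adjustment: scaled once here, then handed to the same callback. */
static void adjust_tsc_offset_host(struct mock_vcpu *vcpu, int64_t adjustment)
{
	if (vcpu->tsc_scaling_ratio != MOCK_DEFAULT_RATIO)
		assert(adjustment >= 0);	/* stand-in for WARN_ON() */
	adjustment = mock_scale_tsc(vcpu, (uint64_t)adjustment);
	ops.adjust_tsc_offset_guest(vcpu, adjustment);
}

int main(void)
{
	struct mock_vcpu vcpu = { .tsc_scaling_ratio = 1, .tsc_offset = 0 };

	adjust_tsc_offset_host(&vcpu, 1000);	/* scaled once, then applied */
	adjust_tsc_offset_guest(&vcpu, -250);	/* applied as-is */
	printf("resulting offset: %lld\n", (long long)vcpu.tsc_offset);
	return 0;
}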

arch/x86/include/asm/kvm_host.h
index 672f960e81449478215865357cf28b1f072f6ffb..8465944fe8dae257d8d1083e855fba87aa654285 100644
@@ -845,7 +845,7 @@ struct kvm_x86_ops {
        int (*get_lpage_level)(void);
        bool (*rdtscp_supported)(void);
        bool (*invpcid_supported)(void);
-       void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
+       void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
 
        void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -922,17 +922,6 @@ struct kvm_arch_async_pf {
 
 extern struct kvm_x86_ops *kvm_x86_ops;
 
-static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
-                                          s64 adjustment)
-{
-       kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
-}
-
-static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
-{
-       kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
-}
-
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
arch/x86/kvm/svm.c
index d99b175ffbeaf62bcdd7720d5464d34a8e4348da..b5824a3894bf9c5644d077f2de104f965b74d735 100644
@@ -983,16 +983,10 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
+static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       if (host) {
-               if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
-                       WARN_ON(adjustment < 0);
-               adjustment = kvm_scale_tsc(vcpu, (u64)adjustment);
-       }
-
        svm->vmcb->control.tsc_offset += adjustment;
        if (is_guest_mode(vcpu))
                svm->nested.hsave->control.tsc_offset += adjustment;
@@ -4360,7 +4354,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 
        .read_tsc_offset = svm_read_tsc_offset,
        .write_tsc_offset = svm_write_tsc_offset,
-       .adjust_tsc_offset = svm_adjust_tsc_offset,
+       .adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
        .read_l1_tsc = svm_read_l1_tsc,
 
        .set_tdp_cr3 = set_tdp_cr3,
arch/x86/kvm/vmx.c
index 2d4782ce9a9378b00e526d7cacd3b3ce43ce6602..c0fb398ac50ed78232887d09c70b5904c5ab5b8f 100644
@@ -2413,7 +2413,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        }
 }
 
-static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
+static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
 {
        u64 offset = vmcs_read64(TSC_OFFSET);
 
@@ -10807,7 +10807,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
        .read_tsc_offset = vmx_read_tsc_offset,
        .write_tsc_offset = vmx_write_tsc_offset,
-       .adjust_tsc_offset = vmx_adjust_tsc_offset,
+       .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
        .read_l1_tsc = vmx_read_l1_tsc,
 
        .set_tdp_cr3 = vmx_set_cr3,
arch/x86/kvm/x86.c
index bb46066e125b95ad65e226a673631624b8de0b4f..4073009fe578f2e4081aef4ce25dc2488e18f987 100644
@@ -1526,6 +1526,20 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
 EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+                                          s64 adjustment)
+{
+       kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+       if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
+               WARN_ON(adjustment < 0);
+       adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
+       kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+}
+
 #ifdef CONFIG_X86_64
 
 static cycle_t read_tsc(void)