diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1473e64cb744c7ae5a88956bb89bc9d5fabe87b2..00462bd63129cfbde2c6b7e7bdf50eb073cc31b8 100644
@@ -673,9 +673,9 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
        /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
-       if (!(xcr0 & XSTATE_FP))
+       if (!(xcr0 & XFEATURE_MASK_FP))
                return 1;
-       if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
+       if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
                return 1;
 
        /*
@@ -683,23 +683,24 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
         * saving.  However, xcr0 bit 0 is always set, even if the
         * emulated CPU does not support XSAVE (see fx_init).
         */
-       valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP;
+       valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
        if (xcr0 & ~valid_bits)
                return 1;
 
-       if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))
+       if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
+           (!(xcr0 & XFEATURE_MASK_BNDCSR)))
                return 1;
 
-       if (xcr0 & XSTATE_AVX512) {
-               if (!(xcr0 & XSTATE_YMM))
+       if (xcr0 & XFEATURE_MASK_AVX512) {
+               if (!(xcr0 & XFEATURE_MASK_YMM))
                        return 1;
-               if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512)
+               if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
                        return 1;
        }
        kvm_put_guest_xcr0(vcpu);
        vcpu->arch.xcr0 = xcr0;
 
-       if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK)
+       if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
                kvm_update_cpuid(vcpu);
        return 0;
 }
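
Note: the checks above encode the architectural XCR0 constraints: bit 0 (x87) must always be set, AVX (YMM) state requires SSE, the two MPX bits (BNDREGS/BNDCSR) must be set or cleared together, and the three AVX-512 bits are all-or-nothing and additionally require YMM. A minimal standalone sketch of the same invariants, with the mask values written out (they mirror the XFEATURE_MASK_* definitions in arch/x86/include/asm/fpu/xstate.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XFEATURE_MASK_FP      (1ULL << 0)
#define XFEATURE_MASK_SSE     (1ULL << 1)
#define XFEATURE_MASK_YMM     (1ULL << 2)
#define XFEATURE_MASK_BNDREGS (1ULL << 3)
#define XFEATURE_MASK_BNDCSR  (1ULL << 4)
#define XFEATURE_MASK_AVX512  (7ULL << 5)   /* opmask, ZMM_Hi256, Hi16_ZMM */

static bool xcr0_valid(uint64_t xcr0)
{
        if (!(xcr0 & XFEATURE_MASK_FP))
                return false;                   /* x87 state is mandatory */
        if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
                return false;                   /* AVX builds on SSE */
        if (!(xcr0 & XFEATURE_MASK_BNDREGS) != !(xcr0 & XFEATURE_MASK_BNDCSR))
                return false;                   /* MPX bits travel together */
        if (xcr0 & XFEATURE_MASK_AVX512) {
                if (!(xcr0 & XFEATURE_MASK_YMM))
                        return false;           /* AVX-512 requires AVX */
                if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
                        return false;           /* all three bits or none */
        }
        return true;
}

int main(void)
{
        /* FP|SSE|YMM is valid; FP|YMM without SSE is not. */
        printf("%d %d\n", xcr0_valid(0x7), xcr0_valid(0x5));    /* 1 0 */
        return 0;
}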
@@ -1253,7 +1254,43 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
        return v;
 }
 
-static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
+static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
+{
+       u64 ratio;
+
+       /* Guest TSC same frequency as host TSC? */
+       if (!scale) {
+               vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+               return 0;
+       }
+
+       /* TSC scaling supported? */
+       if (!kvm_has_tsc_control) {
+               if (user_tsc_khz > tsc_khz) {
+                       vcpu->arch.tsc_catchup = 1;
+                       vcpu->arch.tsc_always_catchup = 1;
+                       return 0;
+               } else {
+                       WARN(1, "user requested TSC rate below hardware speed\n");
+                       return -1;
+               }
+       }
+
+       /* TSC scaling required  - calculate ratio */
+       ratio = mul_u64_u32_div(1ULL << kvm_tsc_scaling_ratio_frac_bits,
+                               user_tsc_khz, tsc_khz);
+
+       if (ratio == 0 || ratio >= kvm_max_tsc_scaling_ratio) {
+               WARN_ONCE(1, "Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
+                         user_tsc_khz);
+               return -1;
+       }
+
+       vcpu->arch.tsc_scaling_ratio = ratio;
+       return 0;
+}
+
+static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 {
        u32 thresh_lo, thresh_hi;
        int use_scaling = 0;
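
Note: the ratio computed by set_tsc_khz() is a fixed-point number with kvm_tsc_scaling_ratio_frac_bits fractional bits, i.e. guest_khz/host_khz scaled by 2^frac_bits; mul_u64_u32_div() only serves to keep the intermediate product from overflowing 64 bits. An illustrative sketch of the arithmetic, using 48 fractional bits (the VMX value) and a 128-bit intermediate as a stand-in for mul_u64_u32_div():

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for mul_u64_u32_div(a, b, c) == a * b / c, using the
 * GCC/Clang 128-bit extension so the product cannot overflow.
 */
static uint64_t tsc_ratio(uint32_t guest_khz, uint32_t host_khz,
                          unsigned int frac_bits)
{
        return (uint64_t)(((unsigned __int128)1 << frac_bits) *
                          guest_khz / host_khz);
}

int main(void)
{
        /* A 1.3 GHz guest on a 2.6 GHz host: 0.5 in x.48 fixed point. */
        uint64_t r = tsc_ratio(1300000, 2600000, 48);

        printf("%#llx\n", (unsigned long long)r);       /* 0x800000000000 */
        return 0;
}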
@@ -1262,7 +1299,7 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
        if (this_tsc_khz == 0) {
                /* set tsc_scaling_ratio to a safe value */
                vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
-               return;
+               return -1;
        }
 
        /* Compute a scale to convert nanoseconds in TSC cycles */
@@ -1283,7 +1320,7 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
                pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
                use_scaling = 1;
        }
-       kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
+       return set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
 }
 
 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
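
Note: the tolerance window that decides use_scaling comes from adjust_tsc_khz(), which applies a parts-per-million offset to the host rate. With the module default tsc_tolerance_ppm of 250, a 2600000 kHz host accepts requested rates in [2599350, 2600650] kHz before falling back to scaling or catchup. A sketch of that window (constants illustrative):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as adjust_tsc_khz() above, without do_div(). */
static uint32_t adjust_khz(uint32_t khz, int32_t ppm)
{
        return (uint64_t)khz * (1000000 + ppm) / 1000000;
}

int main(void)
{
        /* Module default tsc_tolerance_ppm == 250, host at 2.6 GHz. */
        printf("[%u, %u]\n", adjust_khz(2600000, -250),
               adjust_khz(2600000, 250));       /* [2599350, 2600650] */
        return 0;
}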
@@ -1356,6 +1393,21 @@ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
 }
 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
 
+static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+       u64 tsc;
+
+       tsc = kvm_scale_tsc(vcpu, rdtsc());
+
+       return target_tsc - tsc;
+}
+
+u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+{
+       return kvm_x86_ops->read_l1_tsc(vcpu, kvm_scale_tsc(vcpu, host_tsc));
+}
+EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct kvm *kvm = vcpu->kvm;
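
Note: the two helpers added above implement the model guest_tsc = scale(host_tsc) + offset. kvm_compute_tsc_offset() therefore picks offset = target_tsc - scale(rdtsc()), so a guest reading its TSC at that instant sees exactly target_tsc, and kvm_read_l1_tsc() scales the host counter before the backend applies the L1 offset. A self-contained sketch of that model (hypothetical helper names, illustrative ratio):

#include <stdint.h>
#include <stdio.h>

/* guest_tsc = scale(host_tsc) + offset; ratio/fbits are illustrative. */
static uint64_t scale(uint64_t host_tsc, uint64_t ratio, unsigned int fbits)
{
        return (uint64_t)((unsigned __int128)host_tsc * ratio >> fbits);
}

static int64_t compute_offset(uint64_t target_tsc, uint64_t host_now,
                              uint64_t ratio, unsigned int fbits)
{
        return (int64_t)(target_tsc - scale(host_now, ratio, fbits));
}

static uint64_t read_guest_tsc(uint64_t host_tsc, uint64_t ratio,
                               unsigned int fbits, int64_t offset)
{
        return scale(host_tsc, ratio, fbits) + offset;
}

int main(void)
{
        uint64_t ratio = 1ULL << 47;    /* 0.5 in x.48 fixed point */
        uint64_t now = 4000;
        int64_t off = compute_offset(12345, now, ratio, 48);

        /* A guest reading its TSC "now" sees exactly the target value. */
        printf("%llu\n",
               (unsigned long long)read_guest_tsc(now, ratio, 48, off));
        return 0;
}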
@@ -1367,7 +1419,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        u64 data = msr->data;
 
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-       offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+       offset = kvm_compute_tsc_offset(vcpu, data);
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -1424,7 +1476,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
                } else {
                        u64 delta = nsec_to_cycles(vcpu, elapsed);
                        data += delta;
-                       offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+                       offset = kvm_compute_tsc_offset(vcpu, data);
                        pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
                }
                matched = true;
@@ -1481,6 +1533,20 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
 EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+                                          s64 adjustment)
+{
+       kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+       if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
+               WARN_ON(adjustment < 0);
+       adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
+       kvm_x86_ops->adjust_tsc_offset_guest(vcpu, adjustment);
+}
+
 #ifdef CONFIG_X86_64
 
 static cycle_t read_tsc(void)
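
Note: the host variant exists because its adjustment originates in host cycles, while the offset the hardware applies is added after scaling and so lives in guest-cycle units; hence the kvm_scale_tsc() conversion. The WARN_ON guards the conversion's blind spot: scaling treats the value as unsigned, so a negative host delta would be mangled rather than scaled. A sketch of that failure mode (illustrative ratio and values):

#include <stdint.h>
#include <stdio.h>

static uint64_t scale(uint64_t v, uint64_t ratio, unsigned int fbits)
{
        return (uint64_t)((unsigned __int128)v * ratio >> fbits);
}

int main(void)
{
        uint64_t half = 1ULL << 47;     /* ratio 0.5 in x.48 fixed point */
        int64_t delta = -1000;

        /* (uint64_t)-1000 wraps to 0xfffffffffffffc18; scaling that does
         * not produce -500, so negative host deltas cannot take this path. */
        printf("%lld\n", (long long)scale((uint64_t)delta, half, 48));
        return 0;
}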
@@ -1642,7 +1708,7 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
 {
-       unsigned long flags, this_tsc_khz;
+       unsigned long flags, this_tsc_khz, tgt_tsc_khz;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct kvm_arch *ka = &v->kvm->arch;
        s64 kernel_ns;
@@ -1679,7 +1745,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                kernel_ns = get_kernel_ns();
        }
 
-       tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
+       tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
 
        /*
         * We may have to catch up the TSC to match elapsed wall clock
@@ -1705,7 +1771,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                return 0;
 
        if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
-               kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
+               tgt_tsc_khz = kvm_has_tsc_control ?
+                       vcpu->virtual_tsc_khz : this_tsc_khz;
+               kvm_get_time_scale(NSEC_PER_SEC / 1000, tgt_tsc_khz,
                                   &vcpu->hv_clock.tsc_shift,
                                   &vcpu->hv_clock.tsc_to_system_mul);
                vcpu->hw_tsc_khz = this_tsc_khz;
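
Note: with hardware TSC scaling the guest observes virtual_tsc_khz rather than the host rate, so the pvclock (mul, shift) pair must be derived from the guest-visible frequency; without scaling the old behaviour (host rate plus catchup) is kept. For reference, the pair feeds a cycles-to-nanoseconds conversion of the following shape on the guest side (simplified from pvclock's scale_delta(); the sample (mul, shift) pair is hand-picked to mean one nanosecond per cycle):

#include <stdint.h>
#include <stdio.h>

/* Simplified shape of pvclock's scale_delta(): cycles -> nanoseconds. */
static uint64_t scale_delta(uint64_t delta, uint32_t mul, int8_t shift)
{
        if (shift < 0)
                delta >>= -shift;
        else
                delta <<= shift;
        return (uint64_t)((unsigned __int128)delta * mul >> 32);
}

int main(void)
{
        /* mul = 2^31 with shift = 1 is a scale factor of exactly 1.0. */
        printf("%llu\n",
               (unsigned long long)scale_delta(1000, 1u << 31, 1)); /* 1000 */
        return 0;
}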
@@ -2651,7 +2719,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
                if (check_tsc_unstable()) {
-                       u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
+                       u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
                        kvm_x86_ops->write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;
@@ -2956,7 +3024,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
         * Copy each region from the possibly compacted offset to the
         * non-compacted offset.
         */
-       valid = xstate_bv & ~XSTATE_FPSSE;
+       valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
        while (valid) {
                u64 feature = valid & -valid;
                int index = fls64(feature) - 1;
@@ -2994,7 +3062,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
         * Copy each region from the non-compacted offset to the
         * possibly compacted offset.
         */
-       valid = xstate_bv & ~XSTATE_FPSSE;
+       valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
        while (valid) {
                u64 feature = valid & -valid;
                int index = fls64(feature) - 1;
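
Note: both copy loops (fill_xsave() above and load_xsave() here) walk the set bits of xstate_bv lowest-first: valid & -valid isolates the lowest set bit, fls64() - 1 converts it to a feature index, and subtracting the bit advances the walk, so each extended state region is visited exactly once. A standalone sketch of the traversal (example bitmap chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t valid = 0x27 & ~0x3ULL;   /* example xstate_bv minus FP/SSE */

        while (valid) {
                uint64_t feature = valid & -valid;         /* lowest set bit */
                int index = 63 - __builtin_clzll(feature); /* fls64() - 1 */

                printf("feature bit %d (mask %#llx)\n",
                       index, (unsigned long long)feature);
                valid -= feature;                          /* clear, advance */
        }
        return 0;
}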
@@ -3022,7 +3090,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
                        &vcpu->arch.guest_fpu.state.fxsave,
                        sizeof(struct fxregs_state));
                *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
-                       XSTATE_FPSSE;
+                       XFEATURE_MASK_FPSSE;
        }
 }
 
@@ -3042,7 +3110,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
                        return -EINVAL;
                load_xsave(vcpu, (u8 *)guest_xsave->region);
        } else {
-               if (xstate_bv & ~XSTATE_FPSSE)
+               if (xstate_bv & ~XFEATURE_MASK_FPSSE)
                        return -EINVAL;
                memcpy(&vcpu->arch.guest_fpu.state.fxsave,
                        guest_xsave->region, sizeof(struct fxregs_state));
@@ -3353,9 +3421,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                if (user_tsc_khz == 0)
                        user_tsc_khz = tsc_khz;
 
-               kvm_set_tsc_khz(vcpu, user_tsc_khz);
+               if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
+                       r = 0;
 
-               r = 0;
                goto out;
        }
        case KVM_GET_TSC_KHZ: {
@@ -6486,8 +6554,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (hw_breakpoint_active())
                hw_breakpoint_restore();
 
-       vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-                                                          rdtsc());
+       vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
@@ -7049,7 +7116,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
         */
        kvm_set_rflags(vcpu, rflags);
 
-       kvm_x86_ops->update_db_bp_intercept(vcpu);
+       kvm_x86_ops->update_bp_intercept(vcpu);
 
        r = 0;
 
@@ -7123,7 +7190,7 @@ static void fx_init(struct kvm_vcpu *vcpu)
        /*
         * Ensure guest xcr0 is valid for loading
         */
-       vcpu->arch.xcr0 = XSTATE_FP;
+       vcpu->arch.xcr0 = XFEATURE_MASK_FP;
 
        vcpu->arch.cr0 |= X86_CR0_ET;
 }