Merge branch 'master' of ssh://master.kernel.org/pub/scm/linux/kernel/git/rusty/linux...
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 71f4368b357edf2a47fd035349bfbcd8c595e882..cd982f48e23e11c74acd08e8db823c3ef4fe5886 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -17,6 +17,8 @@
 #include <asm/time.h>
 #include <asm/delay.h>
 #include <asm/hypervisor.h>
+#include <asm/nmi.h>
+#include <asm/x86_init.h>
 
 unsigned int __read_mostly cpu_khz;    /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -400,15 +402,9 @@ unsigned long native_calibrate_tsc(void)
 {
        u64 tsc1, tsc2, delta, ref1, ref2;
        unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
-       unsigned long flags, latch, ms, fast_calibrate, hv_tsc_khz;
+       unsigned long flags, latch, ms, fast_calibrate;
        int hpet = is_hpet_enabled(), i, loopmin;
 
-       hv_tsc_khz = get_hypervisor_tsc_freq();
-       if (hv_tsc_khz) {
-               printk(KERN_INFO "TSC: Frequency read from the hypervisor\n");
-               return hv_tsc_khz;
-       }
-
        local_irq_save(flags);
        fast_calibrate = quick_pit_calibrate();
        local_irq_restore(flags);
@@ -566,7 +562,7 @@ int recalibrate_cpu_khz(void)
        unsigned long cpu_khz_old = cpu_khz;
 
        if (cpu_has_tsc) {
-               tsc_khz = calibrate_tsc();
+               tsc_khz = x86_platform.calibrate_tsc();
                cpu_khz = tsc_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
@@ -670,7 +666,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
                        (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
                        (val == CPUFREQ_RESUMECHANGE)) {
-               *lpj =  cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+               *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
 
                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
@@ -744,10 +740,16 @@ static cycle_t __vsyscall_fn vread_tsc(void)
 }
 #endif
 
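+/*
+ * The TSC may restart from zero across suspend/resume while cycle_last
+ * still holds the last pre-suspend reading; clearing it keeps the first
+ * read after resume from producing a huge bogus delta.
+ */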
+static void resume_tsc(void)
+{
+       clocksource_tsc.cycle_last = 0;
+}
+
 static struct clocksource clocksource_tsc = {
        .name                   = "tsc",
        .rating                 = 300,
        .read                   = read_tsc,
+       .resume                 = resume_tsc,
        .mask                   = CLOCKSOURCE_MASK(64),
        .shift                  = 22,
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
@@ -761,12 +763,14 @@ void mark_tsc_unstable(char *reason)
 {
        if (!tsc_unstable) {
                tsc_unstable = 1;
-               printk("Marking TSC unstable due to %s\n", reason);
+               printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
                /* Change only the rating, when not registered */
                if (clocksource_tsc.mult)
-                       clocksource_change_rating(&clocksource_tsc, 0);
-               else
+                       clocksource_mark_unstable(&clocksource_tsc);
+               else {
+                       clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
                        clocksource_tsc.rating = 0;
+               }
        }
 }
 
@@ -852,15 +856,71 @@ static void __init init_tsc_clocksource(void)
        clocksource_register(&clocksource_tsc);
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * calibrate_cpu is used on systems with fixed-rate TSCs to determine
+ * the processor frequency: it counts unhalted core cycles against a
+ * TICK_COUNT-long stretch of TSC ticks (tens of milliseconds at GHz
+ * clock rates).
+ */
+#define TICK_COUNT 100000000
+static unsigned long __init calibrate_cpu(void)
+{
+       int tsc_start, tsc_now;
+       int i, no_ctr_free;
+       unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
+       unsigned long flags;
+
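+       /* find a free K7 performance counter we can borrow */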
+       for (i = 0; i < 4; i++)
+               if (avail_to_resrv_perfctr_nmi_bit(i))
+                       break;
+       no_ctr_free = (i == 4);
+       if (no_ctr_free) {
+               WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
+                    "cpu_khz value may be incorrect.\n");
+               i = 3;
+               rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
+               wrmsrl(MSR_K7_EVNTSEL3, 0);
+               rdmsrl(MSR_K7_PERFCTR3, pmc3);
+       } else {
+               reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+               reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+       }
+       local_irq_save(flags);
+       /* start measuring cycles, incrementing from 0 */
+       wrmsrl(MSR_K7_PERFCTR0 + i, 0);
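+       /*
+        * Event 0x76 is CPU_CLK_UNHALTED; bit 22 enables the counter,
+        * bits 17:16 select counting in both OS and user mode.
+        */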
+       wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
+       rdtscl(tsc_start);
+       do {
+               rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
+               tsc_now = get_cycles();
+       } while ((tsc_now - tsc_start) < TICK_COUNT);
+
+       local_irq_restore(flags);
+       if (no_ctr_free) {
+               wrmsrl(MSR_K7_EVNTSEL3, 0);
+               wrmsrl(MSR_K7_PERFCTR3, pmc3);
+               wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
+       } else {
+               release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+               release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+       }
+
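+       /*
+        * pmc_now core cycles elapsed over (tsc_now - tsc_start) TSC
+        * ticks; the TSC runs at tsc_khz, so this yields the core
+        * clock in kHz.
+        */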
+       return pmc_now * tsc_khz / (tsc_now - tsc_start);
+}
+#else
+static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
+#endif
+
 void __init tsc_init(void)
 {
        u64 lpj;
        int cpu;
 
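+       /*
+        * Let the platform hook into TSC setup before calibration;
+        * x86_platform.calibrate_tsc below replaces the old direct
+        * hypervisor frequency check.
+        */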
+       x86_init.timers.tsc_pre_init();
+
        if (!cpu_has_tsc)
                return;
 
-       tsc_khz = calibrate_tsc();
+       tsc_khz = x86_platform.calibrate_tsc();
        cpu_khz = tsc_khz;
 
        if (!tsc_khz) {
@@ -868,11 +928,9 @@ void __init tsc_init(void)
                return;
        }
 
-#ifdef CONFIG_X86_64
        if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
                        (boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
                cpu_khz = calibrate_cpu();
-#endif
 
        printk("Detected %lu.%03lu MHz processor.\n",
                        (unsigned long)cpu_khz / 1000,