x86_64: prepare shared kernel/tsc.c
author     Thomas Gleixner <tglx@linutronix.de>
           Thu, 11 Oct 2007 09:15:16 +0000 (11:15 +0200)
committer  Thomas Gleixner <tglx@linutronix.de>
           Thu, 11 Oct 2007 09:15:16 +0000 (11:15 +0200)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86_64/kernel/Makefile
arch/x86_64/kernel/tsc.c [deleted file]
arch/x86_64/kernel/tsc_64.c [new file with mode: 0644]

diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index 4028850291063c9ca87c2e0768974b12f7228b20..2c0e778e8acfa766d4cddad320a010957c9f3fbb 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y   := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
                ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
                x8664_ksyms_64.o i387_64.o syscall.o vsyscall_64.o \
                setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \
-               pci-dma_64.o pci-nommu_64.o alternative.o hpet_64.o tsc.o bugs_64.o \
+               pci-dma_64.o pci-nommu_64.o alternative.o hpet_64.o tsc_64.o bugs_64.o \
                perfctr-watchdog.o
 
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
diff --git a/arch/x86_64/kernel/tsc.c b/arch/x86_64/kernel/tsc.c
deleted file mode 100644
index 2a59bde..0000000
--- a/arch/x86_64/kernel/tsc.c
+++ /dev/null
@@ -1,207 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/clocksource.h>
-#include <linux/time.h>
-#include <linux/acpi.h>
-#include <linux/cpufreq.h>
-
-#include <asm/timex.h>
-
-static int notsc __initdata = 0;
-
-unsigned int cpu_khz;          /* TSC clocks / msec, not used here */
-EXPORT_SYMBOL(cpu_khz);
-unsigned int tsc_khz;
-EXPORT_SYMBOL(tsc_khz);
-
-static unsigned int cyc2ns_scale __read_mostly;
-
-void set_cyc2ns_scale(unsigned long khz)
-{
-       cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
-}
-
-static unsigned long long cycles_2_ns(unsigned long long cyc)
-{
-       return (cyc * cyc2ns_scale) >> NS_SCALE;
-}
-
-unsigned long long sched_clock(void)
-{
-       unsigned long a = 0;
-
-       /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
-        * which means it is not completely exact and may not be monotonic
-        * between CPUs. But the errors should be too small to matter for
-        * scheduling purposes.
-        */
-
-       rdtscll(a);
-       return cycles_2_ns(a);
-}
-
-static int tsc_unstable;
-
-inline int check_tsc_unstable(void)
-{
-       return tsc_unstable;
-}
-#ifdef CONFIG_CPU_FREQ
-
-/* Frequency scaling support. Adjust the TSC-based timer when the CPU frequency
- * changes.
- *
- * RED-PEN: On SMP we assume all CPUs run with the same frequency.  It's
- * not that important because current Opteron setups do not support
- * scaling on SMP anyway.
- *
- * Should fix up last_tsc too. Currently gettimeofday in the
- * first tick after the change will be slightly wrong.
- */
-
-static unsigned int  ref_freq;
-static unsigned long loops_per_jiffy_ref;
-static unsigned long tsc_khz_ref;
-
-static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-                                void *data)
-{
-       struct cpufreq_freqs *freq = data;
-       unsigned long *lpj, dummy;
-
-       if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
-               return 0;
-
-       lpj = &dummy;
-       if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-#ifdef CONFIG_SMP
-               lpj = &cpu_data[freq->cpu].loops_per_jiffy;
-#else
-               lpj = &boot_cpu_data.loops_per_jiffy;
-#endif
-
-       if (!ref_freq) {
-               ref_freq = freq->old;
-               loops_per_jiffy_ref = *lpj;
-               tsc_khz_ref = tsc_khz;
-       }
-       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-               (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-               (val == CPUFREQ_RESUMECHANGE)) {
-               *lpj =
-               cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
-
-               tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
-               if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-                       mark_tsc_unstable("cpufreq changes");
-       }
-
-       set_cyc2ns_scale(tsc_khz_ref);
-
-       return 0;
-}
-
-static struct notifier_block time_cpufreq_notifier_block = {
-       .notifier_call  = time_cpufreq_notifier
-};
-
-static int __init cpufreq_tsc(void)
-{
-       cpufreq_register_notifier(&time_cpufreq_notifier_block,
-                                 CPUFREQ_TRANSITION_NOTIFIER);
-       return 0;
-}
-
-core_initcall(cpufreq_tsc);
-
-#endif
-
-/*
- * Make an educated guess if the TSC is trustworthy and synchronized
- * over all CPUs.
- */
-__cpuinit int unsynchronized_tsc(void)
-{
-       if (tsc_unstable)
-               return 1;
-
-#ifdef CONFIG_SMP
-       if (apic_is_clustered_box())
-               return 1;
-#endif
-       /* Most Intel systems have synchronized TSCs except for
-          multi-node systems */
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
-#ifdef CONFIG_ACPI
-               /* But TSC doesn't tick in C3 so don't use it there */
-               if (acpi_gbl_FADT.header.length > 0 &&
-                   acpi_gbl_FADT.C3latency < 1000)
-                       return 1;
-#endif
-               return 0;
-       }
-
-       /* Assume multi-socket systems are not synchronized */
-       return num_present_cpus() > 1;
-}
-
-int __init notsc_setup(char *s)
-{
-       notsc = 1;
-       return 1;
-}
-
-__setup("notsc", notsc_setup);
-
-
-/* clock source code: */
-static cycle_t read_tsc(void)
-{
-       cycle_t ret = (cycle_t)get_cycles_sync();
-       return ret;
-}
-
-static cycle_t __vsyscall_fn vread_tsc(void)
-{
-       cycle_t ret = (cycle_t)get_cycles_sync();
-       return ret;
-}
-
-static struct clocksource clocksource_tsc = {
-       .name                   = "tsc",
-       .rating                 = 300,
-       .read                   = read_tsc,
-       .mask                   = CLOCKSOURCE_MASK(64),
-       .shift                  = 22,
-       .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
-                                 CLOCK_SOURCE_MUST_VERIFY,
-       .vread                  = vread_tsc,
-};
-
-void mark_tsc_unstable(char *reason)
-{
-       if (!tsc_unstable) {
-               tsc_unstable = 1;
-               printk("Marking TSC unstable due to %s\n", reason);
-               /* Change only the rating, when not registered */
-               if (clocksource_tsc.mult)
-                       clocksource_change_rating(&clocksource_tsc, 0);
-               else
-                       clocksource_tsc.rating = 0;
-       }
-}
-EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-
-void __init init_tsc_clocksource(void)
-{
-       if (!notsc) {
-               clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
-                                                       clocksource_tsc.shift);
-               if (check_tsc_unstable())
-                       clocksource_tsc.rating = 0;
-
-               clocksource_register(&clocksource_tsc);
-       }
-}
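
The cycles_2_ns() conversion above (carried over verbatim into tsc_64.c below) is a
fixed-point trick: set_cyc2ns_scale() precomputes the nanoseconds-per-cycle ratio
scaled up by 2^NS_SCALE, so the sched_clock() hot path needs only one multiply and
one shift instead of a division. A minimal userspace sketch of the same arithmetic,
assuming NS_SCALE is 10 as in the x86_64 <asm/timex.h> of this period (the
standalone names set_scale/cycles_to_ns are ours):

    #include <stdio.h>

    #define NS_SCALE        10            /* assumed: 2^10 fraction bits */
    #define NSEC_PER_MSEC   1000000ULL

    static unsigned int cyc2ns_scale;

    /* Mirrors set_cyc2ns_scale(): khz is cycles per millisecond, so
     * NSEC_PER_MSEC / khz is ns per cycle; store it scaled by 2^NS_SCALE. */
    static void set_scale(unsigned long khz)
    {
            cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
    }

    /* Mirrors cycles_2_ns(): one multiply, one shift, no division. */
    static unsigned long long cycles_to_ns(unsigned long long cyc)
    {
            return (cyc * cyc2ns_scale) >> NS_SCALE;
    }

    int main(void)
    {
            set_scale(2000000);           /* 2 GHz TSC = 2,000,000 kHz */
            /* 0.5 ns per cycle: 10^9 cycles -> 500,000,000 ns */
            printf("%llu ns\n", cycles_to_ns(1000000000ULL));
            return 0;
    }

At 2 GHz the scale works out to 512, i.e. 512/1024 = 0.5 ns per cycle; keeping
NS_SCALE small also leaves headroom in the 64-bit product cyc * cyc2ns_scale.
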
diff --git a/arch/x86_64/kernel/tsc_64.c b/arch/x86_64/kernel/tsc_64.c
new file mode 100644
index 0000000..2a59bde
--- /dev/null
+++ b/arch/x86_64/kernel/tsc_64.c
@@ -0,0 +1,207 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/clocksource.h>
+#include <linux/time.h>
+#include <linux/acpi.h>
+#include <linux/cpufreq.h>
+
+#include <asm/timex.h>
+
+static int notsc __initdata = 0;
+
+unsigned int cpu_khz;          /* TSC clocks / msec, not used here */
+EXPORT_SYMBOL(cpu_khz);
+unsigned int tsc_khz;
+EXPORT_SYMBOL(tsc_khz);
+
+static unsigned int cyc2ns_scale __read_mostly;
+
+void set_cyc2ns_scale(unsigned long khz)
+{
+       cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
+}
+
+static unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+       return (cyc * cyc2ns_scale) >> NS_SCALE;
+}
+
+unsigned long long sched_clock(void)
+{
+       unsigned long a = 0;
+
+       /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
+        * which means it is not completely exact and may not be monotonic
+        * between CPUs. But the errors should be too small to matter for
+        * scheduling purposes.
+        */
+
+       rdtscll(a);
+       return cycles_2_ns(a);
+}
+
+static int tsc_unstable;
+
+inline int check_tsc_unstable(void)
+{
+       return tsc_unstable;
+}
+#ifdef CONFIG_CPU_FREQ
+
+/* Frequency scaling support. Adjust the TSC-based timer when the CPU frequency
+ * changes.
+ *
+ * RED-PEN: On SMP we assume all CPUs run with the same frequency.  It's
+ * not that important because current Opteron setups do not support
+ * scaling on SMP anyway.
+ *
+ * Should fix up last_tsc too. Currently gettimeofday in the
+ * first tick after the change will be slightly wrong.
+ */
+
+static unsigned int  ref_freq;
+static unsigned long loops_per_jiffy_ref;
+static unsigned long tsc_khz_ref;
+
+static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+                                void *data)
+{
+       struct cpufreq_freqs *freq = data;
+       unsigned long *lpj, dummy;
+
+       if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
+               return 0;
+
+       lpj = &dummy;
+       if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+#ifdef CONFIG_SMP
+               lpj = &cpu_data[freq->cpu].loops_per_jiffy;
+#else
+               lpj = &boot_cpu_data.loops_per_jiffy;
+#endif
+
+       if (!ref_freq) {
+               ref_freq = freq->old;
+               loops_per_jiffy_ref = *lpj;
+               tsc_khz_ref = tsc_khz;
+       }
+       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+               (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+               (val == CPUFREQ_RESUMECHANGE)) {
+               *lpj =
+               cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+
+               tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
+               if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+                       mark_tsc_unstable("cpufreq changes");
+       }
+
+       set_cyc2ns_scale(tsc_khz_ref);
+
+       return 0;
+}
+
+static struct notifier_block time_cpufreq_notifier_block = {
+       .notifier_call  = time_cpufreq_notifier
+};
+
+static int __init cpufreq_tsc(void)
+{
+       cpufreq_register_notifier(&time_cpufreq_notifier_block,
+                                 CPUFREQ_TRANSITION_NOTIFIER);
+       return 0;
+}
+
+core_initcall(cpufreq_tsc);
+
+#endif
+
+/*
+ * Make an educated guess if the TSC is trustworthy and synchronized
+ * over all CPUs.
+ */
+__cpuinit int unsynchronized_tsc(void)
+{
+       if (tsc_unstable)
+               return 1;
+
+#ifdef CONFIG_SMP
+       if (apic_is_clustered_box())
+               return 1;
+#endif
+       /* Most Intel systems have synchronized TSCs except for
+          multi-node systems */
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+#ifdef CONFIG_ACPI
+               /* But TSC doesn't tick in C3 so don't use it there */
+               if (acpi_gbl_FADT.header.length > 0 &&
+                   acpi_gbl_FADT.C3latency < 1000)
+                       return 1;
+#endif
+               return 0;
+       }
+
+       /* Assume multi-socket systems are not synchronized */
+       return num_present_cpus() > 1;
+}
+
+int __init notsc_setup(char *s)
+{
+       notsc = 1;
+       return 1;
+}
+
+__setup("notsc", notsc_setup);
+
+
+/* clock source code: */
+static cycle_t read_tsc(void)
+{
+       cycle_t ret = (cycle_t)get_cycles_sync();
+       return ret;
+}
+
+static cycle_t __vsyscall_fn vread_tsc(void)
+{
+       cycle_t ret = (cycle_t)get_cycles_sync();
+       return ret;
+}
+
+static struct clocksource clocksource_tsc = {
+       .name                   = "tsc",
+       .rating                 = 300,
+       .read                   = read_tsc,
+       .mask                   = CLOCKSOURCE_MASK(64),
+       .shift                  = 22,
+       .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
+                                 CLOCK_SOURCE_MUST_VERIFY,
+       .vread                  = vread_tsc,
+};
+
+void mark_tsc_unstable(char *reason)
+{
+       if (!tsc_unstable) {
+               tsc_unstable = 1;
+               printk("Marking TSC unstable due to %s\n", reason);
+               /* Change only the rating, when not registered */
+               if (clocksource_tsc.mult)
+                       clocksource_change_rating(&clocksource_tsc, 0);
+               else
+                       clocksource_tsc.rating = 0;
+       }
+}
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+void __init init_tsc_clocksource(void)
+{
+       if (!notsc) {
+               clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
+                                                       clocksource_tsc.shift);
+               if (check_tsc_unstable())
+                       clocksource_tsc.rating = 0;
+
+               clocksource_register(&clocksource_tsc);
+       }
+}
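
For the clocksource side, init_tsc_clocksource() derives clocksource_tsc.mult from
tsc_khz with clocksource_khz2mult(tsc_khz, 22), giving the generic timekeeping code
the same multiply-and-shift conversion: ns = (cycles * mult) >> shift. A hedged
standalone sketch of that helper's arithmetic (rounding to nearest, as the kernel
helper of this period does; the name khz2mult here is ours):

    #include <stdio.h>
    #include <stdint.h>

    /* Find mult such that ns = (cycles * mult) >> shift for a counter
     * running at `khz` kHz: mult = (10^6 << shift) / khz, because
     * 10^6 / khz is the counter period in nanoseconds. */
    static uint32_t khz2mult(uint32_t khz, uint32_t shift)
    {
            uint64_t tmp = (uint64_t)1000000 << shift;
            tmp += khz / 2;               /* round to nearest */
            return (uint32_t)(tmp / khz);
    }

    int main(void)
    {
            uint32_t mult = khz2mult(2000000, 22);  /* 2 GHz TSC, shift 22 */
            unsigned long long ns = (4096ULL * mult) >> 22;
            printf("mult=%u, 4096 cycles = %llu ns\n", mult, ns);
            return 0;                     /* prints mult=2097152, 2048 ns */
    }

This is also why mark_tsc_unstable() can demote the TSC by rating alone: the
mult/shift pair stays numerically valid, the clocksource merely loses the
selection against hpet or acpi_pm.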