[PATCH] Time: i386 Conversion - part 2: Rework TSC Support
author    John Stultz <johnstul@us.ibm.com>
          Mon, 26 Jun 2006 07:25:10 +0000 (00:25 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
          Mon, 26 Jun 2006 16:58:21 +0000 (09:58 -0700)
As part of the i386 conversion to the generic timekeeping infrastructure, this
introduces a new tsc.c file.  It collects the TSC initialization, management
and access code that we want to preserve from timer_tsc.c (which will be
removed).

The code also introduces the following functionality:

o tsc_khz: like cpu_khz, but stores the TSC frequency on systems whose
  TSC does not change frequency along with the CPU frequency

o check_tsc_unstable/mark_tsc_unstable: accessor and modifier for the flag
  that records whether the TSC is usable for timekeeping (see the sketch
  below)

o minor cleanups to calibration math.

This patch also includes a one-line __cpuinitdata fix from Zwane Mwaikambo.
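
For illustration, here is a minimal sketch of how the new interface is meant
to be consumed (the function below is hypothetical and not part of this
patch; only tsc_khz and mark_tsc_unstable() come from it):

	#include <asm/tsc.h>

	/* hypothetical consumer -- not part of this patch */
	static void hypothetical_tsc_user(void)
	{
		if (!tsc_khz)
			return;		/* no calibrated TSC available */

		/*
		 * anyone who knows the TSC just became unreliable for
		 * timekeeping (cpufreq transition, deep C-state, unsynced
		 * SMP) flags it for the timekeeping core:
		 */
		mark_tsc_unstable();
	}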

Signed-off-by: John Stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
arch/i386/kernel/Makefile
arch/i386/kernel/numaq.c
arch/i386/kernel/setup.c
arch/i386/kernel/timers/timer_tsc.c
arch/i386/kernel/tsc.c [new file with mode: 0644]
drivers/acpi/processor_idle.c
include/asm-i386/mach-default/mach_timer.h
include/asm-i386/mach-summit/mach_mpparse.h
include/asm-i386/timex.h
include/asm-i386/tsc.h [new file with mode: 0644]

index 4142d69a5336d69d4c389fce7cdb01687be0ad61..ca70d61ea834c184f4b4c844e20ee53cc1d23795 100644
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y  := process.o semaphore.o signal.o entry.o traps.o irq.o \
                ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
                pci-dma.o i386_ksyms.o i387.o bootflag.o \
-               quirks.o i8237.o topology.o alternative.o i8253.o
+               quirks.o i8237.o topology.o alternative.o i8253.o tsc.o
 
 obj-y                          += cpu/
 obj-y                          += timers/
index 5f5b075f860a8c97ceddf6fed27f45ef153f4622..0caf14652badfca7856a3b7d13e54cbc0f461918 100644
@@ -79,10 +79,12 @@ int __init get_memcfg_numaq(void)
        return 1;
 }
 
-static int __init numaq_dsc_disable(void)
+static int __init numaq_tsc_disable(void)
 {
-       printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
-       tsc_disable = 1;
+       if (num_online_nodes() > 1) {
+               printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
+               tsc_disable = 1;
+       }
        return 0;
 }
-core_initcall(numaq_dsc_disable);
+arch_initcall(numaq_tsc_disable);
index 6bef9273733e66370f79963e032f1470c4d4d21e..4a65040cc624b06bf1de3ca678f66cd9527b36a9 100644
@@ -1575,6 +1575,7 @@ void __init setup_arch(char **cmdline_p)
        conswitchp = &dummy_con;
 #endif
 #endif
+       tsc_init();
 }
 
 static __init int add_pcspkr(void)
index f1187ddb0d0fc07809a3f5f20ab43177b6fbe61f..243ec04840793e2e1e7a88a5ff98f25da46557b1 100644
@@ -32,10 +32,6 @@ static unsigned long hpet_last;
 static struct timer_opts timer_tsc;
 #endif
 
-static inline void cpufreq_delayed_get(void);
-
-int tsc_disable __devinitdata = 0;
-
 static int use_tsc;
 /* Number of usecs that the last interrupt was delayed */
 static int delay_at_last_interrupt;
@@ -144,30 +140,6 @@ static unsigned long long monotonic_clock_tsc(void)
        return base + cycles_2_ns(this_offset - last_offset);
 }
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long sched_clock(void)
-{
-       unsigned long long this_offset;
-
-       /*
-        * In the NUMA case we dont use the TSC as they are not
-        * synchronized across all CPUs.
-        */
-#ifndef CONFIG_NUMA
-       if (!use_tsc)
-#endif
-               /* no locking but a rare wrong value is not a big deal */
-               return jiffies_64 * (1000000000 / HZ);
-
-       /* Read the Time Stamp Counter */
-       rdtscll(this_offset);
-
-       /* return the value in ns */
-       return cycles_2_ns(this_offset);
-}
-
 static void delay_tsc(unsigned long loops)
 {
        unsigned long bclock, now;
@@ -231,136 +203,6 @@ static void mark_offset_tsc_hpet(void)
 }
 #endif
 
-
-#ifdef CONFIG_CPU_FREQ
-#include <linux/workqueue.h>
-
-static unsigned int cpufreq_delayed_issched = 0;
-static unsigned int cpufreq_init = 0;
-static struct work_struct cpufreq_delayed_get_work;
-
-static void handle_cpufreq_delayed_get(void *v)
-{
-       unsigned int cpu;
-       for_each_online_cpu(cpu) {
-               cpufreq_get(cpu);
-       }
-       cpufreq_delayed_issched = 0;
-}
-
-/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
- * to verify the CPU frequency the timing core thinks the CPU is running
- * at is still correct.
- */
-static inline void cpufreq_delayed_get(void) 
-{
-       if (cpufreq_init && !cpufreq_delayed_issched) {
-               cpufreq_delayed_issched = 1;
-               printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
-               schedule_work(&cpufreq_delayed_get_work);
-       }
-}
-
-/* If the CPU frequency is scaled, TSC-based delays will need a different
- * loops_per_jiffy value to function properly.
- */
-
-static unsigned int  ref_freq = 0;
-static unsigned long loops_per_jiffy_ref = 0;
-
-#ifndef CONFIG_SMP
-static unsigned long fast_gettimeoffset_ref = 0;
-static unsigned int cpu_khz_ref = 0;
-#endif
-
-static int
-time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
-                      void *data)
-{
-       struct cpufreq_freqs *freq = data;
-
-       if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
-               write_seqlock_irq(&xtime_lock);
-       if (!ref_freq) {
-               if (!freq->old){
-                       ref_freq = freq->new;
-                       goto end;
-               }
-               ref_freq = freq->old;
-               loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
-#ifndef CONFIG_SMP
-               fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
-               cpu_khz_ref = cpu_khz;
-#endif
-       }
-
-       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-           (val == CPUFREQ_RESUMECHANGE)) {
-               if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-                       cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
-#ifndef CONFIG_SMP
-               if (cpu_khz)
-                       cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
-               if (use_tsc) {
-                       if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
-                               fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
-                               set_cyc2ns_scale(cpu_khz);
-                       }
-               }
-#endif
-       }
-
-end:
-       if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
-               write_sequnlock_irq(&xtime_lock);
-
-       return 0;
-}
-
-static struct notifier_block time_cpufreq_notifier_block = {
-       .notifier_call  = time_cpufreq_notifier
-};
-
-
-static int __init cpufreq_tsc(void)
-{
-       int ret;
-       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
-       ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
-                                       CPUFREQ_TRANSITION_NOTIFIER);
-       if (!ret)
-               cpufreq_init = 1;
-       return ret;
-}
-core_initcall(cpufreq_tsc);
-
-#else /* CONFIG_CPU_FREQ */
-static inline void cpufreq_delayed_get(void) { return; }
-#endif 
-
-int recalibrate_cpu_khz(void)
-{
-#ifndef CONFIG_SMP
-       unsigned int cpu_khz_old = cpu_khz;
-
-       if (cpu_has_tsc) {
-               local_irq_disable();
-               init_cpu_khz();
-               local_irq_enable();
-               cpu_data[0].loops_per_jiffy =
-                   cpufreq_scale(cpu_data[0].loops_per_jiffy,
-                                 cpu_khz_old,
-                                 cpu_khz);
-               return 0;
-       } else
-               return -ENODEV;
-#else
-       return -ENODEV;
-#endif
-}
-EXPORT_SYMBOL(recalibrate_cpu_khz);
-
 static void mark_offset_tsc(void)
 {
        unsigned long lost,delay;
@@ -451,9 +293,6 @@ static void mark_offset_tsc(void)
 
                        clock_fallback();
                }
-               /* ... but give the TSC a fair chance */
-               if (lost_count > 25)
-                       cpufreq_delayed_get();
        } else
                lost_count = 0;
        /* update the monotonic base value */
@@ -578,23 +417,6 @@ static int tsc_resume(void)
        return 0;
 }
 
-#ifndef CONFIG_X86_TSC
-/* disable flag for tsc.  Takes effect by clearing the TSC cpu flag
- * in cpu/common.c */
-static int __init tsc_setup(char *str)
-{
-       tsc_disable = 1;
-       return 1;
-}
-#else
-static int __init tsc_setup(char *str)
-{
-       printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
-                               "cannot disable TSC.\n");
-       return 1;
-}
-#endif
-__setup("notsc", tsc_setup);
 
 
 
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
new file mode 100644
index 0000000..3b64eaa
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
+ * which was originally moved from arch/i386/kernel/time.c.
+ * See comments there for proper credits.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/cpufreq.h>
+#include <linux/jiffies.h>
+#include <linux/init.h>
+
+#include <asm/tsc.h>
+#include <asm/io.h>
+
+#include "mach_timer.h"
+
+/*
+ * On some systems the TSC frequency does not
+ * change with the cpu frequency. So we need
+ * an extra value to store the TSC freq
+ */
+unsigned int tsc_khz;
+
+int tsc_disable __cpuinitdata = 0;
+
+#ifdef CONFIG_X86_TSC
+static int __init tsc_setup(char *str)
+{
+       printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
+                               "cannot disable TSC.\n");
+       return 1;
+}
+#else
+/*
+ * disable flag for tsc. Takes effect by clearing the TSC cpu flag
+ * in cpu/common.c
+ */
+static int __init tsc_setup(char *str)
+{
+       tsc_disable = 1;
+
+       return 1;
+}
+#endif
+
+__setup("notsc", tsc_setup);
+
+
+/*
+ * code to mark and check if the TSC is unstable
+ * due to cpufreq or due to unsynced TSCs
+ */
+static int tsc_unstable;
+
+static inline int check_tsc_unstable(void)
+{
+       return tsc_unstable;
+}
+
+void mark_tsc_unstable(void)
+{
+       tsc_unstable = 1;
+}
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+/* Accelerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ *  basic equation:
+ *             ns = cycles / (freq / ns_per_sec)
+ *             ns = cycles * (ns_per_sec / freq)
+ *             ns = cycles * (10^9 / (cpu_khz * 10^3))
+ *             ns = cycles * (10^6 / cpu_khz)
+ *
+ *     Then we use scaling math (suggested by george@mvista.com) to get:
+ *             ns = cycles * (10^6 * SC / cpu_khz) / SC
+ *             ns = cycles * cyc2ns_scale / SC
+ *
+ *     And since SC is a constant power of two, we can convert the div
+ *  into a shift.
+ *
+ *  We can use a khz divisor instead of mhz to keep better precision, since
+ *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ *  (mathieu.desnoyers@polymtl.ca)
+ *
+ *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+static unsigned long cyc2ns_scale __read_mostly;
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline void set_cyc2ns_scale(unsigned long cpu_khz)
+{
+       cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
+}
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+       return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+unsigned long long sched_clock(void)
+{
+       unsigned long long this_offset;
+
+       /*
+        * in the NUMA case we don't use the TSC as they are not
+        * synchronized across all CPUs.
+        */
+#ifndef CONFIG_NUMA
+       if (!cpu_khz || check_tsc_unstable())
+#endif
+               /* no locking but a rare wrong value is not a big deal */
+               return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
+
+       /* read the Time Stamp Counter: */
+       rdtscll(this_offset);
+
+       /* return the value in ns */
+       return cycles_2_ns(this_offset);
+}
+
+static unsigned long calculate_cpu_khz(void)
+{
+       unsigned long long start, end;
+       unsigned long count;
+       u64 delta64;
+       int i;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       /* run 3 times to ensure the cache is warm */
+       for (i = 0; i < 3; i++) {
+               mach_prepare_counter();
+               rdtscll(start);
+               mach_countup(&count);
+               rdtscll(end);
+       }
+       /*
+        * Error: ECTCNEVERSET
+        * The CTC wasn't reliable: we got a hit on the very first read,
+        * or the CPU was so fast/slow that the quotient wouldn't fit in
+        * 32 bits..
+        */
+       if (count <= 1)
+               goto err;
+
+       delta64 = end - start;
+
+       /* cpu freq too fast: */
+       if (delta64 > (1ULL<<32))
+               goto err;
+
+       /* cpu freq too slow: */
+       if (delta64 <= CALIBRATE_TIME_MSEC)
+               goto err;
+
+       delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
+       do_div(delta64,CALIBRATE_TIME_MSEC);
+
+       local_irq_restore(flags);
+       return (unsigned long)delta64;
+err:
+       local_irq_restore(flags);
+       return 0;
+}
+
+int recalibrate_cpu_khz(void)
+{
+#ifndef CONFIG_SMP
+       unsigned long cpu_khz_old = cpu_khz;
+
+       if (cpu_has_tsc) {
+               cpu_khz = calculate_cpu_khz();
+               tsc_khz = cpu_khz;
+               cpu_data[0].loops_per_jiffy =
+                       cpufreq_scale(cpu_data[0].loops_per_jiffy,
+                                       cpu_khz_old, cpu_khz);
+               return 0;
+       } else
+               return -ENODEV;
+#else
+       return -ENODEV;
+#endif
+}
+
+EXPORT_SYMBOL(recalibrate_cpu_khz);
+
+void tsc_init(void)
+{
+       if (!cpu_has_tsc || tsc_disable)
+               return;
+
+       cpu_khz = calculate_cpu_khz();
+       tsc_khz = cpu_khz;
+
+       if (!cpu_khz)
+               return;
+
+       printk("Detected %lu.%03lu MHz processor.\n",
+                               (unsigned long)cpu_khz / 1000,
+                               (unsigned long)cpu_khz % 1000);
+
+       set_cyc2ns_scale(cpu_khz);
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+static unsigned int cpufreq_delayed_issched = 0;
+static unsigned int cpufreq_init = 0;
+static struct work_struct cpufreq_delayed_get_work;
+
+static void handle_cpufreq_delayed_get(void *v)
+{
+       unsigned int cpu;
+
+       for_each_online_cpu(cpu)
+               cpufreq_get(cpu);
+
+       cpufreq_delayed_issched = 0;
+}
+
+/*
+ * if we notice cpufreq oddness, schedule a call to cpufreq_get(); it will
+ * verify that the CPU frequency the timing core thinks the CPU is
+ * running at is still correct.
+ */
+static inline void cpufreq_delayed_get(void)
+{
+       if (cpufreq_init && !cpufreq_delayed_issched) {
+               cpufreq_delayed_issched = 1;
+               printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
+               schedule_work(&cpufreq_delayed_get_work);
+       }
+}
+
+/*
+ * if the CPU frequency is scaled, TSC-based delays will need a different
+ * loops_per_jiffy value to function properly.
+ */
+static unsigned int ref_freq = 0;
+static unsigned long loops_per_jiffy_ref = 0;
+static unsigned long cpu_khz_ref = 0;
+
+static int
+time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+       struct cpufreq_freqs *freq = data;
+
+       if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
+               write_seqlock_irq(&xtime_lock);
+
+       if (!ref_freq) {
+               if (!freq->old){
+                       ref_freq = freq->new;
+                       goto end;
+               }
+               ref_freq = freq->old;
+               loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
+               cpu_khz_ref = cpu_khz;
+       }
+
+       if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+           (val == CPUFREQ_RESUMECHANGE)) {
+               if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+                       cpu_data[freq->cpu].loops_per_jiffy =
+                               cpufreq_scale(loops_per_jiffy_ref,
+                                               ref_freq, freq->new);
+
+               if (cpu_khz) {
+
+                       if (num_online_cpus() == 1)
+                               cpu_khz = cpufreq_scale(cpu_khz_ref,
+                                               ref_freq, freq->new);
+                       if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
+                               tsc_khz = cpu_khz;
+                               set_cyc2ns_scale(cpu_khz);
+                               /*
+                                * TSC based sched_clock turns
+                                * to junk w/ cpufreq
+                                */
+                               mark_tsc_unstable();
+                       }
+               }
+       }
+end:
+       if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
+               write_sequnlock_irq(&xtime_lock);
+
+       return 0;
+}
+
+static struct notifier_block time_cpufreq_notifier_block = {
+       .notifier_call  = time_cpufreq_notifier
+};
+
+static int __init cpufreq_tsc(void)
+{
+       int ret;
+
+       INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+       ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
+                                       CPUFREQ_TRANSITION_NOTIFIER);
+       if (!ret)
+               cpufreq_init = 1;
+
+       return ret;
+}
+
+core_initcall(cpufreq_tsc);
+
+#endif
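
To make the scaled math in the cyc2ns comment above concrete, here is a
worked example as a standalone userspace sketch (the 2 GHz figure is made
up; the formulas are the ones from set_cyc2ns_scale()/cycles_2_ns()):

	#include <stdio.h>

	#define CYC2NS_SCALE_FACTOR 10			/* 2^10 */

	int main(void)
	{
		unsigned long cpu_khz = 2000000;	/* assumed 2 GHz TSC */
		unsigned long cyc2ns_scale;
		unsigned long long cycles, ns;

		/* same formula as set_cyc2ns_scale(): */
		cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;

		cycles = 2000000000ULL;			/* one second of cycles */
		ns = (cycles * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;

		/* prints: scale=512 ns=1000000000, i.e. 0.5 ns per cycle */
		printf("scale=%lu ns=%llu\n", cyc2ns_scale, ns);
		return 0;
	}
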
index 3b97a5eae9e83d9e8676df512af7a8999770b01b..a5f4f2aa007a7ac2d8b676bfb3a7cc3c7b6dc380 100644
@@ -369,6 +369,11 @@ static void acpi_processor_idle(void)
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
+
+#ifdef CONFIG_GENERIC_TIME
+               /* TSC halts in C2, so notify users */
+               mark_tsc_unstable();
+#endif
                /* Re-enable interrupts */
                local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);
@@ -409,6 +414,10 @@ static void acpi_processor_idle(void)
                                          ACPI_MTX_DO_NOT_LOCK);
                }
 
+#ifdef CONFIG_GENERIC_TIME
+               /* TSC halts in C3, so notify users */
+               mark_tsc_unstable();
+#endif
                /* Re-enable interrupts */
                local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);
index 4b9703bb0288f4783ce846bfb4f9ce446dd0399a..807992fd4171b78fe9d86287706e590f9d11deb2 100644
@@ -15,7 +15,9 @@
 #ifndef _MACH_TIMER_H
 #define _MACH_TIMER_H
 
-#define CALIBRATE_LATCH        (5 * LATCH)
+#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
+#define CALIBRATE_LATCH        \
+       ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
 
 static inline void mach_prepare_counter(void)
 {
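
The new CALIBRATE_LATCH and the divide in calculate_cpu_khz() fit together
as follows -- a standalone worked sketch (the 2 GHz CPU is made up;
CLOCK_TICK_RATE is the standard PC PIT input clock):

	#include <stdio.h>

	#define CLOCK_TICK_RATE     1193182	/* PIT input frequency, Hz */
	#define CALIBRATE_TIME_MSEC 30

	int main(void)
	{
		/* PIT ticks in 30 ms, rounded -- the new CALIBRATE_LATCH: */
		unsigned long latch =
			(CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2) / 1000;

		/* suppose the TSC advanced this much while the PIT counted
		 * down the latch, i.e. over ~30 ms on a 2 GHz part: */
		unsigned long long delta64 = 60000000ULL;

		delta64 += CALIBRATE_TIME_MSEC / 2;	/* round for the divide */
		delta64 /= CALIBRATE_TIME_MSEC;		/* cycles per msec == kHz */

		/* prints: latch=35795 cpu_khz=2000000 */
		printf("latch=%lu cpu_khz=%llu\n", latch, delta64);
		return 0;
	}
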
index 1cce2b924a8041180340bb935eee31d540c3718c..94268399170de77aadeb11714a0c922d2d3f9e78 100644
@@ -2,6 +2,7 @@
 #define __ASM_MACH_MPPARSE_H
 
 #include <mach_apic.h>
+#include <asm/tsc.h>
 
 extern int use_cyclone;
 
@@ -29,6 +30,7 @@ static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
                        (!strncmp(productid, "VIGIL SMP", 9) 
                         || !strncmp(productid, "EXA", 3)
                         || !strncmp(productid, "RUTHLESS SMP", 12))){
+               mark_tsc_unstable();
                use_cyclone = 1; /*enable cyclone-timer*/
                setup_summit();
                return 1;
@@ -42,6 +44,7 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
        if (!strncmp(oem_id, "IBM", 3) &&
            (!strncmp(oem_table_id, "SERVIGIL", 8)
             || !strncmp(oem_table_id, "EXA", 3))){
+               mark_tsc_unstable();
                use_cyclone = 1; /*enable cyclone-timer*/
                setup_summit();
                return 1;
index d434984303ca35f15556bbb8f2bee352a1c48cf3..3666044409f06c6c2ec0c1ee6ba45d9dbeb1cd50 100644
@@ -7,6 +7,7 @@
 #define _ASMi386_TIMEX_H
 
 #include <asm/processor.h>
+#include <asm/tsc.h>
 
 #ifdef CONFIG_X86_ELAN
 #  define CLOCK_TICK_RATE 1189200 /* AMD Elan has different frequency! */
 #endif
 
 
-/*
- * Standard way to access the cycle counter on i586+ CPUs.
- * Currently only used on SMP.
- *
- * If you really have a SMP machine with i486 chips or older,
- * compile for that, and this will just always return zero.
- * That's ok, it just means that the nicer scheduling heuristics
- * won't work for you.
- *
- * We only use the low 32 bits, and we'd simply better make sure
- * that we reschedule before that wraps. Scheduling at least every
- * four billion cycles just basically sounds like a good idea,
- * regardless of how fast the machine is. 
- */
-typedef unsigned long long cycles_t;
-
-static inline cycles_t get_cycles (void)
-{
-       unsigned long long ret=0;
-
-#ifndef CONFIG_X86_TSC
-       if (!cpu_has_tsc)
-               return 0;
-#endif
-
-#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
-       rdtscll(ret);
-#endif
-       return ret;
-}
-
-extern unsigned int cpu_khz;
-
 extern int read_current_timer(unsigned long *timer_value);
 #define ARCH_HAS_READ_CURRENT_TIMER    1
 
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
new file mode 100644
index 0000000..97b828c
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * linux/include/asm-i386/tsc.h
+ *
+ * i386 TSC related functions
+ */
+#ifndef _ASM_i386_TSC_H
+#define _ASM_i386_TSC_H
+
+#include <linux/config.h>
+#include <asm/processor.h>
+
+/*
+ * Standard way to access the cycle counter on i586+ CPUs.
+ * Currently only used on SMP.
+ *
+ * If you really have a SMP machine with i486 chips or older,
+ * compile for that, and this will just always return zero.
+ * That's ok, it just means that the nicer scheduling heuristics
+ * won't work for you.
+ *
+ * We only use the low 32 bits, and we'd simply better make sure
+ * that we reschedule before that wraps. Scheduling at least every
+ * four billion cycles just basically sounds like a good idea,
+ * regardless of how fast the machine is.
+ */
+typedef unsigned long long cycles_t;
+
+extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+
+static inline cycles_t get_cycles(void)
+{
+       unsigned long long ret = 0;
+
+#ifndef CONFIG_X86_TSC
+       if (!cpu_has_tsc)
+               return 0;
+#endif
+
+#if defined(CONFIG_X86_GENERIC) || defined(CONFIG_X86_TSC)
+       rdtscll(ret);
+#endif
+       return ret;
+}
+
+extern void tsc_init(void);
+extern void mark_tsc_unstable(void);
+
+#endif
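
Finally, a hypothetical in-kernel user of the new header might time a code
section like this (sketch only; get_cycles(), tsc_khz and cycles_t come
from the header above, everything else is made up):

	#include <asm/tsc.h>

	/* hypothetical helper -- not part of this patch */
	static unsigned long long time_some_work(void (*work)(void))
	{
		cycles_t start, end;

		start = get_cycles();
		work();
		end = get_cycles();

		if (!tsc_khz)
			return 0;	/* no calibrated TSC available */

		/* cycles -> ns: ns = cycles * 10^6 / tsc_khz; can overflow
		 * for very long intervals, fine for short measurements */
		return ((end - start) * 1000000ULL) / tsc_khz;
	}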