/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 *
 * What:
 *
 * cpu_clock(i) provides a fast (execution time) high resolution
 * clock with bounded drift between CPUs. The value of cpu_clock(i)
 * is monotonic for constant i. The timestamp returned is in nanoseconds.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 *
 * There is no strict promise about the base, although it tends to start
 * at 0 on boot (but people really shouldn't rely on that).
 *
 * cpu_clock(i)  -- can be used from any context, including NMI.
 * local_clock() -- is cpu_clock() on the current cpu.
 *
 * How:
 *
 * The implementation either uses sched_clock() when
 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the
 * sched_clock() is assumed to provide these properties (mostly it means
 * the architecture provides a globally synchronized highres time source).
 *
 * Otherwise it tries to create a semi stable clock from a mixture of other
 * clocks, including:
 *
 *  - GTOD (clock monotonic)
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
 * deltas are filtered to provide monotonicity and to keep them within an
 * expected window.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 */
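
/*
 * Illustrative usage sketch (not part of the original file; do_work()
 * is a hypothetical stand-in): timing a short section with
 * local_clock(). The delta is only meaningful if both reads happen on
 * the same cpu, hence the get_cpu()/put_cpu() pair.
 *
 *	int cpu = get_cpu();
 *	u64 t0 = local_clock();
 *
 *	do_work();
 *
 *	// monotonic nanosecond delta, valid because we stayed on @cpu
 *	pr_info("took %llu ns on cpu %d\n", local_clock() - t0, cpu);
 *	put_cpu();
 */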
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>
/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
EXPORT_SYMBOL_GPL(sched_clock);
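
/*
 * Illustrative sketch (hypothetical, not part of this file): an
 * architecture override of the weak default typically converts a raw
 * free-running counter into nanoseconds, e.g.:
 *
 *	unsigned long long sched_clock(void)
 *	{
 *		// arch_counter_read() and arch_counter_to_ns() are
 *		// hypothetical placeholders for the platform's counter
 *		return arch_counter_to_ns(arch_counter_read());
 *	}
 */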
__read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;
struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}
void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}
/*
 * min, max except they take wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
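
/*
 * Illustrative example (not part of the original file): the signed
 * compare of the unsigned difference makes these wrap-safe. With
 * x = 1 and y = ULLONG_MAX, x - y == 2, so (s64)(x - y) > 0 and
 * wrap_max(x, y) returns x, the value that is "ahead" modulo 2^64,
 * where a plain x > y compare would have picked y.
 */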
/*
 * update the percpu scd from the raw sched_clock() value
 *
 *  - filter out backward motion
 *  - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */
	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}
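
/*
 * Worked example (made-up numbers, not from the original file), with
 * TICK_NSEC = 1000000, scd->tick_gtod = 5000000, old_clock = 5200000:
 * the window is [5200000, 6000000]. A sched_clock() delta of 100000
 * gives clock = 5100000, clamped up to 5200000 (no backward motion);
 * a crazy delta of 9000000 gives 14000000, clamped down to 6000000
 * (at most one tick beyond the last GTOD timestamp).
 */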
static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

#if BITS_PER_LONG != 64
again:
	/*
	 * Careful here: The local and the remote clock values need to
	 * be read out atomically as we need to compare the values and
	 * then update either the local or the remote side. So the
	 * cmpxchg64 below only protects one readout.
	 *
	 * We must reread via sched_clock_local() in the retry case on
	 * 32bit as an NMI could use sched_clock_local() via the
	 * tracer and hit between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	this_clock = sched_clock_local(my_scd);
	/*
	 * We must enforce atomic readout on 32bit, otherwise the
	 * update on the remote cpu can hit in between the readout of
	 * the low 32bit and the high 32bit portion.
	 */
	remote_clock = cmpxchg64(&scd->clock, 0, 0);
#else
	/*
	 * On 64bit the read of [my]scd->clock is atomic versus the
	 * update, so we can avoid the above 32bit dance.
	 */
	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;
#endif

	/*
	 * Use the opportunity that we have both clock values read out
	 * to couple the two clocks: we take the larger time as the
	 * latest time for both runqueues. (this creates monotonic
	 * movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}
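
/*
 * Illustrative example (made-up values, not from the original file):
 * if this_clock = 1000 and the remote scd->clock reads 900, the
 * remote clock is forwarded via cmpxchg64(&scd->clock, 900, 1000).
 * Should the remote cpu update its clock concurrently, the cmpxchg64
 * fails and we retry with freshly read values, so no update is lost.
 */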
/*
 * Similar to cpu_clock(); preemption is disabled internally, so no
 * special calling context is required.
 *
 * See cpu_clock().
 */
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	if (sched_clock_stable)
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	preempt_disable();
	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);
	preempt_enable();

	return clock;
}
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}
/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled @delta_ns nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
/*
 * As outlined at the top, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}
/*
 * Similar to cpu_clock() for the current cpu. Time will only be observed
 * to be monotonic if care is taken to only compare timestamps taken on the
 * same cpu.
 *
 * See cpu_clock().
 */
u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

u64 local_clock(void)
{
	return sched_clock_cpu(0);
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

EXPORT_SYMBOL_GPL(cpu_clock);
EXPORT_SYMBOL_GPL(local_clock);