/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006      Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *      based on kernel/timer.c
 *
 *      Help, testing, suggestions, bugfixes and improvements were
 *      provided by:
 *
 *      George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *      et. al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/uaccess.h>

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
        struct timespec now;

        ktime_get_ts(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
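
/*
 * Illustrative note (not from the original source): ktime_get() is the
 * right base for timeouts because it is unaffected by settimeofday()
 * and NTP time jumps, while ktime_get_real() tracks the wall clock.
 * A hypothetical caller arming a 100us deadline against the monotonic
 * clock might do:
 *
 *      ktime_t deadline = ktime_add_ns(ktime_get(), 100 * NSEC_PER_USEC);
 */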

/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
static DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
        .clock_base =
        {
                {
                        .index = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
                        .resolution = KTIME_REALTIME_RES,
                },
                {
                        .index = CLOCK_MONOTONIC,
                        .get_time = &ktime_get,
                        .resolution = KTIME_MONOTONIC_RES,
                },
        }
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:         pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timespec tomono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(ts);
                tomono = wall_to_monotonic;

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
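
/*
 * Worked example (illustrative values only): with a wall time of
 * {1000s, 900000000ns} and a wall_to_monotonic offset of
 * {5s, 300000000ns}, the raw sum is {1005s, 1200000000ns};
 * set_normalized_timespec() carries the excess nanoseconds over,
 * yielding {1006s, 200000000ns}.
 */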

/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
        ktime_t xtim, tomono;
        struct timespec xts, tom;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(&xts);
                tom = wall_to_monotonic;
        } while (read_seqretry(&xtime_lock, seq));

        xtim = timespec_to_ktime(xts);
        tomono = timespec_to_ktime(tom);
        base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
        base->clock_base[CLOCK_MONOTONIC].softirq_time =
                ktime_add(xtim, tomono);
}

/*
 * Helper function to check whether the timer is on one of the queues
 */
static inline int hrtimer_is_queued(struct hrtimer *timer)
{
        return timer->state & HRTIMER_STATE_ENQUEUED;
}

/*
 * Helper function to check whether the timer is running the callback
 * function
 */
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
        return timer->state & HRTIMER_STATE_CALLBACK;
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
                                             unsigned long *flags)
{
        struct hrtimer_clock_base *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU: */
                        spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
                }
                cpu_relax();
        }
}
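
/*
 * Illustrative race (not from the original source): CPU0 loads
 * timer->base, then CPU1 migrates the timer via switch_hrtimer_base(),
 * which momentarily sets timer->base = NULL. CPU0 either sees NULL and
 * spins via cpu_relax(), or it locks the stale base, notices that
 * base != timer->base, unlocks and retries. Hence the loop above.
 */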

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
        struct hrtimer_clock_base *new_base;
        struct hrtimer_cpu_base *new_cpu_base;

        new_cpu_base = &__get_cpu_var(hrtimer_bases);
        new_base = &new_cpu_base->clock_base[base->index];

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * so we keep it on the same CPU. No hassle vs. reprogramming
                 * the event source in the high resolution case. The softirq
                 * code will take care of this when the timer function has
                 * completed. There is no conflict as we hold the lock until
                 * the timer is enqueued.
                 */
                if (unlikely(timer->state & HRTIMER_STATE_CALLBACK))
                        return base;

                /* See the comment in lock_hrtimer_base() */
                timer->base = NULL;
                spin_unlock(&base->cpu_base->lock);
                spin_lock(&new_base->cpu_base->lock);
                timer->base = new_base;
        }
        return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        struct hrtimer_clock_base *base = timer->base;

        spin_lock_irqsave(&base->cpu_base->lock, *flags);

        return base;
}

#define switch_hrtimer_base(t, b)       (b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64

# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:         addend
 * @nsec:       the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
        ktime_t tmp;

        if (likely(nsec < NSEC_PER_SEC)) {
                tmp.tv64 = nsec;
        } else {
                unsigned long rem = do_div(nsec, NSEC_PER_SEC);

                tmp = ktime_set((long)nsec, rem);
        }

        return ktime_add(kt, tmp);
}

#else /* CONFIG_KTIME_SCALAR */
# endif /* !CONFIG_KTIME_SCALAR */
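
/*
 * Worked example (illustrative): ktime_add_ns(kt, 3700000000ULL) takes
 * the slow path since 3.7e9 >= NSEC_PER_SEC; do_div() leaves nsec = 3
 * and returns rem = 700000000, so tmp = ktime_set(3, 700000000), i.e.
 * 3.7 seconds are added to kt.
 */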

/*
 * Divide a ktime value by a nanosecond value
 */
static unsigned long ktime_divns(const ktime_t kt, s64 div)
{
        u64 dclc, dns;
        int sft = 0;

        dclc = dns = ktime_to_ns(kt);
        /* Make sure the divisor is less than 2^32: */
        while (div >> 32) {
                sft++;
                div >>= 1;
        }
        dclc >>= sft;
        do_div(dclc, (unsigned long) div);

        return (unsigned long) dclc;
}

#else /* BITS_PER_LONG < 64 */
# define ktime_divns(kt, div)           (unsigned long)((kt).tv64 / (div))
#endif /* BITS_PER_LONG >= 64 */
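
/*
 * Worked example (illustrative): for kt = 25000000000ns and
 * div = 6000000000, div >> 32 is nonzero, so divisor and dividend are
 * both shifted right once (div = 3000000000, dclc = 12500000000)
 * before do_div(), giving 12500000000 / 3000000000 = 4, the same as
 * 25000000000 / 6000000000. Shifting both operands by the same amount
 * preserves the quotient up to rounding while keeping the divisor
 * within 32 bits, as do_div() requires.
 */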

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:      hrtimer to forward
 * @now:        forward past this time
 * @interval:   the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
unsigned long
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
        unsigned long orun = 1;
        ktime_t delta;

        delta = ktime_sub(now, timer->expires);

        if (delta.tv64 < 0)
                return 0;

        if (interval.tv64 < timer->base->resolution.tv64)
                interval.tv64 = timer->base->resolution.tv64;

        if (unlikely(delta.tv64 >= interval.tv64)) {
                s64 incr = ktime_to_ns(interval);

                orun = ktime_divns(delta, incr);
                timer->expires = ktime_add_ns(timer->expires, incr * orun);
                if (timer->expires.tv64 > now.tv64)
                        return orun;
                /*
                 * This (and the ktime_add() below) is the
                 * correction for exact:
                 */
                orun++;
        }
        timer->expires = ktime_add(timer->expires, interval);

        return orun;
}
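
/*
 * Worked example (illustrative): with expires = 10ms, interval = 10ms
 * and now = 35ms, delta is 25ms, so orun = 25/10 = 2 and expires moves
 * to 30ms. As 30ms is still in the past, the "correction for exact"
 * path bumps orun to 3 and the final ktime_add() moves expires to
 * 40ms, the first interval boundary after now.
 */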

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
                            struct hrtimer_clock_base *base)
{
        struct rb_node **link = &base->active.rb_node;
        struct rb_node *parent = NULL;
        struct hrtimer *entry;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct hrtimer, node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same expiry time stay together.
                 */
                if (timer->expires.tv64 < entry->expires.tv64)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }

        /*
         * Insert the timer to the rbtree and check whether it
         * replaces the first pending timer
         */
        rb_link_node(&timer->node, parent, link);
        rb_insert_color(&timer->node, &base->active);
        /*
         * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
         * state of a possibly running callback.
         */
        timer->state |= HRTIMER_STATE_ENQUEUED;

        if (!base->first || timer->expires.tv64 <
            rb_entry(base->first, struct hrtimer, node)->expires.tv64)
                base->first = &timer->node;
}
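
/*
 * Illustrative note (not from the original source): because a timer
 * with an expiry equal to an existing node descends to the right,
 * enqueueing A and then B, both expiring at t = 10, leaves A to the
 * left of B in in-order traversal, so timers with the same expiry are
 * expired in insertion order.
 */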

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 */
static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             unsigned long newstate)
{
        /*
         * Remove the timer from the rbtree and replace the
         * first entry pointer if necessary.
         */
        if (base->first == &timer->node)
                base->first = rb_next(&timer->node);
        rb_erase(&timer->node, &base->active);
        timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
        if (hrtimer_is_queued(timer)) {
                __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE);
                return 1;
        }
        return 0;
}

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:      the timer to be added
 * @tim:        expiry time
 * @mode:       expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
        int ret;

        base = lock_hrtimer_base(timer, &flags);

        /* Remove an active timer from the queue: */
        ret = remove_hrtimer(timer, base);

        /* Switch the timer base, if necessary: */
        new_base = switch_hrtimer_base(timer, base);

        if (mode == HRTIMER_MODE_REL) {
                tim = ktime_add(tim, new_base->get_time());
                /*
                 * CONFIG_TIME_LOW_RES is a temporary way for architectures
                 * to signal that they simply return xtime in
                 * do_gettimeoffset(). In this case we want to round up by
                 * resolution when starting a relative timer, to avoid short
                 * timeouts. This will go away with the GTOD framework.
                 */
#ifdef CONFIG_TIME_LOW_RES
                tim = ktime_add(tim, base->resolution);
#endif
        }
        timer->expires = tim;

        enqueue_hrtimer(timer, new_base);

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start);
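
/*
 * Usage sketch (illustrative only, not part of this file): arming a
 * relative 10ms one-shot timer; returning HRTIMER_RESTART from the
 * callback, after advancing the expiry with hrtimer_forward(), would
 * make it periodic instead. my_timer and my_func are hypothetical
 * names used only for this example.
 *
 *      static struct hrtimer my_timer;
 *
 *      static enum hrtimer_restart my_func(struct hrtimer *timer)
 *      {
 *              return HRTIMER_NORESTART;
 *      }
 *
 *      hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *      my_timer.function = my_func;
 *      hrtimer_start(&my_timer, ktime_set(0, 10 * 1000 * 1000),
 *                    HRTIMER_MODE_REL);
 */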

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:      hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
        struct hrtimer_clock_base *base;
        unsigned long flags;
        int ret = -1;

        base = lock_hrtimer_base(timer, &flags);

        if (!hrtimer_callback_running(timer))
                ret = remove_hrtimer(timer, base);

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:      the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
        for (;;) {
                int ret = hrtimer_try_to_cancel(timer);

                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
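
/*
 * Illustrative note (not from the original source): hrtimer_cancel()
 * may busy-wait on a running callback, so it must not be called from
 * the timer's own callback, nor while holding a lock the callback
 * takes. Callers in such contexts can use hrtimer_try_to_cancel()
 * directly and handle the -1 "callback running" return themselves.
 */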

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:      the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
        struct hrtimer_clock_base *base;
        unsigned long flags;
        ktime_t rem;

        base = lock_hrtimer_base(timer, &flags);
        rem = ktime_sub(timer->expires, base->get_time());
        unlock_hrtimer_base(timer, &flags);

        return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base = cpu_base->clock_base;
        ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
        unsigned long flags;
        int i;

        spin_lock_irqsave(&cpu_base->lock, flags);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
                struct hrtimer *timer;

                if (!base->first)
                        continue;

                timer = rb_entry(base->first, struct hrtimer, node);
                delta.tv64 = timer->expires.tv64;
                delta = ktime_sub(delta, base->get_time());
                if (delta.tv64 < mindelta.tv64)
                        mindelta.tv64 = delta.tv64;
        }

        spin_unlock_irqrestore(&cpu_base->lock, flags);

        if (mindelta.tv64 < 0)
                mindelta.tv64 = 0;

        return mindelta;
}
#endif

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:      the timer to be initialized
 * @clock_id:   the clock to be used
 * @mode:       timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                  enum hrtimer_mode mode)
{
        struct hrtimer_cpu_base *cpu_base;

        memset(timer, 0, sizeof(struct hrtimer));

        cpu_base = &__raw_get_cpu_var(hrtimer_bases);

        if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
                clock_id = CLOCK_MONOTONIC;

        timer->base = &cpu_base->clock_base[clock_id];
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:          pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
        struct hrtimer_cpu_base *cpu_base;

        cpu_base = &__raw_get_cpu_var(hrtimer_bases);
        *tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

        return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

/*
 * Expire the per base hrtimer-queue:
 */
static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
                                     int index)
{
        struct rb_node *node;
        struct hrtimer_clock_base *base = &cpu_base->clock_base[index];

        if (!base->first)
                return;

        if (base->get_softirq_time)
                base->softirq_time = base->get_softirq_time();

        spin_lock_irq(&cpu_base->lock);

        while ((node = base->first)) {
                struct hrtimer *timer;
                enum hrtimer_restart (*fn)(struct hrtimer *);
                int restart;

                timer = rb_entry(node, struct hrtimer, node);
                if (base->softirq_time.tv64 <= timer->expires.tv64)
                        break;

                fn = timer->function;
                __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK);
                spin_unlock_irq(&cpu_base->lock);

                restart = fn(timer);

                spin_lock_irq(&cpu_base->lock);

                timer->state &= ~HRTIMER_STATE_CALLBACK;
                if (restart != HRTIMER_NORESTART) {
                        BUG_ON(hrtimer_active(timer));
                        enqueue_hrtimer(timer, base);
                }
        }
        spin_unlock_irq(&cpu_base->lock);
}
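
/*
 * Note (illustrative, summarizing the code above): the base lock is
 * dropped around the callback invocation so the callback can take
 * other locks or start timers without deadlocking against this CPU.
 * While the lock is dropped, HRTIMER_STATE_CALLBACK keeps the timer
 * visibly busy: hrtimer_try_to_cancel() returns -1 for it and
 * switch_hrtimer_base() refuses to migrate it.
 */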

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 */
void hrtimer_run_queues(void)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        int i;

        hrtimer_get_softirq_time(cpu_base);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                run_hrtimer_queue(cpu_base, i);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
        struct hrtimer_sleeper *t =
                container_of(timer, struct hrtimer_sleeper, timer);
        struct task_struct *task = t->task;

        t->task = NULL;
        if (task)
                wake_up_process(task);

        return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
        sl->timer.function = hrtimer_wakeup;
        sl->task = task;
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
        hrtimer_init_sleeper(t, current);

        do {
                set_current_state(TASK_INTERRUPTIBLE);
                hrtimer_start(&t->timer, t->timer.expires, mode);

                if (likely(t->task))
                        schedule();

                hrtimer_cancel(&t->timer);
                mode = HRTIMER_MODE_ABS;

        } while (t->task && !signal_pending(current));

        return t->task == NULL;
}
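
/*
 * Note (illustrative, summarizing the code above): do_nanosleep()
 * returns 1 when hrtimer_wakeup() cleared t->task, i.e. the full
 * sleep elapsed, and 0 when a signal woke the task early; the callers
 * below then compute the remaining time and arrange for the syscall
 * to be restarted.
 */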

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
        struct hrtimer_sleeper t;
        struct timespec __user *rmtp;
        struct timespec tu;
        ktime_t time;

        restart->fn = do_no_restart_syscall;

        hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
        t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;

        if (do_nanosleep(&t, HRTIMER_MODE_ABS))
                return 0;

        rmtp = (struct timespec __user *) restart->arg1;
        if (rmtp) {
                time = ktime_sub(t.timer.expires, t.timer.base->get_time());
                if (time.tv64 <= 0)
                        return 0;
                tu = ktime_to_timespec(time);
                if (copy_to_user(rmtp, &tu, sizeof(tu)))
                        return -EFAULT;
        }

        restart->fn = hrtimer_nanosleep_restart;

        /* The other values in restart are already filled in */
        return -ERESTART_RESTARTBLOCK;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
                       const enum hrtimer_mode mode, const clockid_t clockid)
{
        struct restart_block *restart;
        struct hrtimer_sleeper t;
        struct timespec tu;
        ktime_t rem;

        hrtimer_init(&t.timer, clockid, mode);
        t.timer.expires = timespec_to_ktime(*rqtp);
        if (do_nanosleep(&t, mode))
                return 0;

        /* Absolute timers do not update the rmtp value and restart: */
        if (mode == HRTIMER_MODE_ABS)
                return -ERESTARTNOHAND;

        if (rmtp) {
                rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
                if (rem.tv64 <= 0)
                        return 0;
                tu = ktime_to_timespec(rem);
                if (copy_to_user(rmtp, &tu, sizeof(tu)))
                        return -EFAULT;
        }

        restart = &current_thread_info()->restart_block;
        restart->fn = hrtimer_nanosleep_restart;
        restart->arg0 = (unsigned long) t.timer.base->index;
        restart->arg1 = (unsigned long) rmtp;
        restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
        restart->arg3 = t.timer.expires.tv64 >> 32;

        return -ERESTART_RESTARTBLOCK;
}
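
/*
 * Worked example (illustrative): an absolute expiry of
 * 0x0000000100000002 ns is saved as arg2 = 0x00000002 (low 32 bits)
 * and arg3 = 0x00000001 (high 32 bits), and is reassembled in
 * hrtimer_nanosleep_restart() as ((u64)arg3 << 32) | arg2, so the
 * restart block works even where unsigned long is 32 bits wide.
 */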

asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
        struct timespec tu;

        if (copy_from_user(&tu, rqtp, sizeof(tu)))
                return -EFAULT;

        if (!timespec_valid(&tu))
                return -EINVAL;

        return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __devinit init_hrtimers_cpu(int cpu)
{
        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
        int i;

        spin_lock_init(&cpu_base->lock);
        lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                cpu_base->clock_base[i].cpu_base = cpu_base;
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                                 struct hrtimer_clock_base *new_base)
{
        struct hrtimer *timer;
        struct rb_node *node;

        while ((node = rb_first(&old_base->active))) {
                timer = rb_entry(node, struct hrtimer, node);
                BUG_ON(timer->state & HRTIMER_STATE_CALLBACK);
                __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE);
                timer->base = new_base;
                enqueue_hrtimer(timer, new_base);
        }
}

static void migrate_hrtimers(int cpu)
{
        struct hrtimer_cpu_base *old_base, *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = &per_cpu(hrtimer_bases, cpu);
        new_base = &get_cpu_var(hrtimer_bases);

        local_irq_disable();

        spin_lock(&new_base->lock);
        spin_lock(&old_base->lock);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                migrate_hrtimer_list(&old_base->clock_base[i],
                                     &new_base->clock_base[i]);
        }

        spin_unlock(&old_base->lock);
        spin_unlock(&new_base->lock);

        local_irq_enable();
        put_cpu_var(hrtimer_bases);
}

#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {

        case CPU_UP_PREPARE:
                init_hrtimers_cpu(cpu);
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
                migrate_hrtimers(cpu);
                break;
#endif

        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
        .notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                           (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
}