/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits: based on kernel/timer.c
 *
 *  Help, testing, suggestions, bugfixes, improvements were provided by:
 *  George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel et al.
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include <asm/uaccess.h>
#ifndef CONFIG_GENERIC_TIME
/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
#endif /* !CONFIG_GENERIC_TIME */
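
/*
 * Illustrative sketch (not part of the original file): how callers
 * typically consume the two accessors above, e.g. to measure an elapsed
 * interval in nanoseconds. hypothetical_measure() and do_something()
 * are made-up names for illustration only.
 */
#if 0
static s64 hypothetical_measure(void)
{
	ktime_t start, end;

	start = ktime_get();	/* monotonic, immune to settimeofday() */
	do_something();		/* hypothetical workload */
	end = ktime_get();

	return ktime_to_ns(ktime_sub(end, start));
}
#endif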
/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock id's.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};
#ifndef CONFIG_GENERIC_TIME
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
#endif /* !CONFIG_GENERIC_TIME */
/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = current_kernel_time();
		tom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}
/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}
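
/*
 * Illustrative sketch (not in the original source): the canonical
 * pattern for code that must inspect a timer under its base lock. The
 * retry loop in lock_hrtimer_base() above makes this safe against a
 * concurrent base migration; unlock_hrtimer_base(), defined further
 * below, releases the lock. hypothetical_read_expiry() is a made-up
 * name.
 */
#if 0
static ktime_t hypothetical_read_expiry(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t expires;

	base = lock_hrtimer_base(timer, &flags);	/* pins timer->base */
	expires = hrtimer_get_expires(timer);
	unlock_hrtimer_base(timer, &flags);

	return expires;
}
#endif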
/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int cpu, preferred_cpu = -1;

	cpu = smp_processor_id();
#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
		preferred_cpu = get_nohz_load_balancer();
		if (preferred_cpu >= 0)
			cpu = preferred_cpu;
	}
#endif

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);

		/* Optimized away for NOHZ=n SMP=n */
		if (cpu == preferred_cpu) {
			/* Calculate clock monotonic expiry time */
#ifdef CONFIG_HIGH_RES_TIMERS
			ktime_t expires = ktime_sub(hrtimer_get_expires(timer),
						    new_base->offset);
#else
			ktime_t expires = hrtimer_get_expires(timer);
#endif
			/*
			 * Get the next event on target cpu from the
			 * clock events layer.
			 * This covers the highres=off nohz=on case as well.
			 */
			ktime_t next = clockevents_get_next_event(cpu);

			ktime_t delta = ktime_sub(expires, next);

			/*
			 * We do not migrate the timer when it is expiring
			 * before the next event on the target cpu because
			 * we cannot reprogram the target cpu hardware and
			 * we would cause it to fire late.
			 */
			if (delta.tv64 < 0) {
				cpu = smp_processor_id();
				spin_unlock(&new_base->cpu_base->lock);
				spin_lock(&base->cpu_base->lock);
				timer->base = base;
				goto again;
			}
		}
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of @kt and @nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}
EXPORT_SYMBOL_GPL(ktime_add_ns);

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}
EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
#endif /* BITS_PER_LONG >= 64 */
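
/*
 * Worked example (illustrative, not in the original source) of the
 * shift trick in ktime_divns() above: do_div() only accepts a 32-bit
 * divisor, so both operands are pre-scaled by the same power of two.
 * E.g. for div = 0x180000000 (> 2^32), one right shift yields
 * div = 0xC0000000 (< 2^32); shifting dclc right by the same sft keeps
 * the quotient identical apart from rounding in the discarded low bits.
 */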
/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}
EXPORT_SYMBOL_GPL(ktime_add_safe);
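
/*
 * Illustrative note (not in the original source): with two non-negative
 * tv64 operands, a signed 64-bit overflow necessarily wraps below either
 * operand, so the three comparisons above catch it. E.g.
 * lhs.tv64 = rhs.tv64 = 0x7000000000000000 sums to 0xE000000000000000,
 * which is negative as s64 and gets clamped to KTIME_SEC_MAX seconds.
 */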
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else

static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }

#endif
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);
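
/*
 * Usage note (illustrative, not in the original source): the handler
 * above parses the "highres=" boot parameter, so booting with e.g.
 *
 *	highres=off
 *
 * on the kernel command line keeps the system in low resolution mode
 * even when CONFIG_HIGH_RES_TIMERS is built in.
 */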
/*
 * hrtimer_high_res_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}
/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires;

	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		/*
		 * clock_was_set() has changed base->offset so the
		 * result might be negative. Fix it up to prevent a
		 * false positive in clockevents_program_event()
		 */
		if (expires.tv64 < 0)
			expires.tv64 = 0;
		if (expires.tv64 < cpu_base->expires_next.tv64)
			cpu_base->expires_next = expires;
	}

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}
/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback, or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * A CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * about that, just avoid calling into the tick code, which
	 * now objects to negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME, when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;
	return res;
}
/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		set_normalized_timespec(&realtime_offset,
					-wall_to_monotonic.tv_sec,
					-wall_to_monotonic.tv_nsec);
	} while (read_seqretry(&xtime_lock, seq));

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base);
	spin_unlock(&base->lock);
}
/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");

	retrigger_next_event(NULL);
}
/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}

/*
 * When high resolution timers are active, try to reprogram. Note that in
 * case the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no
 * expiry check happens. The timer gets enqueued into the rbtree. The
 * reprogramming and expiry check is done in the hrtimer_interrupt or in
 * the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
		if (wakeup) {
			spin_unlock(&base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			spin_lock(&base->cpu_base->lock);
		} else
			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);

		return 1;
	}

	return 0;
}
/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}
#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base,
					    int wakeup)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }

#endif /* CONFIG_HIGH_RES_TIMERS */
#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif
/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
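
/*
 * Illustrative sketch (not part of the original file): the usual way a
 * periodic callback uses hrtimer_forward(). hypothetical_tick and
 * my_period are made-up names; hrtimer_forward_now() is the common
 * convenience wrapper for the get_time() call shown here.
 */
#if 0
static enum hrtimer_restart hypothetical_tick(struct hrtimer *timer)
{
	ktime_t my_period = ktime_set(0, 100 * NSEC_PER_MSEC);

	/*
	 * Push the expiry forward past "now"; the return value counts
	 * the missed periods if the callback ran late.
	 */
	hrtimer_forward(timer, timer->base->get_time(), my_period);

	return HRTIMER_RESTART;	/* keep the timer queued */
}
#endif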
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	debug_hrtimer_activate(timer);

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (hrtimer_get_expires_tv64(timer) <
				hrtimer_get_expires_tv64(entry)) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost)
		base->first = &timer->node;

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	return leftmost;
}
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	if (timer->state & HRTIMER_STATE_ENQUEUED) {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device, if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}
/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_hrtimer_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}
int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode,
		int wakeup)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 *
	 * XXX send_remote_softirq() ?
	 */
	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
		hrtimer_enqueue_reprogram(timer, new_base, wakeup);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);
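
/*
 * Illustrative sketch (not part of the original file): a minimal
 * init/start sequence for a one-shot timer. hypothetical_cb and
 * hypothetical_arm are made-up names.
 */
#if 0
static enum hrtimer_restart hypothetical_cb(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;	/* one-shot: do not requeue */
}

static void hypothetical_arm(struct hrtimer *my_timer)
{
	hrtimer_init(my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer->function = hypothetical_cb;
	/* fire 2 ms from now, relative to the monotonic clock */
	hrtimer_start(my_timer, ktime_set(0, 2 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}
#endif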
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
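
/*
 * Illustrative note (not part of the original file): a caller that
 * holds a lock the callback also takes must not spin in
 * hrtimer_cancel(); the usual pattern is to drop the lock first, or to
 * use hrtimer_try_to_cancel() and handle the -1 "callback running"
 * case itself. my_timer and my_lock are made-up names.
 */
#if 0
	if (hrtimer_try_to_cancel(&my_timer) < 0) {
		/* hypothetical fallback: let the callback finish */
		spin_unlock(&my_lock);
		hrtimer_cancel(&my_timer);
		spin_lock(&my_lock);
	}
#endif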
/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
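
/*
 * Illustrative sketch (not part of the original file): reading how long
 * a queued timer still has to run, e.g. for diagnostics. my_timer is a
 * made-up name.
 */
#if 0
	ktime_t rem = hrtimer_get_remaining(&my_timer);

	printk(KERN_DEBUG "timer fires in %lld ns\n",
	       (long long)ktime_to_ns(rem));
#endif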
#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			if (!base->first)
				continue;

			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	INIT_LIST_HEAD(&timer->cb_entry);
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
static void __run_hrtimer(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_hrtimer_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	spin_unlock(&cpu_base->lock);
	restart = fn(timer);
	spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}
	timer->state &= ~HRTIMER_STATE_CALLBACK;
}
#ifdef CONFIG_HIGH_RES_TIMERS

static int force_clock_reprogram;

/*
 * After 5 attempts, we consider that hrtimer_interrupt() is hanging,
 * which can happen with something that slows down the interrupt, such
 * as tracing. We then force the clock reprogramming for each future
 * hrtimer interrupt to avoid infinite loops and overwrite the
 * min_delta_ns threshold.
 * The next tick event will be scheduled at 3 times the time we
 * currently spend in hrtimer_interrupt(). This is a good compromise:
 * the cpus will spend 1/4 of their time processing hrtimer interrupts,
 * which is enough to keep things running without serious starvation.
 */
static inline void
hrtimer_interrupt_hanging(struct clock_event_device *dev,
			  ktime_t try_time)
{
	force_clock_reprogram = 1;
	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
	printk(KERN_WARNING "hrtimer: interrupt too slow, "
	       "forcing clock min delta to %lu ns\n", dev->min_delta_ns);
}
/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int nr_retries = 0;
	int i;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	/* 5 retries is enough to notice a hang */
	if (!(++nr_retries % 5))
		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));

	now = ktime_get();
	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, force_clock_reprogram))
			goto retry;
	}
}
/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = &__get_cpu_var(tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

/**
 * hrtimer_peek_ahead_timers -- run soft-expired timers now
 *
 * hrtimer_peek_ahead_timers will peek at the timer queue of
 * the current cpu and check if there are any timers for which
 * the soft expires time has passed. If any such timers exist,
 * they are run immediately and then removed from the timer queue.
 */
void hrtimer_peek_ahead_timers(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__hrtimer_peek_ahead_timers();
	local_irq_restore(flags);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	hrtimer_peek_ahead_timers();
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */
/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		hrtimer_switch_to_hres();
}
/*
 * Called from hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct rb_node *node;
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	int index, gettime = 1;

	if (hrtimer_hres_active())
		return;

	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
		base = &cpu_base->clock_base[index];

		if (!base->first)
			continue;

		if (gettime) {
			hrtimer_get_softirq_time(cpu_base);
			gettime = 0;
		}

		spin_lock(&cpu_base->lock);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);
			if (base->softirq_time.tv64 <=
					hrtimer_get_expires_tv64(timer))
				break;

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
	}
}
/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}
static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = hrtimer_expires_remaining(timer);
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}
long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
			      HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}
long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	unsigned long slack;

	slack = current->timer_slack_ns;
	if (rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.index = t.timer.base->index;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

	hrtimer_init_hres(cpu_base);
}
#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct rb_node *node;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_hrtimer_deactivate(timer);

		/*
		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);

		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
}
static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);

	/* Check, if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int scpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(scpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
		migrate_hrtimers(scpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}
/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
				     const enum hrtimer_mode mode)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);
	if (!hrtimer_active(&t.timer))
		t.task = NULL;

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
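
/*
 * Illustrative sketch (not part of the original file): a caller must
 * set its task state before calling, e.g. for a 10 ms interruptible
 * sleep with 1 ms of slack. my_timeout is a made-up name.
 */
#if 0
	ktime_t my_timeout = ktime_set(0, 10 * NSEC_PER_MSEC);

	set_current_state(TASK_INTERRUPTIBLE);
	if (schedule_hrtimeout_range(&my_timeout, NSEC_PER_MSEC,
				     HRTIMER_MODE_REL) == -EINTR) {
		/* woken early by a signal */
	}
#endif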
/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);