/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
/*
 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
 */
void update_rlimit_cpu(unsigned long rlim_new)
{
        cputime_t cputime;

        cputime = secs_to_cputime(rlim_new);
        if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
            cputime_lt(current->signal->it_prof_expires, cputime)) {
                spin_lock_irq(&current->sighand->siglock);
                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
                spin_unlock_irq(&current->sighand->siglock);
        }
}
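/*
 * Illustrative only (not part of this file): update_rlimit_cpu() is reached
 * from the setrlimit() path when RLIMIT_CPU changes, so a userspace sketch
 * that ends up here looks like:
 *
 *      struct rlimit rl = { .rlim_cur = 10, .rlim_max = 20 };
 *      setrlimit(RLIMIT_CPU, &rl);     // SIGXCPU after ~10s of CPU time
 */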
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        read_lock(&tasklist_lock);
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                   same_thread_group(p, current) : thread_group_leader(p))) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return error;
}
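/*
 * For reference, a sketch of the clockid encoding consumed above (the real
 * macros live in <linux/posix-timers.h>): a CPU clockid_t packs a PID and a
 * clock type into one value, roughly
 *
 *      clockid = (~pid << 3) | (per_thread ? 4 : 0) | which;
 *
 * where "which" is CPUCLOCK_PROF (0), CPUCLOCK_VIRT (1) or CPUCLOCK_SCHED (2),
 * and CPUCLOCK_PID()/CPUCLOCK_PERTHREAD()/CPUCLOCK_WHICH() unpack the fields
 * again.  A pid of 0 always denotes the calling task itself.
 */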
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC +
                            tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}
static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                *tp = ns_to_timespec(cpu.sched);
        else
                cputime_to_timespec(cpu.cpu, tp);
}
static inline int cpu_time_before(const clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return cputime_lt(now.cpu, then.cpu);
        }
}
static inline void cpu_time_add(const clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu = cputime_add(acc->cpu, val.cpu);
        }
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu = cputime_sub(a.cpu, b.cpu);
        }
        return a;
}
/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
        cputime_t res = cputime_div(time, div);

        return max_t(cputime_t, res, 1);
}
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                           union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = cputime_sub(cputime_add(now.cpu, incr),
                                    timer->it.cpu.expires.cpu);
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                        incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
                        if (cputime_lt(delta, incr))
                                continue;
                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
                        timer->it_overrun += 1 << i;
                        delta = cputime_sub(delta, incr);
                }
        }
}
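/*
 * A worked trace of the catch-up loops above, with illustrative numbers:
 * suppose the timer fell behind so that delta = 5*incr on entry.  The first
 * loop doubles incr while it still fits (i ends at 2, incr at 4 periods).
 * The second loop then subtracts 4 periods (it_overrun += 1 << 2), skips the
 * 2-period step, and subtracts the final 1 period (it_overrun += 1 << 0),
 * advancing expires by 5 periods and counting 5 overruns in O(log n) steps
 * rather than n individual subtractions.
 */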
static inline cputime_t prof_ticks(struct task_struct *p)
{
        return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        return p->utime;
}
int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}
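/*
 * Illustrative only: userspace sees the resolution computed above through
 * clock_getres().  With HZ=250, for example, the PROF and VIRT clocks report
 * 4 ms, while the SCHED-backed default clocks report 1 ns:
 *
 *      struct timespec res;
 *      clock_getres(CLOCK_PROCESS_CPUTIME_ID, &res);
 *      // res.tv_nsec == 1, since this clock is CPUCLOCK_SCHED based
 */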
int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = task_sched_runtime(p);
                break;
        }
        return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                thread_group_cputime(p, &cputime);
                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
                break;
        case CPUCLOCK_VIRT:
                thread_group_cputime(p, &cputime);
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = thread_group_sched_runtime(p);
                break;
        }
        return 0;
}
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (same_thread_group(p, current)) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else {
                                read_lock(&tasklist_lock);
                                if (thread_group_leader(p) && p->signal) {
                                        error =
                                            cpu_clock_sample_group(which_clock,
                                                                   p, &rtn);
                                }
                                read_unlock(&tasklist_lock);
                        }
                }
                rcu_read_unlock();
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}
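/*
 * Illustrative only: the usual userspace entry points into this function are
 * clock_gettime() on the predefined CPU clocks, or on a clockid obtained for
 * another process:
 *
 *      struct timespec ts;
 *      clockid_t cid;
 *      clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);    // this thread
 *      if (clock_getcpuclockid(pid, &cid) == 0)        // another process
 *              clock_gettime(cid, &ts);
 */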
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
        new_timer->it.cpu.incr.sched = 0;
        new_timer->it.cpu.expires.sched = 0;

        read_lock(&tasklist_lock);
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !thread_group_leader(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return ret;
}
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sum_exec_runtime)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);

        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         ptime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         utime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.sched < sum_exec_runtime) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sum_exec_runtime;
                }
        }
}
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers,
                       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        struct task_cputime cputime;

        thread_group_cputime(tsk, &cputime);
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
}
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;
        unsigned long i;

        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);

        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);

        listpos = head;
        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                list_for_each_entry(next, head, entry) {
                        if (next->expires.sched > nt->expires.sched)
                                break;
                        listpos = &next->entry;
                }
        } else {
                list_for_each_entry(next, head, entry) {
                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                break;
                        listpos = &next->entry;
                }
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                /*
                 * We are the new earliest-expiring timer.
                 * If we are a thread timer, there can always
                 * be a process timer telling us to stop earlier.
                 */
                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
                                if (cputime_eq(p->cputime_expires.prof_exp,
                                               cputime_zero) ||
                                    cputime_gt(p->cputime_expires.prof_exp,
                                               nt->expires.cpu))
                                        p->cputime_expires.prof_exp =
                                                nt->expires.cpu;
                                break;
                        case CPUCLOCK_VIRT:
                                if (cputime_eq(p->cputime_expires.virt_exp,
                                               cputime_zero) ||
                                    cputime_gt(p->cputime_expires.virt_exp,
                                               nt->expires.cpu))
                                        p->cputime_expires.virt_exp =
                                                nt->expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                if (p->cputime_expires.sched_exp == 0 ||
                                    p->cputime_expires.sched_exp >
                                                        nt->expires.sched)
                                        p->cputime_expires.sched_exp =
                                                nt->expires.sched;
                                break;
                        }
                } else {
                        /*
                         * For a process timer, set the cached expiration time.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_VIRT:
                                if (!cputime_eq(p->signal->it_virt_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_virt_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                p->signal->cputime_expires.virt_exp =
                                        timer->it.cpu.expires.cpu;
                                break;
                        case CPUCLOCK_PROF:
                                if (!cputime_eq(p->signal->it_prof_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_prof_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
                                if (i != RLIM_INFINITY &&
                                    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
                                        break;
                                p->signal->cputime_expires.prof_exp =
                                        timer->it.cpu.expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                p->signal->cputime_expires.sched_exp =
                                        timer->it.cpu.expires.sched;
                                break;
                        }
                }
        }

        spin_unlock(&p->sighand->siglock);
}
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                        struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->signal.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->signal == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);
        spin_unlock(&p->sighand->siglock);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_clock_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer, val);
        }

        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.incr, &old->it_interval);
        }
        return ret;
}
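/*
 * Illustrative only: the normal way to reach posix_cpu_timer_set() from
 * userspace is timer_settime() on a timer created against a CPU clock:
 *
 *      timer_t t;
 *      struct sigevent ev = { .sigev_notify = SIGEV_SIGNAL,
 *                             .sigev_signo = SIGALRM };
 *      struct itimerspec its = { .it_value = { .tv_sec = 1 },
 *                                .it_interval = { .tv_sec = 1 } };
 *      timer_create(CLOCK_PROCESS_CPUTIME_ID, &ev, &t);
 *      timer_settime(t, 0, &its, NULL);  // relative; pass TIMER_ABSTIME
 *                                        // for an absolute expiry
 */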
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
        dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_clock_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                if (timer->it.cpu.incr.sched == 0 &&
                    cpu_time_before(timer->it_clock,
                                    timer->it.cpu.expires, now)) {
                        /*
                         * Do-nothing timer expired and has no reload,
                         * so it's as if it was never set.
                         */
                        timer->it.cpu.expires.sched = 0;
                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                        return;
                }
                /*
                 * Account for any expirations and reloads that should
                 * have happened.
                 */
                bump_cpu_timer(timer, now);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}
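/*
 * Illustrative only: userspace reads the remaining time computed here via
 * timer_gettime() on a CPU-clock timer such as the one in the sketch above:
 *
 *      struct itimerspec cur;
 *      timer_gettime(t, &cur);  // cur.it_value: CPU time left until expiry
 */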
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;

        maxfire = 20;
        tsk->cputime_expires.prof_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.prof_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.virt_exp = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->cputime_expires.virt_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.sched_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
                        tsk->cputime_expires.sched_exp = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case thread timers.
         */
        if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
                unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
                unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (sig->rlim[RLIMIT_RTTIME].rlim_cur
                            < sig->rlim[RLIMIT_RTTIME].rlim_max) {
                                sig->rlim[RLIMIT_RTTIME].rlim_cur +=
                                                                USEC_PER_SEC;
                        }
                        printk(KERN_INFO
                                "RT Watchdog Timeout: %s[%d]\n",
                                tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
}
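/*
 * Illustrative only: the RLIMIT_RTTIME watchdog above is something a
 * realtime task opts into with setrlimit(); the limits are expressed as
 * CPU time consumed without sleeping, in microseconds:
 *
 *      struct rlimit rl = { .rlim_cur = 500000,   // SIGXCPU after 0.5s
 *                           .rlim_max = 1000000 };// SIGKILL after 1s
 *      setrlimit(RLIMIT_RTTIME, &rl);
 */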
/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers[N] lists onto the firing list.  The
 * per-thread timers have already been taken off by check_thread_timers.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;

        /*
         * Don't sample the current process CPU clocks if there are no timers.
         */
        if (list_empty(&timers[CPUCLOCK_PROF]) &&
            cputime_eq(sig->it_prof_expires, cputime_zero) &&
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it_virt_expires, cputime_zero) &&
            list_empty(&timers[CPUCLOCK_SCHED]))
                return;

        /*
         * Collect the current process totals.
         */
        thread_group_cputime(tsk, &cputime);
        utime = cputime.utime;
        ptime = cputime_add(utime, cputime.stime);
        sum_sched_runtime = cputime.sum_exec_runtime;

        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
                        prof_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
                        virt_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
                        sched_expires = tl->expires.sched;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        /*
         * Check for the special case process timers.
         */
        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                if (cputime_ge(ptime, sig->it_prof_expires)) {
                        /* ITIMER_PROF fires and reloads.  */
                        sig->it_prof_expires = sig->it_prof_incr;
                        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                                sig->it_prof_expires = cputime_add(
                                        sig->it_prof_expires, ptime);
                        }
                        __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
                    (cputime_eq(prof_expires, cputime_zero) ||
                     cputime_lt(sig->it_prof_expires, prof_expires))) {
                        prof_expires = sig->it_prof_expires;
                }
        }
        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                if (cputime_ge(utime, sig->it_virt_expires)) {
                        /* ITIMER_VIRTUAL fires and reloads.  */
                        sig->it_virt_expires = sig->it_virt_incr;
                        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                                sig->it_virt_expires = cputime_add(
                                        sig->it_virt_expires, utime);
                        }
                        __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
                    (cputime_eq(virt_expires, cputime_zero) ||
                     cputime_lt(sig->it_virt_expires, virt_expires))) {
                        virt_expires = sig->it_virt_expires;
                }
        }
        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                cputime_t x;
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (sig->rlim[RLIMIT_CPU].rlim_cur
                            < sig->rlim[RLIMIT_CPU].rlim_max) {
                                sig->rlim[RLIMIT_CPU].rlim_cur++;
                        }
                }
                x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;
                }
        }

        if (!cputime_eq(prof_expires, cputime_zero) &&
            (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
             cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
                sig->cputime_expires.prof_exp = prof_expires;
        if (!cputime_eq(virt_expires, cputime_zero) &&
            (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
             cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
                sig->cputime_expires.virt_exp = virt_expires;
        if (sched_expires != 0 &&
            (sig->cputime_expires.sched_exp == 0 ||
             sig->cputime_expires.sched_exp > sched_expires))
                sig->cputime_expires.sched_exp = sched_expires;
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it.  */
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
                        goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped.  Take this opportunity to
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
                cpu_clock_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        arm_timer(timer, now);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}
/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (cputime_eq(cputime->utime, cputime_zero) &&
            cputime_eq(cputime->stime, cputime_zero) &&
            cputime->sum_exec_runtime == 0)
                return 1;
        return 0;
}
/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                       const struct task_cputime *expires)
{
        if (!cputime_eq(expires->utime, cputime_zero) &&
            cputime_ge(sample->utime, expires->utime))
                return 1;
        if (!cputime_eq(expires->stime, cputime_zero) &&
            cputime_ge(cputime_add(sample->utime, sample->stime),
                       expires->stime))
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}
/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;

        /* tsk == current, ensure it is safe to use ->signal/sighand */
        if (unlikely(tsk->exit_state))
                return 0;

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample = {
                        .utime = tsk->utime,
                        .stime = tsk->stime,
                        .sum_exec_runtime = tsk->se.sum_exec_runtime
                };

                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        if (!task_cputime_zero(&sig->cputime_expires)) {
                struct task_cputime group_sample;

                thread_group_cputime(tsk, &group_sample);
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }
        return 0;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;

        BUG_ON(!irqs_disabled());

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        spin_lock(&tsk->sighand->siglock);
        /*
         * Here we take off tsk->signal->cpu_timers[N] and
         * tsk->cpu_timers[N] all the timers that are firing, and
         * put them on the firing list.
         */
        check_thread_timers(tsk, &firing);
        check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        spin_unlock(&tsk->sighand->siglock);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int firing;
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(firing >= 0)) {
                        cpu_timer_fire(timer);
                }
                spin_unlock(&timer->it_lock);
        }
}
/*
 * Set one of the process-wide special case CPU timers.
 * The tsk->sighand->siglock must be held by the caller.
 * The *newval argument is relative and we update it to be absolute, *oldval
 * is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;
        struct list_head *head;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_clock_sample_group(clock_idx, tsk, &now);

        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
                                *oldval = jiffies_to_cputime(1);
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);

                /*
                 * If the RLIMIT_CPU timer will expire before the
                 * ITIMER_PROF timer, we have nothing else to do.
                 */
                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
                    < cputime_to_secs(*newval))
                        return;
        }

        /*
         * Check whether there are any process timers already set to fire
         * before this one.  If so, we don't have anything more to do.
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
            cputime_ge(list_first_entry(head,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                switch (clock_idx) {
                case CPUCLOCK_PROF:
                        tsk->signal->cputime_expires.prof_exp = *newval;
                        break;
                case CPUCLOCK_VIRT:
                        tsk->signal->cputime_expires.virt_exp = *newval;
                        break;
                }
        }
}
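/*
 * Illustrative only: this helper backs the classic interval timers, so a
 * userspace call such as
 *
 *      struct itimerval itv = { .it_value = { .tv_sec = 2 } };
 *      setitimer(ITIMER_PROF, &itv, NULL);  // SIGPROF after 2s of CPU time
 *
 * ends up here with clock_idx == CPUCLOCK_PROF (ITIMER_VIRTUAL maps to
 * CPUCLOCK_VIRT), as does update_rlimit_cpu() above.
 */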
static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, it);
                spin_unlock_irq(&timer.it_lock);

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}
int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                     struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
                &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;
        }
        return error;
}
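/*
 * Illustrative only: from userspace this is clock_nanosleep() on a CPU
 * clock, e.g. sleeping until the process has consumed one more second of
 * CPU time:
 *
 *      struct timespec req = { .tv_sec = 1 }, rem;
 *      clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, &rem);
 *
 * Note the -EINVAL check above: sleeping on your own per-thread clock can
 * never succeed, since a sleeping thread stops accumulating CPU time.
 */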
long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->arg0;
        struct timespec __user *rmtp;
        struct timespec t;
        struct itimerspec it;
        int error;

        rmtp = (struct timespec __user *) restart_block->arg1;
        t.tv_sec = restart_block->arg2;
        t.tv_nsec = restart_block->arg3;

        restart_block->fn = do_no_restart_syscall;
        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = t.tv_sec;
                restart_block->arg3 = t.tv_nsec;
        }
        return error;
}
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
                             struct timespec *rqtp, struct timespec __user *rmtp)
{
        return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres = process_cpu_clock_getres,
                .clock_get = process_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
                .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
                .clock_get = thread_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
                .nsleep_restart = thread_cpu_nsleep_restart,
        };

        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        return 0;
}
__initcall(init_posix_cpu_timers);