#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get the old
 * or the new value, with a side effect of accounting a slice of irq time
 * to the wrong task when an irq is in progress while we read rq->clock.
 * That is a worthy compromise in place of having locks on each irq in
 * account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;
void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

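/*
 * On 32-bit, a plain u64 load can tear. The per-CPU seqcount below is what
 * the irq_time_write_begin()/irq_time_write_end() helpers (used around the
 * updates in irqtime_account_irq()) bump, so that readers of
 * cpu_hardirq_time/cpu_softirq_time can retry instead of observing a
 * half-written value; 64-bit builds do not need it. The corresponding
 * read-side helper is not part of this excerpt.
 */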
#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special task
	 * that does not consume any time but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
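
/*
 * Note: irqtime_account_irq() runs on every irq entry and exit (typically
 * via the account_irq_enter_time()/account_irq_exit_time() wrappers
 * elsewhere in the tree), so it only accumulates raw nanoseconds per CPU.
 * The two helpers below are polled from the tick path and report whether
 * the accumulated hardirq/softirq time has grown past what is already
 * published in kcpustat, i.e. whether the next tick should be charged to
 * CPUTIME_IRQ/CPUTIME_SOFTIRQ.
 */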
static int irqtime_account_hi_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_hardirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

static int irqtime_account_si_update(void)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	u64 latest_ns;
	int ret = 0;

	local_irq_save(flags);
	latest_ns = this_cpu_read(cpu_softirq_time);
	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
		ret = 1;
	local_irq_restore(flags);
	return ret;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}
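
/*
 * Note that guest time is charged to both the guest and the user (or nice)
 * buckets above, so the CPUTIME_GUEST{,_NICE} columns exported through
 * /proc/stat are a subset of CPUTIME_USER/CPUTIME_NICE rather than an
 * additional component of total time.
 */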
/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: the cpustat field index that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}
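
/*
 * Idle time is charged to iowait rather than idle whenever at least one
 * task that blocked for I/O on this runqueue is still waiting
 * (rq->nr_iowait > 0); that is what distinguishes the idle and iowait
 * columns seen in /proc/stat.
 */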

static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;
		cputime_t steal_ct;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		/*
		 * cputime_t may be less precise than nsecs (e.g. if it's
		 * based on jiffies). Let's cast the result to cputime
		 * granularity and account the rest on the next rounds.
		 */
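		/*
		 * For example, assuming HZ=100 so one cputime unit is 10 ms:
		 * a pending steal of 25 ms becomes steal_ct = 2 ticks (20 ms),
		 * prev_steal_time advances by those 20 ms, and the remaining
		 * 5 ms is left to be accounted on a later invocation.
		 */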
		steal_ct = nsecs_to_cputime(steal);
		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);

		account_steal_time(steal_ct);
		return steal_ct;
	}
#endif
	return false;
}

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		read_seqbegin_or_lock(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += task_sched_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry(&sig->stats_lock, seq);
	rcu_read_unlock();
}
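
/*
 * The loop above is an optimistic read: the first pass runs lockless
 * against sig->stats_lock and is retried with the lock held (nextseq = 1)
 * only if a concurrent writer invalidated the snapshot; done_seqretry()
 * then drops the lock if it was taken.
 */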

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done both for system and user time, as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq or
 * softirq, as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq, int ticks)
{
	cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
	u64 cputime = (__force u64) cputime_one_jiffy;
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	if (steal_account_process_tick())
		return;

	cputime *= ticks;
	scaled *= ticks;

	if (irqtime_account_hi_update()) {
		cpustat[CPUTIME_IRQ] += cputime;
	} else if (irqtime_account_si_update()) {
		cpustat[CPUTIME_SOFTIRQ] += cputime;
	} else if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime, scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime, scaled);
	} else {
		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	struct rq *rq = this_rq();

	irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have another meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_common_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt()) {
		/*
		 * If we interrupted user, context_tracking_in_user()
		 * is 1 because the context tracking doesn't hook
		 * on irq entry/exit. This way we know if
		 * we need to flush user time on kernel entry.
		 */
		if (context_tracking_in_user()) {
			vtime_account_user(tsk);
			return;
		}

		if (is_idle_task(tsk)) {
			vtime_account_idle(tsk);
			return;
		}
	}
	vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
	struct rq *rq = this_rq();

	if (vtime_accounting_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq, 1);
		return;
	}

	if (steal_account_process_tick())
		return;

	if (user_tick)
		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
				    one_jiffy_scaled);
	else
		account_idle_time(cputime_one_jiffy);
}

/*
 * Account multiple ticks of steal time.
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
	account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of idle ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	account_idle_time(jiffies_to_cputime(ticks));
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return (__force cputime_t) scaled;
}

/*
 * Adjust tick based cputime, which has only random precision, against the
 * scheduler runtime accounting.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, utime;

	/*
	 * Tick based cputime accounting depends on whether the random
	 * scheduling timeslices of a task get interrupted or not by the timer.
	 * Depending on these circumstances, the number of these interrupts
	 * may be too high or too low, matching the real user and system
	 * cputime only with variable precision.
	 *
	 * Fix this by scaling these tick based values against the total
	 * runtime accounted by the CFS scheduler.
	 */
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	/*
	 * Update userspace-visible utime/stime values only if the actual
	 * execution time is bigger than what was already exported. Note that
	 * this can happen if we previously provided bigger values due to
	 * scaling inaccuracy on big numbers.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	if (utime == 0) {
		stime = rtime;
	} else if (stime == 0) {
		utime = rtime;
	} else {
		cputime_t total = stime + utime;

		stime = scale_stime((__force u64)stime,
				    (__force u64)rtime, (__force u64)total);
		utime = rtime - stime;
	}

	/*
	 * If the tick based count grows faster than the scheduler one,
	 * the result of the scaling may go backward.
	 * Let's enforce monotonicity.
	 */
	prev->stime = max(prev->stime, stime);
	prev->utime = max(prev->utime, utime);

out:
	*ut = prev->utime;
	*st = prev->stime;
}
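
/*
 * Worked example for cputime_adjust() above (illustrative numbers only):
 * if the tick samples say curr->utime = 30 and curr->stime = 10 units
 * while the scheduler measured only rtime = 20 units of real runtime,
 * then total = 40, stime is rescaled to 10 * 20 / 40 = 5 and utime
 * becomes rtime - stime = 15, preserving the 3:1 user/system ratio.
 * The max() above then keeps the exported values monotonic across calls.
 */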

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long long delta = vtime_delta(tsk);

	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
	tsk->vtime_snap += delta;

	/* CHECKME: always safe to convert nsecs to cputime? */
	return nsecs_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	if (context_tracking_in_user())
		tsk->vtime_snap_whence = VTIME_USER;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	write_seqlock(&tsk->vtime_seqlock);
	delta_cpu = get_vtime_delta(tsk);
	tsk->vtime_snap_whence = VTIME_SYS;
	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
	write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	/*
	 * The flags must be updated under the lock with
	 * the vtime_snap flush and update.
	 * That enforces the right ordering and update sequence
	 * synchronization against the reader (task_gtime())
	 * that can thus safely catch up with a tickless delta.
	 */
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqlock(&tsk->vtime_seqlock);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}
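
/*
 * On context switch the outgoing task stops accruing vtime (its snapshot
 * state is parked as VTIME_SLEEPING below), while the incoming task gets a
 * fresh vtime_snap taken from sched_clock_cpu() so that its next delta is
 * measured from the switch point.
 */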
void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqlock(&prev->vtime_seqlock);
	prev->vtime_snap_whence = VTIME_SLEEPING;
	write_sequnlock(&prev->vtime_seqlock);

	write_seqlock(&current->vtime_seqlock);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = sched_clock_cpu(smp_processor_id());
	write_sequnlock(&current->vtime_seqlock);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	write_seqlock_irqsave(&t->vtime_seqlock, flags);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = sched_clock_cpu(cpu);
	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);

		gtime = t->gtime;
		if (t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqretry(&t->vtime_seqlock, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqbegin(&t->vtime_seqlock);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_SLEEPING ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz
		 * time to the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqretry(&t->vtime_seqlock, seq));
}
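
/*
 * Callers receive the stored utime/stime snapshot plus the still-unaccounted
 * delta separately, so task_cputime_scaled() below can apply
 * cputime_to_scaled() to just the pending part before adding it in.
 */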
void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */