#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"
#ifdef CONFIG_SCHED_WALT
#include "walt.h"	/* assumed location of walt_account_irqtime() */
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in other CPU reading this CPU's irq time and can
 * race with irq/vtime_account on this CPU. We would either get old
 * or new value with a side effect of accounting a slice of irq time to wrong
 * task when irq is in progress while we read rq->clock. That is a worthy
 * compromise in place of having locks on each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
        sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
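
/*
 * Note on irq_time_seq: on 64-bit kernels the per-cpu u64 counters above can
 * be read atomically, so readers need no synchronization. On 32-bit kernels a
 * 64-bit load can tear, so the sequence counter lets readers retry when they
 * race with the writer in irqtime_account_irq() below.
 */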

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
        unsigned long flags;
        s64 delta;
        int cpu;
#ifdef CONFIG_SCHED_WALT
        u64 wallclock;
        bool account = true;	/* assumption: WALT is only told about irq/softirq slices */
#endif

        if (!sched_clock_irqtime)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
#ifdef CONFIG_SCHED_WALT
        wallclock = sched_clock_cpu(cpu);
#endif
        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
        __this_cpu_add(irq_start_time, delta);

        irq_time_write_begin();
        /*
         * We do not account for softirq time from ksoftirqd here.
         * We want to continue accounting softirq time to ksoftirqd thread
         * in that case, so as not to confuse the scheduler with a special
         * task that does not consume any time but still wants to run.
         */
        if (hardirq_count())
                __this_cpu_add(cpu_hardirq_time, delta);
        else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
                __this_cpu_add(cpu_softirq_time, delta);
#ifdef CONFIG_SCHED_WALT
        else
                account = false;
#endif

        irq_time_write_end();
#ifdef CONFIG_SCHED_WALT
        if (account)
                walt_account_irqtime(cpu, curr, delta, wallclock);
#endif
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
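
/*
 * For context: this is normally reached from the irq entry/exit paths via the
 * account_irq_enter_time()/account_irq_exit_time() helpers, so "delta" covers
 * the slice since the previous hard/soft irq transition on this CPU.
 */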

static int irqtime_account_hi_update(void)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_hardirq_time);
        if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
}

static int irqtime_account_si_update(void)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
        u64 latest_ns;
        int ret = 0;

        local_irq_save(flags);
        latest_ns = this_cpu_read(cpu_softirq_time);
        if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
                ret = 1;
        local_irq_restore(flags);
        return ret;
}
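
/*
 * The two helpers above return true when the per-cpu irq/softirq time has run
 * ahead of what is already recorded in kcpustat; irqtime_account_process_tick()
 * below treats that as "un-accounted irq time is pending" and charges the
 * current tick to CPUTIME_IRQ/CPUTIME_SOFTIRQ instead of the running task's
 * usual bucket.
 */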

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
                                            u64 tmp)
{
        /*
         * Since all updates are sure to touch the root cgroup, we
         * get ourselves ahead and touch it first. If the root cgroup
         * is the only cgroup, then nothing else should be necessary.
         */
        __this_cpu_add(kernel_cpustat.cpustat[index], tmp);

        cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
                       cputime_t cputime_scaled)
{
        int index;

        /* Add user time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);

        index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

        /* Add user time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for user time used */
        acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
                               cputime_t cputime_scaled)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        /* Add guest time to process. */
        p->utime += cputime;
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);
        p->gtime += cputime;

        /* Add guest time to cpustat. */
        if (task_nice(p) > 0) {
                cpustat[CPUTIME_NICE] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
        } else {
                cpustat[CPUTIME_USER] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST] += (__force u64) cputime;
        }
}

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
                        cputime_t cputime_scaled, int index)
{
        /* Add system time to process. */
        p->stime += cputime;
        p->stimescaled += cputime_scaled;
        account_group_system_time(p, cputime);

        /* Add system time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);

        /* Account for system time used */
        acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
                         cputime_t cputime, cputime_t cputime_scaled)
{
        int index;

        if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
                account_guest_time(p, cputime, cputime_scaled);
                return;
        }

        if (hardirq_count() - hardirq_offset)
                index = CPUTIME_IRQ;
        else if (in_serving_softirq())
                index = CPUTIME_SOFTIRQ;
        else
                index = CPUTIME_SYSTEM;

        __account_system_time(p, cputime, cputime_scaled, index);
}
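
/*
 * For example, a tick that lands while a hard interrupt is being serviced on
 * behalf of a task is charged to CPUTIME_IRQ (the "irq" column of /proc/stat)
 * rather than CPUTIME_SYSTEM, while the task's own p->stime is incremented
 * either way.
 */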

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        struct rq *rq = this_rq();

        if (atomic_read(&rq->nr_iowait) > 0)
                cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
        else
                cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}

static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
                u64 steal;
                unsigned long steal_jiffies;

                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;

                /*
                 * steal is in nsecs but our caller is expecting steal
                 * time in jiffies. Let's cast the result to jiffies
                 * granularity and account the rest on the next rounds.
                 */
                steal_jiffies = nsecs_to_jiffies(steal);
                this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);

                account_steal_time(jiffies_to_cputime(steal_jiffies));
                return steal_jiffies;
        }
#endif
        return false;
}
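
/*
 * Worked example, assuming HZ=100 (one jiffy = 10ms): if the hypervisor
 * reports 25ms of new steal time, nsecs_to_jiffies() yields 2 jiffies, so
 * 20ms are accounted and added to prev_steal_time now, and the remaining
 * 5ms is carried forward and picked up by a later tick instead of being
 * lost to rounding.
 */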

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
        struct signal_struct *sig = tsk->signal;
        cputime_t utime, stime;
        struct task_struct *t;
        unsigned int seq, nextseq;
        unsigned long flags;

        rcu_read_lock();
        /* Attempt a lockless read on the first round. */
        nextseq = 0;
        do {
                seq = nextseq;
                flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
                times->utime = sig->utime;
                times->stime = sig->stime;
                times->sum_exec_runtime = sig->sum_sched_runtime;

                for_each_thread(tsk, t) {
                        task_cputime(t, &utime, &stime);
                        times->utime += utime;
                        times->stime += stime;
                        times->sum_exec_runtime += task_sched_runtime(t);
                }
                /* If lockless access failed, take the lock. */
                nextseq = 1;
        } while (need_seqretry(&sig->stats_lock, seq));
        done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
        rcu_read_unlock();
}
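
/*
 * This is the usual seqlock "or_lock" pattern: the first pass through the
 * loop reads sig->stats_lock locklessly, and only if that pass raced with a
 * writer does need_seqretry() force a second pass that takes the lock and
 * therefore cannot fail.
 */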

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * Check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq
 * softirq as those do not count in task exec_runtime any more.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         struct rq *rq, int ticks)
{
        cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
        u64 cputime = (__force u64) cputime_one_jiffy;
        u64 *cpustat = kcpustat_this_cpu->cpustat;

        if (steal_account_process_tick())
                return;

        cputime *= ticks;
        scaled *= ticks;

        if (irqtime_account_hi_update()) {
                cpustat[CPUTIME_IRQ] += cputime;
        } else if (irqtime_account_si_update()) {
                cpustat[CPUTIME_SOFTIRQ] += cputime;
        } else if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time does not get accounted in cpu_softirq_time,
                 * so we have to handle it separately here.
                 * Also, p->stime needs to be updated for ksoftirqd.
                 */
                __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
        } else if (user_tick) {
                account_user_time(p, cputime, scaled);
        } else if (p == rq->idle) {
                account_idle_time(cputime);
        } else if (p->flags & PF_VCPU) { /* System time or guest time */
                account_guest_time(p, cputime, scaled);
        } else {
                __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
        }
}

static void irqtime_account_idle_ticks(int ticks)
{
        struct rq *rq = this_rq();

        irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                                struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
                vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        vtime_account_user(prev);
#endif
        arch_vtime_task_switch(prev);
}
#endif

/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have other meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_common_account_irq_enter(struct task_struct *tsk)
{
        if (!in_interrupt()) {
                /*
                 * If we interrupted user, context_tracking_in_user()
                 * is 1 because context tracking doesn't hook
                 * on irq entry/exit. This way we know if
                 * we need to flush user time on kernel entry.
                 */
                if (context_tracking_in_user()) {
                        vtime_account_user(tsk);
                        return;
                }

                if (is_idle_task(tsk)) {
                        vtime_account_idle(tsk);
                        return;
                }
        }
        vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        *ut = p->utime;
        *st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);

        *ut = cputime.utime;
        *st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
        struct rq *rq = this_rq();

        if (vtime_accounting_enabled())
                return;

        if (sched_clock_irqtime) {
                irqtime_account_process_tick(p, user_tick, rq, 1);
                return;
        }

        if (steal_account_process_tick())
                return;

        if (user_tick)
                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                    one_jiffy_scaled);
        else
                account_idle_time(cputime_one_jiffy);
}
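
/*
 * In short, a tick is consumed by exactly one accounting scheme: the function
 * bails out early when generic vtime accounting is active, otherwise it hands
 * the tick to the irq-time demultiplexer when sched_clock_irqtime is set, and
 * only then falls back to the plain user/system/idle split.
 */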

/*
 * Account multiple ticks of steal time.
 * @p: the process from which the cpu time has been stolen
 * @ticks: number of stolen ticks
 */
void account_steal_ticks(unsigned long ticks)
{
        account_steal_time(jiffies_to_cputime(ticks));
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of ticks the cpu spent idle
 */
void account_idle_ticks(unsigned long ticks)
{
        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }

        account_idle_time(jiffies_to_cputime(ticks));
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
        u64 scaled;

        for (;;) {
                /* Make sure "rtime" is the bigger of stime/rtime */
                if (stime > rtime)
                        swap(rtime, stime);

                /* Make sure 'total' fits in 32 bits */
                if (total >> 32)
                        goto drop_precision;

                /* Does rtime (and thus stime) fit in 32 bits? */
                if (!(rtime >> 32))
                        break;

                /* Can we just balance rtime/stime rather than dropping bits? */
                if (stime >> 31)
                        goto drop_precision;

                /* We can grow stime and shrink rtime and try to make them both fit */
                stime <<= 1;
                rtime >>= 1;
                continue;

drop_precision:
                /* We drop from rtime, it has more bits than stime */
                rtime >>= 1;
                total >>= 1;
        }

        /*
         * Make sure gcc understands that this is a 32x32->64 multiply,
         * followed by a 64/32->64 divide.
         */
        scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
        return (__force cputime_t) scaled;
}
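
/*
 * Rough sketch of why the final operands are safe: rtime is kept the larger
 * value and bits are shifted out until both rtime and total fit in 32 bits;
 * stime <= rtime then also fits, so the 32x32->64 multiply cannot overflow
 * and only low-order bits of precision are lost.
 */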

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on random scheduling timeslices of a
 * task to be interrupted or not by the timer.  Depending on these
 * circumstances, the number of these interrupts may be over or
 * under-optimistic, matching the real user and system cputime with a variable
 * precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
static void cputime_adjust(struct task_cputime *curr,
                           struct prev_cputime *prev,
                           cputime_t *ut, cputime_t *st)
{
        cputime_t rtime, stime, utime;
        unsigned long flags;

        /* Serialize concurrent callers such that we can honour our guarantees */
        raw_spin_lock_irqsave(&prev->lock, flags);
        rtime = nsecs_to_cputime(curr->sum_exec_runtime);

        /*
         * This is possible under two circumstances:
         *  - rtime isn't monotonic after all (a bug);
         *  - we got reordered by the lock.
         *
         * In both cases this acts as a filter such that the rest of the code
         * can assume it is monotonic regardless of anything else.
         */
        if (prev->stime + prev->utime >= rtime)
                goto out;

        stime = curr->stime;
        utime = curr->utime;

        /*
         * If either stime or both stime and utime are 0, assume all runtime is
         * userspace. Once a task gets some ticks, the monotonicity code at
         * 'update' will ensure things converge to the observed ratio.
         */
        if (stime == 0) {
                utime = rtime;
                goto update;
        }

        if (utime == 0) {
                stime = rtime;
                goto update;
        }

        stime = scale_stime((__force u64)stime, (__force u64)rtime,
                            (__force u64)(stime + utime));

update:
        /*
         * Make sure stime doesn't go backwards; this preserves monotonicity
         * for utime because rtime is monotonic.
         *
         *  utime_i+1 = rtime_i+1 - stime_i
         *            = rtime_i+1 - (rtime_i - utime_i)
         *            = (rtime_i+1 - rtime_i) + utime_i
         *            >= utime_i
         */
        if (stime < prev->stime)
                stime = prev->stime;
        utime = rtime - stime;

        /*
         * Make sure utime doesn't go backwards; this still preserves
         * monotonicity for stime, analogous argument to above.
         */
        if (utime < prev->utime) {
                utime = prev->utime;
                stime = rtime - utime;
        }

        prev->stime = stime;
        prev->utime = utime;
out:
        *ut = prev->utime;
        *st = prev->stime;
        raw_spin_unlock_irqrestore(&prev->lock, flags);
}
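
/*
 * Worked example: with prev empty, 2 ticks of stime and 6 ticks of utime but
 * an rtime of only 4 ticks, scale_stime() gives stime = 4 * 2 / 8 = 1 and
 * utime = rtime - stime = 3, so the observed 1:3 split is preserved inside
 * the runtime the scheduler actually measured; later calls can only move
 * stime and utime forward because of the prev->stime/prev->utime clamps.
 */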

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime = {
                .sum_exec_runtime = p->se.sum_exec_runtime,
        };

        task_cputime(p, &cputime.utime, &cputime.stime);
        cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
        struct task_cputime cputime;

        thread_group_cputime(p, &cputime);
        cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static unsigned long long vtime_delta(struct task_struct *tsk)
{
        unsigned long long clock;

        clock = local_clock();
        if (clock < tsk->vtime_snap)
                return 0;

        return clock - tsk->vtime_snap;
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
        unsigned long long delta = vtime_delta(tsk);

        WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
        tsk->vtime_snap += delta;

        /* CHECKME: always safe to convert nsecs to cputime? */
        return nsecs_to_cputime(delta);
}

static void __vtime_account_system(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        if (context_tracking_in_user())
                tsk->vtime_snap_whence = VTIME_USER;
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_account_user(struct task_struct *tsk)
{
        cputime_t delta_cpu;

        write_seqlock(&tsk->vtime_seqlock);
        delta_cpu = get_vtime_delta(tsk);
        tsk->vtime_snap_whence = VTIME_SYS;
        account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_user_enter(struct task_struct *tsk)
{
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        tsk->vtime_snap_whence = VTIME_USER;
        write_sequnlock(&tsk->vtime_seqlock);
}

void vtime_guest_enter(struct task_struct *tsk)
{
        /*
         * The flags must be updated under the lock with
         * the vtime_snap flush and update.
         * That enforces a right ordering and update sequence
         * synchronization against the reader (task_gtime())
         * that can thus safely catch up with a tickless delta.
         */
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        current->flags |= PF_VCPU;
        write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
        write_seqlock(&tsk->vtime_seqlock);
        __vtime_account_system(tsk);
        current->flags &= ~PF_VCPU;
        write_sequnlock(&tsk->vtime_seqlock);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
        cputime_t delta_cpu = get_vtime_delta(tsk);

        account_idle_time(delta_cpu);
}

void arch_vtime_task_switch(struct task_struct *prev)
{
        write_seqlock(&prev->vtime_seqlock);
        prev->vtime_snap_whence = VTIME_SLEEPING;
        write_sequnlock(&prev->vtime_seqlock);

        write_seqlock(&current->vtime_seqlock);
        current->vtime_snap_whence = VTIME_SYS;
        current->vtime_snap = sched_clock_cpu(smp_processor_id());
        write_sequnlock(&current->vtime_seqlock);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
        unsigned long flags;

        write_seqlock_irqsave(&t->vtime_seqlock, flags);
        t->vtime_snap_whence = VTIME_SYS;
        t->vtime_snap = sched_clock_cpu(cpu);
        write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
}
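
/*
 * Taken together, vtime_snap_whence acts as a small state machine: VTIME_SYS
 * while the task runs in the kernel, VTIME_USER while it runs in userspace
 * and VTIME_SLEEPING once it has been switched out, with vtime_snap recording
 * the last time accounting was flushed so readers can add the pending delta.
 */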

cputime_t task_gtime(struct task_struct *t)
{
        unsigned int seq;
        cputime_t gtime;

        if (!context_tracking_is_enabled())
                return t->gtime;

        do {
                seq = read_seqbegin(&t->vtime_seqlock);

                gtime = t->gtime;
                if (t->flags & PF_VCPU)
                        gtime += vtime_delta(t);

        } while (read_seqretry(&t->vtime_seqlock, seq));

        return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
                   cputime_t *u_dst, cputime_t *s_dst,
                   cputime_t *u_src, cputime_t *s_src,
                   cputime_t *udelta, cputime_t *sdelta)
{
        unsigned int seq;
        unsigned long long delta;

        do {
                *udelta = 0;
                *sdelta = 0;

                seq = read_seqbegin(&t->vtime_seqlock);

                if (u_dst)
                        *u_dst = *u_src;
                if (s_dst)
                        *s_dst = *s_src;

                /* Task is sleeping, nothing to add */
                if (t->vtime_snap_whence == VTIME_SLEEPING ||
                    is_idle_task(t))
                        continue;

                delta = vtime_delta(t);

                /*
                 * Task runs either in user or kernel space, add pending nohz
                 * time to the right place.
                 */
                if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
                        *udelta = delta;
                } else {
                        if (t->vtime_snap_whence == VTIME_SYS)
                                *sdelta = delta;
                }
        } while (read_seqretry(&t->vtime_seqlock, seq));
}

void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
        cputime_t udelta, sdelta;

        fetch_task_cputime(t, utime, stime, &t->utime,
                           &t->stime, &udelta, &sdelta);
        if (utime)
                *utime += udelta;
        if (stime)
                *stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
                         cputime_t *utimescaled, cputime_t *stimescaled)
{
        cputime_t udelta, sdelta;

        fetch_task_cputime(t, utimescaled, stimescaled,
                           &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
        if (utimescaled)
                *utimescaled += cputime_to_scaled(udelta);
        if (stimescaled)
                *stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */