// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple CPU accounting cgroup controller
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #include <asm/cputime.h>
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with a side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
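
/*
 * Illustrative sketch (not part of this file): a reader pairs the
 * u64_stats_update_begin()/end() writer sections below with
 * u64_stats_fetch_begin()/retry(), so that a 32-bit reader never sees a
 * torn 64-bit total. The kernel keeps a similar helper in
 * kernel/sched/sched.h (irq_time_read()); the body below is a hedged
 * sketch of that pattern, not a verbatim copy, and the function name is
 * made up.
 */
static inline u64 irqtime_total_read_example(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		seq = u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}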

static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
				  enum cpu_usage_stat idx)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	u64_stats_update_begin(&irqtime->sync);
	cpustat[idx] += delta;
	irqtime->total += delta;
	irqtime->tick_delta += delta;
	u64_stats_update_end(&irqtime->sync);
}

/*
 * Called after incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	unsigned int pc;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;
	pc = irq_count() - offset;

	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with a
	 * special task that does not consume any time but still wants to
	 * run.
	 */
	if (pc & HARDIRQ_MASK)
		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
	else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
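
/*
 * Illustrative sketch (not part of this file), assuming the usual
 * preempt_count layout from <linux/preempt.h>: the SOFTIRQ field sits
 * above the PREEMPT bits and the HARDIRQ field above that. A nonzero
 * HARDIRQ field means a hardirq handler is running at some nesting
 * level, while SOFTIRQ_OFFSET distinguishes "actually serving a
 * softirq" from merely having softirqs disabled. example_classify()
 * is a made-up name, for illustration only.
 */
static inline enum cpu_usage_stat example_classify(unsigned int pc)
{
	if (pc & HARDIRQ_MASK)		/* any hardirq nesting level */
		return CPUTIME_IRQ;
	if (pc & SOFTIRQ_OFFSET)	/* actually serving a softirq */
		return CPUTIME_SOFTIRQ;
	return CPUTIME_SYSTEM;
}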

static u64 irqtime_tick_accounted(u64 maxtime)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	u64 delta;

	delta = min(irqtime->tick_delta, maxtime);
	irqtime->tick_delta -= delta;

	return delta;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static u64 irqtime_tick_accounted(u64 dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cgroup_account_cputime_field(p, index, tmp);
}

/*
 * Account user CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in user space since the last update
 */
void account_user_time(struct task_struct *p, u64 cputime)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in virtual machine since the last update
 */
void account_guest_time(struct task_struct *p, u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		task_group_account_field(p, CPUTIME_NICE, cputime);
		cpustat[CPUTIME_GUEST_NICE] += cputime;
	} else {
		task_group_account_field(p, CPUTIME_USER, cputime);
		cpustat[CPUTIME_GUEST] += cputime;
	}
}

/*
 * Account system CPU time to a process and desired cpustat field
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in kernel space since the last update
 * @index: index of the cpustat field that has to be updated
 */
void account_system_index_time(struct task_struct *p,
			       u64 cputime, enum cpu_usage_stat index)
{
	/* Add system time to process. */
	p->stime += cputime;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the CPU time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	account_system_index_time(p, cputime, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the CPU time spent in involuntary wait
 */
void account_steal_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += cputime;
}

/*
 * Account for idle time.
 * @cputime: the CPU time spent in idle wait
 */
void account_idle_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += cputime;
	else
		cpustat[CPUTIME_IDLE] += cputime;
}

#ifdef CONFIG_SCHED_CORE
/*
 * Account for forceidle time due to core scheduling.
 *
 * REQUIRES: schedstat is enabled.
 */
void __account_forceidle_time(struct task_struct *p, u64 delta)
{
	__schedstat_add(p->stats.core_forceidle_sum, delta);

	task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
}
#endif

/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;
		steal = min(steal, maxtime);
		account_steal_time(steal);
		this_rq()->prev_steal_time += steal;

		return steal;
	}
#endif
	return 0;
}

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline u64 account_other_time(u64 max)
{
	u64 accounted;

	lockdep_assert_irqs_disabled();

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_tick_accounted(max - accounted);

	return accounted;
}

#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
	return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
#endif

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	u64 utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	/*
	 * Update current task runtime to account pending time since last
	 * scheduler action or thread_group_cputime() call. This thread group
	 * might have other running tasks on different CPUs, but updating
	 * their runtime can affect syscall performance, so we skip accounting
	 * those pending times and rely only on values updated on tick or
	 * other scheduler action.
	 */
	if (same_thread_group(current, tsk))
		(void) task_sched_runtime(current);

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += read_sum_exec_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}
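
/*
 * Illustrative sketch (not part of this file) of the lockless-then-locked
 * read pattern used above: the first pass runs with an even sequence
 * value and no lock; if a writer raced with us, need_seqretry() forces a
 * second pass that takes the seqlock for a stable read. "example_lock"
 * and "example_value" are made-up names.
 */
static inline u64 example_stats_read(seqlock_t *example_lock,
				     const u64 *example_value)
{
	unsigned long flags;
	unsigned int seq, nextseq = 0;	/* even: lockless first pass */
	u64 val;

	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(example_lock, &seq);
		val = *example_value;
		/* If the lockless pass raced a writer, lock next time. */
		nextseq = 1;
	} while (need_seqretry(example_lock, seq));
	done_seqretry_irqrestore(example_lock, seq, flags);

	return val;
}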

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the CPU time gets accounted to
 * @user_tick: is the tick from userspace
 * @ticks: number of pending ticks to account
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * Check for hardirq is done both for system and user time as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq or
 * softirq time, as those no longer count toward task exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 int ticks)
{
	u64 other, cputime = TICK_NSEC * ticks;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;

	cputime -= other;

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime);
	} else if (p == this_rq()->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime);
	} else {
		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
	}
}
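
/*
 * Worked example (illustrative numbers only): returning from a long idle
 * period with ticks = 5, cputime starts at 5 * TICK_NSEC. If
 * account_other_time() finds 2 ticks' worth of steal time and 1 tick of
 * pending irq time, 3 ticks are consumed there and only the remaining 2
 * are accounted to the interrupted context via the demultiplexing chain
 * above.
 */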

static void irqtime_account_idle_ticks(int ticks)
{
	irqtime_account_process_tick(current, 0, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
{
	unsigned int pc = irq_count() - offset;

	if (pc & HARDIRQ_OFFSET) {
		vtime_account_hardirq(tsk);
	} else if (pc & SOFTIRQ_OFFSET) {
		vtime_account_softirq(tsk);
	} else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
		   is_idle_task(tsk)) {
		vtime_account_idle(tsk);
	} else {
		vtime_account_kernel(tsk);
	}
}

void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	*ut = curr->utime;
	*st = curr->stime;
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */

/*
 * Account a single tick of CPU time.
 * @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	u64 cputime, steal;

	if (vtime_accounting_enabled_this_cpu())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, 1);
		return;
	}

	cputime = TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;

	if (user_tick)
		account_user_time(p, cputime);
	else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime);
	else
		account_idle_time(cputime);
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	u64 cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = ticks * TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on whether the random scheduling
 * timeslices of a task happen to be interrupted by the timer or not.
 * Depending on these circumstances, the number of these interrupts may
 * over- or underestimate the real user and system cputime, matching it
 * only with a variable precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
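
/*
 * Worked example (illustrative numbers only): with prev->stime ==
 * prev->utime == 0, rtime = 10ms of precise runtime, and tick sampling
 * reporting stime = 2ms and utime = 3ms, the code below computes
 * stime = 10 * 2 / (2 + 3) = 4ms and utime = rtime - stime = 6ms: the
 * observed 2:3 ratio is kept while the sum matches rtime exactly.
 */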
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	u64 rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = curr->sum_exec_runtime;

	/*
	 * This is possible under two circumstances:
	 *   - rtime isn't monotonic after all (a bug);
	 *   - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	/*
	 * If either stime or utime are 0, assume all runtime is userspace.
	 * Once a task gets some ticks, the monotonicity code at 'update:'
	 * will ensure things converge to the observed ratio.
	 */
	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);

update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	if (task_cputime(p, &cputime.utime, &cputime.stime))
		cputime.sum_exec_runtime = task_sched_runtime(p);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
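
/*
 * Usage sketch (illustrative, not part of this file): callers such as
 * the /proc reporting code fetch a task's adjusted, monotonic times
 * roughly like this. example_report_times() is a made-up name.
 */
static inline void example_report_times(struct task_struct *p)
{
	u64 ut, st;

	task_cputime_adjusted(p, &ut, &st);
	/* ut and st are nanosecond values with the guarantees documented above */
}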

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
	unsigned long long clock;

	clock = sched_clock();
	if (clock < vtime->starttime)
		return 0;

	return clock - vtime->starttime;
}

static u64 get_vtime_delta(struct vtime *vtime)
{
	u64 delta = vtime_delta(vtime);
	u64 other;

	/*
	 * Unlike tick based timing, vtime based timing never loses ticks,
	 * so there is no need for steal time accounting to make up for
	 * lost ticks. Vtime accounts a rounded version of the actual
	 * elapsed time. Limit account_other_time to prevent rounding
	 * errors from causing elapsed vtime to go negative.
	 */
	other = account_other_time(delta);
	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
	vtime->starttime += delta;

	return delta - other;
}

static void vtime_account_system(struct task_struct *tsk,
				 struct vtime *vtime)
{
	vtime->stime += get_vtime_delta(vtime);
	if (vtime->stime >= TICK_NSEC) {
		account_system_time(tsk, irq_count(), vtime->stime);
		vtime->stime = 0;
	}
}

static void vtime_account_guest(struct task_struct *tsk,
				struct vtime *vtime)
{
	vtime->gtime += get_vtime_delta(vtime);
	if (vtime->gtime >= TICK_NSEC) {
		account_guest_time(tsk, vtime->gtime);
		vtime->gtime = 0;
	}
}

static void __vtime_account_kernel(struct task_struct *tsk,
				   struct vtime *vtime)
{
	/* We might have scheduled out from guest path */
	if (vtime->state == VTIME_GUEST)
		vtime_account_guest(tsk, vtime);
	else
		vtime_account_system(tsk, vtime);
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	if (!vtime_delta(vtime))
		return;

	write_seqcount_begin(&vtime->seqcount);
	__vtime_account_kernel(tsk, vtime);
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	vtime->state = VTIME_USER;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime->utime += get_vtime_delta(vtime);
	if (vtime->utime >= TICK_NSEC) {
		account_user_time(tsk, vtime->utime);
		vtime->utime = 0;
	}
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	/*
	 * The flags must be updated under the lock, together with
	 * the vtime_starttime flush and update.
	 * That enforces the right ordering and update-sequence
	 * synchronization against the reader (task_gtime()),
	 * which can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	tsk->flags |= PF_VCPU;
	vtime->state = VTIME_GUEST;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_guest(tsk, vtime);
	tsk->flags &= ~PF_VCPU;
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
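
/*
 * Pairing sketch (illustrative, not part of this file): guest execution
 * is bracketed by the two helpers above. In the real kernel this happens
 * indirectly via the context-tracking guest entry/exit paths rather than
 * by open-coded calls; example_run_guest() is a made-up name.
 */
static inline void example_run_guest(struct task_struct *tsk)
{
	vtime_guest_enter(tsk);	/* flush stime, set PF_VCPU, state = VTIME_GUEST */
	/* ... run the vCPU ... */
	vtime_guest_exit(tsk);	/* flush gtime, clear PF_VCPU, state = VTIME_SYS */
}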

void vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(get_vtime_delta(&tsk->vtime));
}

void vtime_task_switch_generic(struct task_struct *prev)
{
	struct vtime *vtime = &prev->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (vtime->state == VTIME_IDLE)
		vtime_account_idle(prev);
	else
		__vtime_account_kernel(prev, vtime);
	vtime->state = VTIME_INACTIVE;
	vtime->cpu = -1;
	write_seqcount_end(&vtime->seqcount);

	vtime = &current->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (is_idle_task(current))
		vtime->state = VTIME_IDLE;
	else if (current->flags & PF_VCPU)
		vtime->state = VTIME_GUEST;
	else
		vtime->state = VTIME_SYS;
	vtime->starttime = sched_clock();
	vtime->cpu = smp_processor_id();
	write_seqcount_end(&vtime->seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	struct vtime *vtime = &t->vtime;
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&vtime->seqcount);
	vtime->state = VTIME_IDLE;
	vtime->starttime = sched_clock();
	vtime->cpu = cpu;
	write_seqcount_end(&vtime->seqcount);
	local_irq_restore(flags);
}

u64 task_gtime(struct task_struct *t)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&vtime->seqcount);

		gtime = t->gtime;
		if (vtime->state == VTIME_GUEST)
			gtime += vtime->gtime + vtime_delta(vtime);

	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * vtime snapshot.
 */
bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 delta;
	int ret;

	if (!vtime_accounting_enabled()) {
		*utime = t->utime;
		*stime = t->stime;
		return false;
	}

	do {
		ret = false;
		seq = read_seqcount_begin(&vtime->seqcount);

		*utime = t->utime;
		*stime = t->stime;

		/* Task is sleeping or idle, nothing to add */
		if (vtime->state < VTIME_SYS)
			continue;

		ret = true;
		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (vtime->state == VTIME_SYS)
			*stime += vtime->stime + delta;
		else
			*utime += vtime->utime + delta;
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return ret;
}

static int vtime_state_fetch(struct vtime *vtime, int cpu)
{
	int state = READ_ONCE(vtime->state);

	/*
	 * We raced against a context switch, fetch the
	 * kcpustat task again.
	 */
	if (vtime->cpu != cpu && vtime->cpu != -1)
		return -EAGAIN;

	/*
	 * Two possible things here:
	 * 1) We are seeing the scheduling out task (prev) or any past one.
	 * 2) We are seeing the scheduling in task (next) but it hasn't
	 *    passed through vtime_task_switch() yet, so the pending
	 *    cputime of the prev task may not be flushed yet.
	 *
	 * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
	 */
	if (state == VTIME_INACTIVE)
		return -EAGAIN;

	return state;
}

static u64 kcpustat_user_vtime(struct vtime *vtime)
{
	if (vtime->state == VTIME_USER)
		return vtime->utime + vtime_delta(vtime);
	else if (vtime->state == VTIME_GUEST)
		return vtime->gtime + vtime_delta(vtime);
	return 0;
}

static int kcpustat_field_vtime(u64 *cpustat,
				struct task_struct *tsk,
				enum cpu_usage_stat usage,
				int cpu, u64 *val)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;

	do {
		int state;

		seq = read_seqcount_begin(&vtime->seqcount);

		state = vtime_state_fetch(vtime, cpu);
		if (state < 0)
			return state;

		*val = cpustat[usage];

		/*
		 * Nice VS unnice cputime accounting may be inaccurate if
		 * the nice value has changed since the last vtime update.
		 * But a proper fix would involve interrupting the target on
		 * nice updates, which is a no-go on nohz_full (although the
		 * scheduler may still interrupt the target if rescheduling
		 * is needed...)
		 */
		switch (usage) {
		case CPUTIME_SYSTEM:
			if (state == VTIME_SYS)
				*val += vtime->stime + vtime_delta(vtime);
			break;
		case CPUTIME_USER:
			if (task_nice(tsk) <= 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_NICE:
			if (task_nice(tsk) > 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_GUEST:
			if (state == VTIME_GUEST && task_nice(tsk) <= 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		case CPUTIME_GUEST_NICE:
			if (state == VTIME_GUEST && task_nice(tsk) > 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		default:
			break;
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

u64 kcpustat_field(struct kernel_cpustat *kcpustat,
		   enum cpu_usage_stat usage, int cpu)
{
	u64 *cpustat = kcpustat->cpustat;
	u64 val = cpustat[usage];
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu))
		return val;

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			return cpustat[usage];
		}

		err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
		rcu_read_unlock();

		if (!err)
			return val;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_field);
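
/*
 * Usage sketch (illustrative, not part of this file): fetch one cpustat
 * field of a remote CPU with pending nohz_full vtime folded in, the way
 * /proc/stat style readers would. example_cpu_system_ns() is a made-up
 * name; kcpustat_cpu() comes from <linux/kernel_stat.h>.
 */
static inline u64 example_cpu_system_ns(int cpu)
{
	return kcpustat_field(&kcpustat_cpu(cpu), CPUTIME_SYSTEM, cpu);
}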

static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
				    const struct kernel_cpustat *src,
				    struct task_struct *tsk, int cpu)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;

	do {
		u64 *cpustat;
		u64 delta;
		int state;

		seq = read_seqcount_begin(&vtime->seqcount);

		state = vtime_state_fetch(vtime, cpu);
		if (state < 0)
			return state;

		*dst = *src;
		cpustat = dst->cpustat;

		/* Task is sleeping, dead or idle, nothing to add */
		if (state < VTIME_SYS)
			continue;

		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (state == VTIME_SYS) {
			cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
		} else if (state == VTIME_USER) {
			if (task_nice(tsk) > 0)
				cpustat[CPUTIME_NICE] += vtime->utime + delta;
			else
				cpustat[CPUTIME_USER] += vtime->utime + delta;
		} else {
			WARN_ON_ONCE(state != VTIME_GUEST);
			if (task_nice(tsk) > 0) {
				cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
				cpustat[CPUTIME_NICE] += vtime->gtime + delta;
			} else {
				cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
				cpustat[CPUTIME_USER] += vtime->gtime + delta;
			}
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
	const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu)) {
		*dst = *src;
		return;
	}

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			*dst = *src;
			return;
		}

		err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
		rcu_read_unlock();

		if (!err)
			return;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
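
/*
 * Usage sketch (illustrative, not part of this file): snapshot a whole
 * CPU's cpustat array at once and read one entry from the copy.
 * example_cpu_idle_ns() is a made-up name.
 */
static inline u64 example_cpu_idle_ns(int cpu)
{
	struct kernel_cpustat snap;

	kcpustat_cpu_fetch(&snap, cpu);
	return snap.cpustat[CPUTIME_IDLE];
}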

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */