kernel/sched/cputime.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple CPU accounting cgroup controller
 */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #include <asm/cputime.h>
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with a side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);

static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}

static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
				  enum cpu_usage_stat idx)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	u64_stats_update_begin(&irqtime->sync);
	cpustat[idx] += delta;
	irqtime->total += delta;
	irqtime->tick_delta += delta;
	u64_stats_update_end(&irqtime->sync);
}

/*
 * Called after incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	unsigned int pc;
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
	irqtime->irq_start_time += delta;
	pc = irq_count() - offset;

	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to the ksoftirqd
	 * thread in that case, so as not to confuse the scheduler with a
	 * special task that does not consume any time but still wants to run.
	 */
	if (pc & HARDIRQ_MASK)
		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
	else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
}
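
/*
 * Editor's illustration only (a sketch of the assumed call pattern, not
 * code from this file): a hardirq path is expected to bracket the handler
 * roughly as
 *
 *	preempt_count_add(HARDIRQ_OFFSET);
 *	irqtime_account_irq(current, HARDIRQ_OFFSET);	// pre-irq slice stays task time
 *	... run the handler ...
 *	irqtime_account_irq(current, 0);		// handler slice -> CPUTIME_IRQ
 *	preempt_count_sub(HARDIRQ_OFFSET);
 *
 * At the entry call, pc excludes the freshly added HARDIRQ_OFFSET, so the
 * elapsed slice is not counted as irq time; at the exit call pc still
 * includes it, so the handler's slice is.
 */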

static u64 irqtime_tick_accounted(u64 maxtime)
{
	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
	u64 delta;

	delta = min(irqtime->tick_delta, maxtime);
	irqtime->tick_delta -= delta;

	return delta;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static u64 irqtime_tick_accounted(u64 dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * get ourselves ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cgroup_account_cputime_field(p, index, tmp);
}

/*
 * Account user CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in user space since the last update
 */
void account_user_time(struct task_struct *p, u64 cputime)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in a virtual machine since the last update
 */
void account_guest_time(struct task_struct *p, u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		task_group_account_field(p, CPUTIME_NICE, cputime);
		cpustat[CPUTIME_GUEST_NICE] += cputime;
	} else {
		task_group_account_field(p, CPUTIME_USER, cputime);
		cpustat[CPUTIME_GUEST] += cputime;
	}
}

/*
 * Account system CPU time to a process and desired cpustat field
 * @p: the process that the CPU time gets accounted to
 * @cputime: the CPU time spent in kernel space since the last update
 * @index: the cpustat field that has to be updated
 */
void account_system_index_time(struct task_struct *p,
			       u64 cputime, enum cpu_usage_stat index)
{
	/* Add system time to process. */
	p->stime += cputime;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system CPU time to a process.
 * @p: the process that the CPU time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the CPU time spent in kernel space since the last update
 */
void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	account_system_index_time(p, cputime, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the CPU time spent in involuntary wait
 */
void account_steal_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += cputime;
}

/*
 * Account for idle time.
 * @cputime: the CPU time spent in idle wait
 */
void account_idle_time(u64 cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += cputime;
	else
		cpustat[CPUTIME_IDLE] += cputime;
}

#ifdef CONFIG_SCHED_CORE
/*
 * Account for forceidle time due to core scheduling.
 *
 * REQUIRES: schedstat is enabled.
 */
void __account_forceidle_time(struct task_struct *p, u64 delta)
{
	__schedstat_add(p->stats.core_forceidle_sum, delta);

	task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
}
#endif

/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Because of that, this function may on
 * occasion account more time than the calling functions think has elapsed.
 */
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;
		steal = min(steal, maxtime);
		account_steal_time(steal);
		this_rq()->prev_steal_time += steal;

		return steal;
	}
#endif
	return 0;
}
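
/*
 * Editor's note: paravirt_steal_clock() reports cumulative stolen time for
 * this CPU, so prev_steal_time tracks how much of it has already been folded
 * into CPUTIME_STEAL; only the clamped difference is accounted on each call.
 */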

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline u64 account_other_time(u64 max)
{
	u64 accounted;

	lockdep_assert_irqs_disabled();

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_tick_accounted(max - accounted);

	return accounted;
}
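
/*
 * Editor's note: the irq-aware tick path below (irqtime_account_process_tick())
 * calls this with max == ULONG_MAX to drain everything pending, while the
 * vtime path (get_vtime_delta()) caps it at the just-measured delta so that
 * rounding can never drive elapsed vtime negative.
 */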

#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
	return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
#endif
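
/*
 * Editor's note: on 32-bit, an unlocked read of the 64-bit sum_exec_runtime
 * could tear against a concurrent update, so the variant above takes the
 * task's rq lock; on 64-bit, a plain load of a naturally aligned u64 is
 * sufficient.
 */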

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	u64 utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	/*
	 * Update the current task's runtime to account for pending time since
	 * the last scheduler action or thread_group_cputime() call. This
	 * thread group might have other running tasks on different CPUs, but
	 * updating their runtime can affect syscall performance, so we skip
	 * accounting those pending times and rely only on values updated on
	 * tick or other scheduler action.
	 */
	if (same_thread_group(current, tsk))
		(void) task_sched_runtime(current);

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += read_sum_exec_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}
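
/*
 * Editor's illustration only (hypothetical caller): summing a whole thread
 * group, assuming tsk is a task pointer the caller holds a reference on:
 *
 *	struct task_cputime sum;
 *
 *	thread_group_cputime(tsk, &sum);
 *	// sum.utime/sum.stime now cover dead and still-running threads
 */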

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the CPU time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq/softirq
 * time, as those no longer count in task exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 int ticks)
{
	u64 other, cputime = TICK_NSEC * ticks;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;

	cputime -= other;

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time,
		 * so we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime);
	} else if (p == this_rq()->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime);
	} else {
		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	irqtime_account_process_tick(current, 0, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) { }
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						int nr_ticks) { }
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE

# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_kernel(prev);

	vtime_flush(prev);
	arch_vtime_task_switch(prev);
}
# endif

void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
{
	unsigned int pc = irq_count() - offset;

	if (pc & HARDIRQ_OFFSET) {
		vtime_account_hardirq(tsk);
	} else if (pc & SOFTIRQ_OFFSET) {
		vtime_account_softirq(tsk);
	} else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
		   is_idle_task(tsk)) {
		vtime_account_idle(tsk);
	} else {
		vtime_account_kernel(tsk);
	}
}

void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	*ut = curr->utime;
	*st = curr->stime;
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}

#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */

/*
 * Account a single tick of CPU time.
 * @p: the process that the CPU time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	u64 cputime, steal;

	if (vtime_accounting_enabled_this_cpu())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, 1);
		return;
	}

	cputime = TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;

	if (user_tick)
		account_user_time(p, cputime);
	else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime);
	else
		account_idle_time(cputime);
}

/*
 * Account multiple ticks of idle time.
 * @ticks: number of stolen ticks
 */
void account_idle_ticks(unsigned long ticks)
{
	u64 cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = ticks * TICK_NSEC;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

/*
 * Adjust the imprecision of tick based cputime samples against the
 * scheduler's runtime accounting.
 *
 * Tick based cputime accounting depends on whether a task's random
 * scheduling timeslices happen to be interrupted by the timer or not.
 * Depending on these circumstances, the number of these interrupts may be
 * over- or under-estimated, matching the real user and system cputime with
 * a variable precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
		    u64 *ut, u64 *st)
{
	u64 rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = curr->sum_exec_runtime;

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	/*
	 * If either stime or utime are 0, assume all runtime is userspace.
	 * Once a task gets some ticks, the monotonicity code at 'update:'
	 * will ensure things converge to the observed ratio.
	 */
	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
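	/*
	 * Editor's worked example (illustration only): with tick samples
	 * stime = 2ms and utime = 6ms but scheduler runtime rtime = 10ms,
	 * the line above yields stime = 2 * 10 / (2 + 6) = 2.5ms, and
	 * 'update:' below derives utime = rtime - stime = 7.5ms, preserving
	 * the observed 1:3 ratio over the more precisely measured rtime.
	 */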

update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}

void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	if (task_cputime(p, &cputime.utime, &cputime.stime))
		cputime.sum_exec_runtime = task_sched_runtime(p);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static u64 vtime_delta(struct vtime *vtime)
{
	unsigned long long clock;

	clock = sched_clock();
	if (clock < vtime->starttime)
		return 0;

	return clock - vtime->starttime;
}

static u64 get_vtime_delta(struct vtime *vtime)
{
	u64 delta = vtime_delta(vtime);
	u64 other;

	/*
	 * Unlike tick based timing, vtime based timing never has lost
	 * ticks, and there is no need for steal time accounting to make
	 * up for lost ticks. Vtime accounts a rounded version of the
	 * actual elapsed time. Limit account_other_time to prevent
	 * rounding errors from causing elapsed vtime to go negative.
	 */
	other = account_other_time(delta);
	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
	vtime->starttime += delta;

	return delta - other;
}

static void vtime_account_system(struct task_struct *tsk,
				 struct vtime *vtime)
{
	vtime->stime += get_vtime_delta(vtime);
	if (vtime->stime >= TICK_NSEC) {
		account_system_time(tsk, irq_count(), vtime->stime);
		vtime->stime = 0;
	}
}

static void vtime_account_guest(struct task_struct *tsk,
				struct vtime *vtime)
{
	vtime->gtime += get_vtime_delta(vtime);
	if (vtime->gtime >= TICK_NSEC) {
		account_guest_time(tsk, vtime->gtime);
		vtime->gtime = 0;
	}
}

static void __vtime_account_kernel(struct task_struct *tsk,
				   struct vtime *vtime)
{
	/* We might have scheduled out from guest path */
	if (vtime->state == VTIME_GUEST)
		vtime_account_guest(tsk, vtime);
	else
		vtime_account_system(tsk, vtime);
}

void vtime_account_kernel(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	if (!vtime_delta(vtime))
		return;

	write_seqcount_begin(&vtime->seqcount);
	__vtime_account_kernel(tsk, vtime);
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	vtime->state = VTIME_USER;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_user_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime->utime += get_vtime_delta(vtime);
	if (vtime->utime >= TICK_NSEC) {
		account_user_time(tsk, vtime->utime);
		vtime->utime = 0;
	}
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;
	/*
	 * The flags must be updated under the seqcount together with
	 * the vtime_starttime flush and update.
	 * That enforces the right ordering and update-sequence
	 * synchronization against the reader (task_gtime()),
	 * which can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&vtime->seqcount);
	vtime_account_system(tsk, vtime);
	tsk->flags |= PF_VCPU;
	vtime->state = VTIME_GUEST;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	struct vtime *vtime = &tsk->vtime;

	write_seqcount_begin(&vtime->seqcount);
	vtime_account_guest(tsk, vtime);
	tsk->flags &= ~PF_VCPU;
	vtime->state = VTIME_SYS;
	write_seqcount_end(&vtime->seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	account_idle_time(get_vtime_delta(&tsk->vtime));
}

void vtime_task_switch_generic(struct task_struct *prev)
{
	struct vtime *vtime = &prev->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (vtime->state == VTIME_IDLE)
		vtime_account_idle(prev);
	else
		__vtime_account_kernel(prev, vtime);
	vtime->state = VTIME_INACTIVE;
	vtime->cpu = -1;
	write_seqcount_end(&vtime->seqcount);

	vtime = &current->vtime;

	write_seqcount_begin(&vtime->seqcount);
	if (is_idle_task(current))
		vtime->state = VTIME_IDLE;
	else if (current->flags & PF_VCPU)
		vtime->state = VTIME_GUEST;
	else
		vtime->state = VTIME_SYS;
	vtime->starttime = sched_clock();
	vtime->cpu = smp_processor_id();
	write_seqcount_end(&vtime->seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	struct vtime *vtime = &t->vtime;
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&vtime->seqcount);
	vtime->state = VTIME_IDLE;
	vtime->starttime = sched_clock();
	vtime->cpu = cpu;
	write_seqcount_end(&vtime->seqcount);
	local_irq_restore(flags);
}

u64 task_gtime(struct task_struct *t)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&vtime->seqcount);

		gtime = t->gtime;
		if (vtime->state == VTIME_GUEST)
			gtime += vtime->gtime + vtime_delta(vtime);

	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
	struct vtime *vtime = &t->vtime;
	unsigned int seq;
	u64 delta;
	int ret;

	if (!vtime_accounting_enabled()) {
		*utime = t->utime;
		*stime = t->stime;
		return false;
	}

	do {
		ret = false;
		seq = read_seqcount_begin(&vtime->seqcount);

		*utime = t->utime;
		*stime = t->stime;

		/* Task is sleeping or idle, nothing to add */
		if (vtime->state < VTIME_SYS)
			continue;

		ret = true;
		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (vtime->state == VTIME_SYS)
			*stime += vtime->stime + delta;
		else
			*utime += vtime->utime + delta;
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return ret;
}

static int vtime_state_fetch(struct vtime *vtime, int cpu)
{
	int state = READ_ONCE(vtime->state);

	/*
	 * We raced against a context switch, fetch the
	 * kcpustat task again.
	 */
	if (vtime->cpu != cpu && vtime->cpu != -1)
		return -EAGAIN;

	/*
	 * Two possible things here:
	 * 1) We are seeing the scheduling out task (prev) or any past one.
	 * 2) We are seeing the scheduling in task (next) but it hasn't
	 *    passed through vtime_task_switch() yet, so the pending
	 *    cputime of the prev task may not be flushed yet.
	 *
	 * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
	 */
	if (state == VTIME_INACTIVE)
		return -EAGAIN;

	return state;
}

static u64 kcpustat_user_vtime(struct vtime *vtime)
{
	if (vtime->state == VTIME_USER)
		return vtime->utime + vtime_delta(vtime);
	else if (vtime->state == VTIME_GUEST)
		return vtime->gtime + vtime_delta(vtime);
	return 0;
}

static int kcpustat_field_vtime(u64 *cpustat,
				struct task_struct *tsk,
				enum cpu_usage_stat usage,
				int cpu, u64 *val)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;

	do {
		int state;

		seq = read_seqcount_begin(&vtime->seqcount);

		state = vtime_state_fetch(vtime, cpu);
		if (state < 0)
			return state;

		*val = cpustat[usage];

		/*
		 * Nice vs. unnice cputime accounting may be inaccurate if
		 * the nice value has changed since the last vtime update.
		 * But a proper fix would involve interrupting the target on
		 * nice updates, which is a no-go on nohz_full (although the
		 * scheduler may still interrupt the target if rescheduling
		 * is needed...)
		 */
		switch (usage) {
		case CPUTIME_SYSTEM:
			if (state == VTIME_SYS)
				*val += vtime->stime + vtime_delta(vtime);
			break;
		case CPUTIME_USER:
			if (task_nice(tsk) <= 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_NICE:
			if (task_nice(tsk) > 0)
				*val += kcpustat_user_vtime(vtime);
			break;
		case CPUTIME_GUEST:
			if (state == VTIME_GUEST && task_nice(tsk) <= 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		case CPUTIME_GUEST_NICE:
			if (state == VTIME_GUEST && task_nice(tsk) > 0)
				*val += vtime->gtime + vtime_delta(vtime);
			break;
		default:
			break;
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

u64 kcpustat_field(struct kernel_cpustat *kcpustat,
		   enum cpu_usage_stat usage, int cpu)
{
	u64 *cpustat = kcpustat->cpustat;
	u64 val = cpustat[usage];
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu))
		return val;

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			return cpustat[usage];
		}

		err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
		rcu_read_unlock();

		if (!err)
			return val;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_field);
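
/*
 * Editor's illustration only (hypothetical caller): reading CPU 3's idle
 * time in a way that stays accurate on nohz_full CPUs:
 *
 *	u64 idle = kcpustat_field(&kcpustat_cpu(3), CPUTIME_IDLE, 3);
 */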

static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
				    const struct kernel_cpustat *src,
				    struct task_struct *tsk, int cpu)
{
	struct vtime *vtime = &tsk->vtime;
	unsigned int seq;

	do {
		u64 *cpustat;
		u64 delta;
		int state;

		seq = read_seqcount_begin(&vtime->seqcount);

		state = vtime_state_fetch(vtime, cpu);
		if (state < 0)
			return state;

		*dst = *src;
		cpustat = dst->cpustat;

		/* Task is sleeping, dead or idle, nothing to add */
		if (state < VTIME_SYS)
			continue;

		delta = vtime_delta(vtime);

		/*
		 * Task runs either in user (including guest) or kernel space,
		 * add pending nohz time to the right place.
		 */
		if (state == VTIME_SYS) {
			cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
		} else if (state == VTIME_USER) {
			if (task_nice(tsk) > 0)
				cpustat[CPUTIME_NICE] += vtime->utime + delta;
			else
				cpustat[CPUTIME_USER] += vtime->utime + delta;
		} else {
			WARN_ON_ONCE(state != VTIME_GUEST);
			if (task_nice(tsk) > 0) {
				cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
				cpustat[CPUTIME_NICE] += vtime->gtime + delta;
			} else {
				cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
				cpustat[CPUTIME_USER] += vtime->gtime + delta;
			}
		}
	} while (read_seqcount_retry(&vtime->seqcount, seq));

	return 0;
}

void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
	const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
	struct rq *rq;
	int err;

	if (!vtime_accounting_enabled_cpu(cpu)) {
		*dst = *src;
		return;
	}

	rq = cpu_rq(cpu);

	for (;;) {
		struct task_struct *curr;

		rcu_read_lock();
		curr = rcu_dereference(rq->curr);
		if (WARN_ON_ONCE(!curr)) {
			rcu_read_unlock();
			*dst = *src;
			return;
		}

		err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
		rcu_read_unlock();

		if (!err)
			return;

		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
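
/*
 * Editor's illustration only (hypothetical caller): taking a nohz-safe
 * snapshot of CPU 0's whole cpustat array:
 *
 *	struct kernel_cpustat snap;
 *
 *	kcpustat_cpu_fetch(&snap, 0);
 */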

#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */