// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/sched/cputime.h>

static DEFINE_SPINLOCK(cgroup_rstat_lock);
static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
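
/*
 * cgroup_rstat_lock serializes flushers across the whole hierarchy, while
 * each entry of cgroup_rstat_cpu_lock protects that cpu's updated_children
 * tree.  cgroup_rstat_updated() only takes the per-cpu lock for the cpu
 * being updated; flushers take the global lock and then each per-cpu lock
 * in turn.
 */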

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);

static struct cgroup_rstat_cpu *cgroup_rstat_cpu(struct cgroup *cgrp, int cpu)
{
	return per_cpu_ptr(cgrp->rstat_cpu, cpu);
}

/**
 * cgroup_rstat_updated - keep track of updated rstat_cpu
 * @cgrp: target cgroup
 * @cpu: cpu on which rstat_cpu was updated
 *
 * @cgrp's rstat_cpu on @cpu was updated.  Put it on the parent's matching
 * rstat_cpu->updated_children list.  See the comment on top of
 * cgroup_rstat_cpu definition for details.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
	unsigned long flags;

	/*
	 * Speculative already-on-list test. This may race leading to
	 * temporary inaccuracies, which is fine.
	 *
	 * Because @parent's updated_children is terminated with @parent
	 * instead of NULL, we can tell whether @cgrp is on the list by
	 * testing the next pointer for NULL.
	 */
	if (data_race(cgroup_rstat_cpu(cgrp, cpu)->updated_next))
		return;

	raw_spin_lock_irqsave(cpu_lock, flags);

	/* put @cgrp and all ancestors on the corresponding updated lists */
	while (true) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
		struct cgroup *parent = cgroup_parent(cgrp);
		struct cgroup_rstat_cpu *prstatc;

		/*
		 * Both additions and removals are bottom-up.  If a cgroup
		 * is already in the tree, all ancestors are.
		 */
		if (rstatc->updated_next)
			break;

		/* Root has no parent to link it to, but mark it busy */
		if (!parent) {
			rstatc->updated_next = cgrp;
			break;
		}
		prstatc = cgroup_rstat_cpu(parent, cpu);
		rstatc->updated_next = prstatc->updated_children;
		prstatc->updated_children = cgrp;
		cgrp = parent;
	}
	raw_spin_unlock_irqrestore(cpu_lock, flags);
}
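
/*
 * Illustrative example (hypothetical hierarchy root -> A -> B, everything
 * idle beforehand): after cgroup_rstat_updated(B, cpu) the per-cpu links
 * on @cpu are roughly
 *
 *	cgroup_rstat_cpu(B, cpu)->updated_next        == A    (terminator)
 *	cgroup_rstat_cpu(A, cpu)->updated_children    == B
 *	cgroup_rstat_cpu(A, cpu)->updated_next        == root (terminator)
 *	cgroup_rstat_cpu(root, cpu)->updated_children == A
 *	cgroup_rstat_cpu(root, cpu)->updated_next     == root (busy marker)
 *
 * An on-list cgroup therefore always has a non-NULL updated_next, which
 * is what the speculative check above relies on.
 */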

/**
 * cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
 * @pos: current position
 * @root: root of the tree to traverse
 * @cpu: target cpu
 *
 * Walks the updated rstat_cpu tree on @cpu from @root.  %NULL @pos starts
 * the traversal and %NULL return indicates the end.  During traversal,
 * each returned cgroup is unlinked from the tree.  Must be called with the
 * matching cgroup_rstat_cpu_lock held.
 *
 * The only ordering guarantee is that, for a parent and a child pair
 * covered by a given traversal, if a child is visited, its parent is
 * guaranteed to be visited afterwards.
 */
static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
						   struct cgroup *root, int cpu)
{
	struct cgroup_rstat_cpu *rstatc;
	struct cgroup *parent;

	if (pos == root)
		return NULL;

	/*
	 * We're going to walk down to the first leaf and visit/remove it.
	 * We can pick any unvisited node as the starting point.
	 */
	if (!pos) {
		pos = root;
	} else {
		/* return NULL if this subtree is not on-list */
		if (!cgroup_rstat_cpu(pos, cpu)->updated_next)
			return NULL;

		pos = cgroup_parent(pos);
	}

	/* walk down to the first leaf */
	while (true) {
		rstatc = cgroup_rstat_cpu(pos, cpu);
		if (rstatc->updated_children == pos)
			break;
		pos = rstatc->updated_children;
	}

	/*
	 * Unlink @pos from the tree.  As the updated_children list is
	 * singly linked, we have to walk it to find the removal point.
	 * However, due to the way we traverse, @pos will be the first
	 * child in most cases. The only exception is @root.
	 */
	parent = cgroup_parent(pos);
	if (parent) {
		struct cgroup_rstat_cpu *prstatc;
		struct cgroup **nextp;

		prstatc = cgroup_rstat_cpu(parent, cpu);
		nextp = &prstatc->updated_children;
		while (*nextp != pos) {
			struct cgroup_rstat_cpu *nrstatc;

			nrstatc = cgroup_rstat_cpu(*nextp, cpu);
			WARN_ON_ONCE(*nextp == parent);
			nextp = &nrstatc->updated_next;
		}
		*nextp = rstatc->updated_next;
	}

	rstatc->updated_next = NULL;
	return pos;
}
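
/*
 * Because children are always popped before their ancestors, a flusher
 * iterating in this order can fold each child's delta into its parent
 * before the parent itself is flushed; see cgroup_base_stat_flush().
 */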

/* see cgroup_rstat_flush() */
static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
{
	int cpu;

	lockdep_assert_held(&cgroup_rstat_lock);

	for_each_possible_cpu(cpu) {
		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
						       cpu);
		struct cgroup *pos = NULL;

		raw_spin_lock(cpu_lock);
		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
			struct cgroup_subsys_state *css;

			cgroup_base_stat_flush(pos, cpu);

			rcu_read_lock();
			list_for_each_entry_rcu(css, &pos->rstat_css_list,
						rstat_css_node)
				css->ss->css_rstat_flush(css, cpu);
			rcu_read_unlock();
		}
		raw_spin_unlock(cpu_lock);

		/* if @may_sleep, play nice and yield if necessary */
		if (may_sleep && (need_resched() ||
				  spin_needbreak(&cgroup_rstat_lock))) {
			spin_unlock_irq(&cgroup_rstat_lock);
			if (!cond_resched())
				cpu_relax();
			spin_lock_irq(&cgroup_rstat_lock);
		}
	}
}

/**
 * cgroup_rstat_flush - flush stats in @cgrp's subtree
 * @cgrp: target cgroup
 *
 * Collect all per-cpu stats in @cgrp's subtree into the global counters
 * and propagate them upwards.  After this function returns, all cgroups in
 * the subtree have up-to-date ->stat.
 *
 * This also gets all cgroups in the subtree including @cgrp off the
 * ->updated_children lists.
 *
 * This function may block.
 */
void cgroup_rstat_flush(struct cgroup *cgrp)
{
	might_sleep();

	spin_lock_irq(&cgroup_rstat_lock);
	cgroup_rstat_flush_locked(cgrp, true);
	spin_unlock_irq(&cgroup_rstat_lock);
}

/**
 * cgroup_rstat_flush_irqsafe - irqsafe version of cgroup_rstat_flush()
 * @cgrp: target cgroup
 *
 * This function can be called from any context.
 */
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp)
{
	unsigned long flags;

	spin_lock_irqsave(&cgroup_rstat_lock, flags);
	cgroup_rstat_flush_locked(cgrp, false);
	spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
}

/**
 * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
 * @cgrp: target cgroup
 *
 * Flush stats in @cgrp's subtree and prevent further flushes.  Must be
 * paired with cgroup_rstat_flush_release().
 *
 * This function may block.
 */
void cgroup_rstat_flush_hold(struct cgroup *cgrp)
	__acquires(&cgroup_rstat_lock)
{
	might_sleep();
	spin_lock_irq(&cgroup_rstat_lock);
	cgroup_rstat_flush_locked(cgrp, true);
}

/**
 * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
 */
void cgroup_rstat_flush_release(void)
	__releases(&cgroup_rstat_lock)
{
	spin_unlock_irq(&cgroup_rstat_lock);
}
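
/*
 * Illustrative hold/release usage sketch (cf. cgroup_base_stat_cputime_show()
 * below, the in-tree user):
 *
 *	cgroup_rstat_flush_hold(cgrp);
 *	usage = cgrp->bstat.cputime.sum_exec_runtime;
 *	cgroup_rstat_flush_release();
 *
 * Anything read between the two calls cannot be flushed concurrently
 * because cgroup_rstat_lock remains held.
 */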

int cgroup_rstat_init(struct cgroup *cgrp)
{
	int cpu;

	/* the root cgrp has rstat_cpu preallocated */
	if (!cgrp->rstat_cpu) {
		cgrp->rstat_cpu = alloc_percpu(struct cgroup_rstat_cpu);
		if (!cgrp->rstat_cpu)
			return -ENOMEM;
	}

	/* ->updated_children list is self terminated */
	for_each_possible_cpu(cpu) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
		rstatc->updated_children = cgrp;
		u64_stats_init(&rstatc->bsync);
	}

	return 0;
}

void cgroup_rstat_exit(struct cgroup *cgrp)
{
	int cpu;

	cgroup_rstat_flush(cgrp);

	/* sanity check */
	for_each_possible_cpu(cpu) {
		struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
		if (WARN_ON_ONCE(rstatc->updated_children != cgrp) ||
		    WARN_ON_ONCE(rstatc->updated_next))
			return;
	}

	free_percpu(cgrp->rstat_cpu);
	cgrp->rstat_cpu = NULL;
}

void __init cgroup_rstat_boot(void)
{
	int cpu;
	for_each_possible_cpu(cpu)
		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
}

/*
 * Functions for cgroup basic resource statistics implemented on top of
 * rstat.
 */
static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
				 struct cgroup_base_stat *src_bstat)
{
	dst_bstat->cputime.utime += src_bstat->cputime.utime;
	dst_bstat->cputime.stime += src_bstat->cputime.stime;
	dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
}

static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
				 struct cgroup_base_stat *src_bstat)
{
	dst_bstat->cputime.utime -= src_bstat->cputime.utime;
	dst_bstat->cputime.stime -= src_bstat->cputime.stime;
	dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
}

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
	struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
	struct cgroup *parent = cgroup_parent(cgrp);
	struct cgroup_base_stat delta;
	unsigned seq;

	/* Root-level stats are sourced from system-wide CPU stats */
	if (!parent)
		return;

	/* fetch the current per-cpu values */
	do {
		seq = __u64_stats_fetch_begin(&rstatc->bsync);
		delta = rstatc->bstat;
	} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));

	/* propagate percpu delta to global */
	cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
	cgroup_base_stat_add(&cgrp->bstat, &delta);
	cgroup_base_stat_add(&rstatc->last_bstat, &delta);

	/* propagate global delta to parent (unless that's root) */
	if (cgroup_parent(parent)) {
		delta = cgrp->bstat;
		cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
		cgroup_base_stat_add(&parent->bstat, &delta);
		cgroup_base_stat_add(&cgrp->last_bstat, &delta);
	}
}
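
/*
 * Rough sketch of the double bookkeeping above (hypothetical numbers):
 * if a cpu's bstat.cputime.utime has grown from 30 to 50 since the last
 * flush (last_bstat.cputime.utime == 30), the delta of 20 is added both
 * to cgrp->bstat and to the per-cpu last_bstat, so the next flush only
 * sees what accrued afterwards.  The same trick, keyed on
 * cgrp->last_bstat, forwards the cgroup-level delta to the parent
 * exactly once.
 */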

static struct cgroup_rstat_cpu *
cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp, unsigned long *flags)
{
	struct cgroup_rstat_cpu *rstatc;

	rstatc = get_cpu_ptr(cgrp->rstat_cpu);
	*flags = u64_stats_update_begin_irqsave(&rstatc->bsync);
	return rstatc;
}

static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
						 struct cgroup_rstat_cpu *rstatc,
						 unsigned long flags)
{
	u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
	cgroup_rstat_updated(cgrp, smp_processor_id());
	put_cpu_ptr(rstatc);
}
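
/*
 * The begin/end helpers above must be used as a pair: get_cpu_ptr() pins
 * the caller to the local cpu (disabling preemption) and a u64_stats
 * write section is opened, while the end helper closes it, records the
 * cpu as updated and drops the cpu reference via put_cpu_ptr().
 */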

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
	struct cgroup_rstat_cpu *rstatc;
	unsigned long flags;

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}

void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec)
{
	struct cgroup_rstat_cpu *rstatc;
	unsigned long flags;

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags);
	switch (index) {
	case CPUTIME_USER:
	case CPUTIME_NICE:
		rstatc->bstat.cputime.utime += delta_exec;
		break;
	case CPUTIME_SYSTEM:
	case CPUTIME_IRQ:
	case CPUTIME_SOFTIRQ:
		rstatc->bstat.cputime.stime += delta_exec;
		break;
	default:
		break;
	}
	cgroup_base_stat_cputime_account_end(cgrp, rstatc, flags);
}

/*
 * Compute the cputime for the root cgroup by getting the per cpu data
 * at a global level, then categorizing the fields in a manner consistent
 * with how it is done by __cgroup_account_cputime_field for each bit of
 * cpu time attributed to a cgroup.
 */
static void root_cgroup_cputime(struct task_cputime *cputime)
{
	int i;

	cputime->stime = 0;
	cputime->utime = 0;
	cputime->sum_exec_runtime = 0;
	for_each_possible_cpu(i) {
		struct kernel_cpustat kcpustat;
		u64 *cpustat = kcpustat.cpustat;
		u64 user = 0;
		u64 sys = 0;

		kcpustat_cpu_fetch(&kcpustat, i);

		user += cpustat[CPUTIME_USER];
		user += cpustat[CPUTIME_NICE];
		cputime->utime += user;

		sys += cpustat[CPUTIME_SYSTEM];
		sys += cpustat[CPUTIME_IRQ];
		sys += cpustat[CPUTIME_SOFTIRQ];
		cputime->stime += sys;

		cputime->sum_exec_runtime += user;
		cputime->sum_exec_runtime += sys;
		cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
	}
}
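
/*
 * Emit the base cputime keys for @seq's cgroup, as seen in cgroup2's
 * cpu.stat.  The output (values here purely illustrative) looks like:
 *
 *	usage_usec 10000
 *	user_usec 6000
 *	system_usec 4000
 *
 * For the root cgroup the numbers come from system-wide cpustat via
 * root_cgroup_cputime(); for any other cgroup they come from the flushed
 * rstat counters.
 */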

void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;
	u64 usage, utime, stime;
	struct task_cputime cputime;

	if (cgroup_parent(cgrp)) {
		cgroup_rstat_flush_hold(cgrp);
		usage = cgrp->bstat.cputime.sum_exec_runtime;
		cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
			       &utime, &stime);
		cgroup_rstat_flush_release();
	} else {
		root_cgroup_cputime(&cputime);
		usage = cputime.sum_exec_runtime;
		utime = cputime.utime;
		stime = cputime.stime;
	}

	do_div(usage, NSEC_PER_USEC);
	do_div(utime, NSEC_PER_USEC);
	do_div(stime, NSEC_PER_USEC);

	seq_printf(seq, "usage_usec %llu\n"
		   "user_usec %llu\n"
		   "system_usec %llu\n",
		   usage, utime, stime);
}