1 #ifndef _LINUX_CGROUP_H
2 #define _LINUX_CGROUP_H
6 * Copyright (C) 2003 BULL SA
7 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
11 #include <linux/sched.h>
12 #include <linux/cpumask.h>
13 #include <linux/nodemask.h>
14 #include <linux/rculist.h>
15 #include <linux/cgroupstats.h>
16 #include <linux/rwsem.h>
18 #include <linux/seq_file.h>
19 #include <linux/kernfs.h>
20 #include <linux/jump_label.h>
22 #include <linux/cgroup-defs.h>
/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
35 /* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;	/* subsystem being walked */

	struct list_head		*cset_pos;	/* current css_set position */
	struct list_head		*cset_head;	/* terminator of the css_set walk */

	struct list_head		*task_pos;	/* current task position */
	struct list_head		*tasks_head;	/* NOTE(review): presumably head of the
							 * plain tasks list — confirm */
	struct list_head		*mg_tasks_head;	/* NOTE(review): presumably tasks in the
							 * middle of migration — confirm */
};
47 extern struct cgroup_root cgrp_dfl_root;
48 extern struct css_set init_css_set;
50 #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
51 #include <linux/cgroup_subsys.h>
55 extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \
56 extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
57 #include <linux/cgroup_subsys.h>
/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)
/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)
74 bool css_has_online_children(struct cgroup_subsys_state *css);
75 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
76 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
77 struct cgroup_subsys *ss);
78 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
79 struct cgroup_subsys *ss);
81 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
82 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
83 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
85 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
86 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
87 int cgroup_rm_cftypes(struct cftype *cfts);
89 char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
90 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
91 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
92 struct pid *pid, struct task_struct *tsk);
94 void cgroup_fork(struct task_struct *p);
95 extern int cgroup_can_fork(struct task_struct *p,
96 void *ss_priv[CGROUP_CANFORK_COUNT]);
97 extern void cgroup_cancel_fork(struct task_struct *p,
98 void *ss_priv[CGROUP_CANFORK_COUNT]);
99 extern void cgroup_post_fork(struct task_struct *p,
100 void *old_ss_priv[CGROUP_CANFORK_COUNT]);
101 void cgroup_exit(struct task_struct *p);
103 int cgroup_init_early(void);
104 int cgroup_init(void);
/*
 * Iteration helpers and macros.
 */
110 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
111 struct cgroup_subsys_state *parent);
112 struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
113 struct cgroup_subsys_state *css);
114 struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
115 struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
116 struct cgroup_subsys_state *css);
118 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
119 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
121 void css_task_iter_start(struct cgroup_subsys_state *css,
122 struct css_task_iter *it);
123 struct task_struct *css_task_iter_next(struct css_task_iter *it);
124 void css_task_iter_end(struct css_task_iter *it);
/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants.  @css is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @tset: taskset to iterate
 */
#define cgroup_taskset_for_each(task, tset)				\
	for ((task) = cgroup_taskset_first((tset)); (task);		\
	     (task) = cgroup_taskset_next((tset)))
245 * css_get - obtain a reference on the specified css
248 * The caller must already have a reference.
250 static inline void css_get(struct cgroup_subsys_state *css)
252 if (!(css->flags & CSS_NO_REF))
253 percpu_ref_get(&css->refcnt);
257 * css_get_many - obtain references on the specified css
259 * @n: number of references to get
261 * The caller must already have a reference.
263 static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
265 if (!(css->flags & CSS_NO_REF))
266 percpu_ref_get_many(&css->refcnt, n);
270 * css_tryget - try to obtain a reference on the specified css
273 * Obtain a reference on @css unless it already has reached zero and is
274 * being released. This function doesn't care whether @css is on or
275 * offline. The caller naturally needs to ensure that @css is accessible
276 * but doesn't have to be holding a reference on it - IOW, RCU protected
277 * access is good enough for this function. Returns %true if a reference
278 * count was successfully obtained; %false otherwise.
280 static inline bool css_tryget(struct cgroup_subsys_state *css)
282 if (!(css->flags & CSS_NO_REF))
283 return percpu_ref_tryget(&css->refcnt);
288 * css_tryget_online - try to obtain a reference on the specified css if online
291 * Obtain a reference on @css if it's online. The caller naturally needs
292 * to ensure that @css is accessible but doesn't have to be holding a
293 * reference on it - IOW, RCU protected access is good enough for this
294 * function. Returns %true if a reference count was successfully obtained;
297 static inline bool css_tryget_online(struct cgroup_subsys_state *css)
299 if (!(css->flags & CSS_NO_REF))
300 return percpu_ref_tryget_live(&css->refcnt);
305 * css_put - put a css reference
308 * Put a reference obtained via css_get() and css_tryget_online().
310 static inline void css_put(struct cgroup_subsys_state *css)
312 if (!(css->flags & CSS_NO_REF))
313 percpu_ref_put(&css->refcnt);
317 * css_put_many - put css references
319 * @n: number of references to put
321 * Put references obtained via css_get() and css_tryget_online().
323 static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
325 if (!(css->flags & CSS_NO_REF))
326 percpu_ref_put_many(&css->refcnt, n);
/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern struct rw_semaphore css_set_rwsem;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_rwsem) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif
/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]
368 * task_css_set - obtain a task's css_set
369 * @task: the task to obtain css_set for
371 * See task_css_set_check().
373 static inline struct css_set *task_css_set(struct task_struct *task)
375 return task_css_set_check(task, false);
379 * task_css - obtain css for (task, subsys)
380 * @task: the target task
381 * @subsys_id: the target subsystem ID
383 * See task_css_check().
385 static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
388 return task_css_check(task, subsys_id, false);
392 * task_get_css - find and get the css for (task, subsys)
393 * @task: the target task
394 * @subsys_id: the target subsystem ID
396 * Find the css for the (@task, @subsys_id) combination, increment a
397 * reference on and return it. This function is guaranteed to return a
400 static inline struct cgroup_subsys_state *
401 task_get_css(struct task_struct *task, int subsys_id)
403 struct cgroup_subsys_state *css;
407 css = task_css(task, subsys_id);
408 if (likely(css_tryget_online(css)))
417 * task_css_is_root - test whether a task belongs to the root css
418 * @task: the target task
419 * @subsys_id: the target subsystem ID
421 * Test whether @task belongs to the root css on the specified subsystem.
422 * May be invoked in any context.
424 static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
426 return task_css_check(task, subsys_id, true) ==
427 init_css_set.subsys[subsys_id];
430 static inline struct cgroup *task_cgroup(struct task_struct *task,
433 return task_css(task, subsys_id)->cgroup;
436 /* no synchronization, the result can only be used as a hint */
437 static inline bool cgroup_has_tasks(struct cgroup *cgrp)
439 return !list_empty(&cgrp->cset_links);
442 /* returns ino associated with a cgroup */
443 static inline ino_t cgroup_ino(struct cgroup *cgrp)
445 return cgrp->kn->ino;
448 /* cft/css accessors for cftype->write() operation */
449 static inline struct cftype *of_cft(struct kernfs_open_file *of)
454 struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);
456 /* cft/css accessors for cftype->seq_*() operations */
457 static inline struct cftype *seq_cft(struct seq_file *seq)
459 return of_cft(seq->private);
462 static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
464 return of_css(seq->private);
468 * Name / path handling functions. All are thin wrappers around the kernfs
469 * counterparts and can be called under any context.
472 static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
474 return kernfs_name(cgrp->kn, buf, buflen);
477 static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
480 return kernfs_path(cgrp->kn, buf, buflen);
483 static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
485 pr_cont_kernfs_name(cgrp->kn);
488 static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
490 pr_cont_kernfs_path(cgrp->kn);
493 #else /* !CONFIG_CGROUPS */
495 struct cgroup_subsys_state;
497 static inline void css_put(struct cgroup_subsys_state *css) {}
498 static inline int cgroup_attach_task_all(struct task_struct *from,
499 struct task_struct *t) { return 0; }
500 static inline int cgroupstats_build(struct cgroupstats *stats,
501 struct dentry *dentry) { return -EINVAL; }
503 static inline void cgroup_fork(struct task_struct *p) {}
504 static inline int cgroup_can_fork(struct task_struct *p,
505 void *ss_priv[CGROUP_CANFORK_COUNT])
507 static inline void cgroup_cancel_fork(struct task_struct *p,
508 void *ss_priv[CGROUP_CANFORK_COUNT]) {}
509 static inline void cgroup_post_fork(struct task_struct *p,
510 void *ss_priv[CGROUP_CANFORK_COUNT]) {}
511 static inline void cgroup_exit(struct task_struct *p) {}
513 static inline int cgroup_init_early(void) { return 0; }
514 static inline int cgroup_init(void) { return 0; }
516 #endif /* !CONFIG_CGROUPS */
518 #endif /* _LINUX_CGROUP_H */