// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger for the following amount of time before being
 * destroyed. The goal is to avoid frequent destruction in the middle
 * of consecutive read calls. Expiring in the middle is a performance
 * problem, not a correctness one. 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction. Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_root->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

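/*
 * Usage sketch (hypothetical caller, not from this file): a module that
 * spawns a worker kthread can mirror the worker into every cgroup of
 * the task that created it:
 *
 *	err = cgroup_attach_task_all(current, worker);
 *
 * where "worker" is an illustrative task_struct pointer returned by
 * kthread_create() or similar.
 */
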
/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new
 * child ends up either visible in the source cgroup after the parent's
 * migration completes or put into the target cgroup. No task can slip
 * out of migration through forking.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty. This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

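/*
 * Usage sketch (hypothetical caller; in-tree users live elsewhere, e.g.
 * the v1 cpuset hotplug path): drain an emptied v1 cgroup into its
 * parent:
 *
 *	ret = cgroup_transfer_tasks(cgroup_parent(cgrp), cgrp);
 */
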
/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading one of these files can return large amounts of data if a
 * cgroup has *lots* of attached tasks. So it may need several calls to
 * read(), but we cannot guarantee that the information we produce is
 * correct unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists lingering while waiting for the destroy
 * timer. None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again. The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a sorted pid list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
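
/*
 * For example, a sorted list {5, 5, 7, 9, 9, 9} is compacted in place
 * to {5, 7, 9} and 3 is returned; entries beyond the returned length
 * are left as-is.
 */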

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the
 * result is sorted, which forced this whole pidlist fiasco. As pid
 * order differs per namespace, each namespace needs a differently
 * sorted list, making it impossible to use, for example, a single
 * rbtree of member tasks sorted by task pointer. As pidlists can be
 * fairly large, allocating one per open file is dangerous, so cgroup
 * had to implement a shared pool of pidlists keyed by cgroup and
 * namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary. Returns NULL if we're out of memory;
 * must be called with cgrp->pidlist_mutex held.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough. This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary search to find the
	 * next pid to display, if any.
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open. If the matching pidlist is around, we can use that.
	 * Look for it. Note that @of->priv can't be used directly. It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between. Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
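
/*
 * Example: with a pidlist {3, 9, 12} and *pos == 12, the binary search
 * above lands on index 2 and iteration resumes at pid 12. With
 * *pos == 10 (e.g. pid 10 exited and the list was rebuilt), it resumes
 * at the next higher pid, 12.
 */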

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done.
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	cred = current_cred();
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};
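
/*
 * From userspace these show up as control files in every v1 hierarchy
 * directory. Illustrative shell session (the mount point and hierarchy
 * name are assumptions):
 *
 *	echo $$ > /sys/fs/cgroup/cpu/mygrp/cgroup.procs    # move thread group
 *	echo $$ > /sys/fs/cgroup/cpu/mygrp/tasks           # move one thread
 *	cat /sys/fs/cgroup/cpu/mygrp/tasks                 # sorted pid list
 */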

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * Grab the subsystems state racily. No need to add another
	 * avenue for cgroup_mutex contention.
	 */

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be a kernfs_node belonging to cgroupfs and a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity. For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe. Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || !cgroup_tryget(cgrp)) {
		rcu_read_unlock();
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (READ_ONCE(tsk->__state)) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (tsk->in_iowait)
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	cgroup_put(cgrp);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is in use
 * again, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence. Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d. The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task. We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf, *agentbuf;
	char *argv[3], *envp[3];
	int ret;

	/* snoop agent path and exit early if empty */
	if (!cgrp->root->release_agent_path[0])
		return;

	/* prepare argument buffers */
	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out_free;

	spin_lock(&release_agent_path_lock);
	strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
	spin_unlock(&release_agent_path_lock);
	if (!agentbuf[0])
		goto out_free;

	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	if (ret < 0 || ret >= PATH_MAX)
		goto out_free;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
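
/*
 * A minimal release agent might look like this (illustrative only; the
 * mount point is an assumption):
 *
 *	#!/bin/sh
 *	rmdir "/sys/fs/cgroup/mygrp$1"
 *
 * argv[1] is the released cgroup's path relative to the hierarchy root
 * (with a leading '/'), as built by cgroup_path_ns() above.
 */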

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref. kernfs_rename() doesn't require active_ref
	 * protection. Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	{}
};
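
/*
 * Example (illustrative): a typical v1 mount combining these options:
 *
 *	mount -t cgroup -o cpu,cpuacct,name=mygrp cgroup /mnt
 *
 * "name=mygrp" maps to Opt_name above, while "cpu" and "cpuacct" fall
 * through fs_parse() as -ENOPARAM and are matched against subsystem
 * legacy names in cgroup1_parse_param() below.
 */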

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		int ret;

		ret = vfs_parse_fs_param_source(fc, param);
		if (ret != -ENOPARAM)
			return ret;
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
				return invalfc(fc, "Disabled controller '%s'",
					       param->key);
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return invalfc(fc, "Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return invalfc(fc, "release_agent respecified");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return invalfc(fc, "Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return invalfc(fc, "Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return invalfc(fc, "Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return invalfc(fc, "name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}

static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In absence of 'none', 'name=' and subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return invalfc(fc, "subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return invalfc(fc, "Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return invalfc(fc, "noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return invalfc(fc, "none used incorrectly");

	return 0;
}
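
/*
 * For instance (illustrative), "mount -t cgroup cgroup /mnt" with no
 * options arrives here with no subsys_mask, no 'none' and no name, so
 * all_ss is set and every enabled v1 controller is selected.
 */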

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
			ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename = cgroup1_rename,
	.show_options = cgroup1_show_options,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.show_path = cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on
 * error and positive - in case when the candidate is busy dying.
 * On success it stashes a reference to cgroup_root into given
 * cgroup_fs_context; that reference is *NOT* counting towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount. Let's drain the
	 * dying subsystems. We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting. Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match. Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one. name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return invalfc(fc, "No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		fc_drop_locked(fc);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
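
/*
 * For example (illustrative), booting with:
 *
 *	cgroup_no_v1=memory,named
 *
 * blocks the v1 memory controller and all named v1 hierarchies, while
 * cgroup_no_v1=all blocks every v1 controller.
 */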