// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY    HZ
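/* (HZ jiffies == one second, regardless of the configured tick rate) */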

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_subsys->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
        return cgroup_no_v1_mask & (1 << ssid);
}

static bool cgroup1_subsys_absent(struct cgroup_subsys *ss)
{
        /* Check also dfl_cftypes for file-less controllers, i.e. perf_event */
        return ss->legacy_cftypes == NULL && ss->dfl_cftypes;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
        struct cgroup_root *root;
        int retval = 0;

        cgroup_lock();
        cgroup_attach_lock(true);
        for_each_root(root) {
                struct cgroup *from_cgrp;

                spin_lock_irq(&css_set_lock);
                from_cgrp = task_cgroup_from_root(from, root);
                spin_unlock_irq(&css_set_lock);

                retval = cgroup_attach_task(from_cgrp, tsk, false);
                if (retval)
                        break;
        }
        cgroup_attach_unlock(true);
        cgroup_unlock();

        return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
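
/*
 * Illustrative use (hypothetical names): a driver that spawns a kernel
 * worker thread on behalf of a userspace owner can mirror the owner's
 * cgroup membership across every hierarchy with:
 *
 *         err = cgroup_attach_task_all(owner_task, worker_task);
 */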

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
        DEFINE_CGROUP_MGCTX(mgctx);
        struct cgrp_cset_link *link;
        struct css_task_iter it;
        struct task_struct *task;
        int ret;

        if (cgroup_on_dfl(to))
                return -EINVAL;

        ret = cgroup_migrate_vet_dst(to);
        if (ret)
                return ret;

        cgroup_lock();

        cgroup_attach_lock(true);

        /* all tasks in @from are being moved, all csets are source */
        spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &from->cset_links, cset_link)
                cgroup_migrate_add_src(link->cset, to, &mgctx);
        spin_unlock_irq(&css_set_lock);

        ret = cgroup_migrate_prepare_dst(&mgctx);
        if (ret)
                goto out_err;

        /*
         * Migrate tasks one-by-one until @from is empty.  This fails iff
         * ->can_attach() fails.
         */
        do {
                css_task_iter_start(&from->self, 0, &it);

                do {
                        task = css_task_iter_next(&it);
                } while (task && (task->flags & PF_EXITING));

                if (task)
                        get_task_struct(task);
                css_task_iter_end(&it);

                if (task) {
                        ret = cgroup_migrate(task, false, &mgctx);
                        if (!ret)
                                TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
                        put_task_struct(task);
                }
        } while (task && !ret);
out_err:
        cgroup_migrate_finish(&mgctx);
        cgroup_attach_unlock(true);
        cgroup_unlock();
        return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
        CGROUP_FILE_PROCS,
        CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
        /*
         * used to find which pidlist is wanted.  doesn't change as long as
         * this particular list stays in the list.
         */
        struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
        /* array of xids */
        pid_t *list;
        /* how many elements the above list has */
        int length;
        /* each of these stored in a list by its cgroup */
        struct list_head links;
        /* pointer to the cgroup we belong to, for list removal purposes */
        struct cgroup *owner;
        /* for delayed destruction */
        struct delayed_work destroy_dwork;
};

/*
 * Destroy all pidlists that are still lingering, waiting for their destroy
 * timer.  None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
        struct cgroup_pidlist *l, *tmp_l;

        mutex_lock(&cgrp->pidlist_mutex);
        list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
        mutex_unlock(&cgrp->pidlist_mutex);

        flush_workqueue(cgroup_pidlist_destroy_wq);
        BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
                                                destroy_dwork);
        struct cgroup_pidlist *tofree = NULL;

        mutex_lock(&l->owner->pidlist_mutex);

        /*
         * Destroy iff we didn't get queued again.  The state won't change
         * as destroy_dwork can only be queued while locked.
         */
        if (!delayed_work_pending(dwork)) {
                list_del(&l->links);
                kvfree(l->list);
                put_pid_ns(l->key.ns);
                tofree = l;
        }

        mutex_unlock(&l->owner->pidlist_mutex);
        kfree(tofree);
}

/*
 * pidlist_uniq - given a sorted pid list, strip out all duplicate entries.
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
        int src, dest = 1;

        /*
         * we presume the 0th element is unique, so src starts at 1.  trivial
         * edge cases first; no work needs to be done for either
         */
        if (length == 0 || length == 1)
                return length;
        /* src and dest walk down the list; dest counts unique elements */
        for (src = 1; src < length; src++) {
                /* find next unique element */
                while (list[src] == list[src-1]) {
                        src++;
                        if (src == length)
                                goto after;
                }
                /* dest always points to where the next unique element goes */
                list[dest] = list[src];
                dest++;
        }
after:
        return dest;
}
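
/*
 * Example: for the sorted input {3, 5, 5, 5, 8} with length == 5,
 * pidlist_uniq() compacts the array to {3, 5, 8, ...} and returns 3.
 */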

/*
 * The two pid files - tasks and cgroup.procs - guarantee that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
        return *(pid_t *)a - *(pid_t *)b;
}
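
/*
 * Note: pid_t values here are non-negative and bounded by PID_MAX_LIMIT,
 * well below INT_MAX, so the subtraction above cannot overflow.
 */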

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
                                                  enum cgroup_filetype type)
{
        struct cgroup_pidlist *l;
        /* don't need task_nsproxy() if we're looking at ourself */
        struct pid_namespace *ns = task_active_pid_ns(current);

        lockdep_assert_held(&cgrp->pidlist_mutex);

        list_for_each_entry(l, &cgrp->pidlists, links)
                if (l->key.type == type && l->key.ns == ns)
                        return l;
        return NULL;
}

/*
 * Find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary.  Returns NULL if we're out of memory.
 * Must be called with cgrp->pidlist_mutex held.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
                                                         enum cgroup_filetype type)
{
        struct cgroup_pidlist *l;

        lockdep_assert_held(&cgrp->pidlist_mutex);

        l = cgroup_pidlist_find(cgrp, type);
        if (l)
                return l;

        /* entry not found; create a new one */
        l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
        if (!l)
                return l;

        INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
        l->key.type = type;
        /* don't need task_nsproxy() if we're looking at ourself */
        l->key.ns = get_pid_ns(task_active_pid_ns(current));
        l->owner = cgrp;
        list_add(&l->links, &cgrp->pidlists);
        return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
                              struct cgroup_pidlist **lp)
{
        pid_t *array;
        int length;
        int pid, n = 0; /* used for populating the array */
        struct css_task_iter it;
        struct task_struct *tsk;
        struct cgroup_pidlist *l;

        lockdep_assert_held(&cgrp->pidlist_mutex);

        /*
         * If cgroup gets more users after we read count, we won't have
         * enough space - tough.  This race is indistinguishable to the
         * caller from the case that the additional cgroup users didn't
         * show up until sometime later on.
         */
        length = cgroup_task_count(cgrp);
        array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
        if (!array)
                return -ENOMEM;
        /* now, populate the array */
        css_task_iter_start(&cgrp->self, 0, &it);
        while ((tsk = css_task_iter_next(&it))) {
                if (unlikely(n == length))
                        break;
                /* get tgid or pid for procs or tasks file respectively */
                if (type == CGROUP_FILE_PROCS)
                        pid = task_tgid_vnr(tsk);
                else
                        pid = task_pid_vnr(tsk);
                if (pid > 0) /* make sure to only use valid results */
                        array[n++] = pid;
        }
        css_task_iter_end(&it);
        length = n;
        /* now sort & strip out duplicates (tgids or recycled thread PIDs) */
        sort(array, length, sizeof(pid_t), cmppid, NULL);
        length = pidlist_uniq(array, length);

        l = cgroup_pidlist_find_create(cgrp, type);
        if (!l) {
                kvfree(array);
                return -ENOMEM;
        }

        /* store array, freeing old if necessary */
        kvfree(l->list);
        l->list = array;
        l->length = length;
        *lp = l;
        return 0;
}

/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
        /*
         * Initially we receive a position value that corresponds to
         * one more than the last pid shown (or 0 on the first call or
         * after a seek to the start).  Use a binary-search to find the
         * next pid to display, if any.
         */
        struct kernfs_open_file *of = s->private;
        struct cgroup_file_ctx *ctx = of->priv;
        struct cgroup *cgrp = seq_css(s)->cgroup;
        struct cgroup_pidlist *l;
        enum cgroup_filetype type = seq_cft(s)->private;
        int index = 0, pid = *pos;
        int *iter, ret;

        mutex_lock(&cgrp->pidlist_mutex);

        /*
         * !NULL @ctx->procs1.pidlist indicates that this isn't the first
         * start() after open.  If the matching pidlist is around, we can use
         * that.  Look for it.  Note that @ctx->procs1.pidlist can't be used
         * directly.  It could already have been destroyed.
         */
        if (ctx->procs1.pidlist)
                ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);

        /*
         * Either this is the first start() after open or the matching
         * pidlist has been destroyed in between.  Create a new one.
         */
        if (!ctx->procs1.pidlist) {
                ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
                if (ret)
                        return ERR_PTR(ret);
        }
        l = ctx->procs1.pidlist;

        if (pid) {
                int end = l->length;

                while (index < end) {
                        int mid = (index + end) / 2;
                        if (l->list[mid] == pid) {
                                index = mid;
                                break;
                        } else if (l->list[mid] < pid)
                                index = mid + 1;
                        else
                                end = mid;
                }
        }
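        /*
         * At this point @index either points at @pid itself or at the
         * first entry greater than @pid (possibly == l->length).
         */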
        /* If we're off the end of the array, we're done */
        if (index >= l->length)
                return NULL;
        /* Update the abstract position to be the actual pid that we found */
        iter = l->list + index;
        *pos = *iter;
        return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
        struct kernfs_open_file *of = s->private;
        struct cgroup_file_ctx *ctx = of->priv;
        struct cgroup_pidlist *l = ctx->procs1.pidlist;

        if (l)
                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
                                 CGROUP_PIDLIST_DESTROY_DELAY);
        mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
        struct kernfs_open_file *of = s->private;
        struct cgroup_file_ctx *ctx = of->priv;
        struct cgroup_pidlist *l = ctx->procs1.pidlist;
        pid_t *p = v;
        pid_t *end = l->list + l->length;

        /*
         * Advance to the next pid in the array.  If this goes off the
         * end, we're done.
         */
        p++;
        if (p >= end) {
                (*pos)++;
                return NULL;
        } else {
                *pos = *p;
                return p;
        }
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
        seq_printf(s, "%d\n", *(int *)v);

        return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
                                     char *buf, size_t nbytes, loff_t off,
                                     bool threadgroup)
{
        struct cgroup *cgrp;
        struct task_struct *task;
        const struct cred *cred, *tcred;
        ssize_t ret;
        bool locked;

        cgrp = cgroup_kn_lock_live(of->kn, false);
        if (!cgrp)
                return -ENODEV;

        task = cgroup_procs_write_start(buf, threadgroup, &locked);
        ret = PTR_ERR_OR_ZERO(task);
        if (ret)
                goto out_unlock;

        /*
         * Even if we're attaching all tasks in the thread group, we only need
         * to check permissions on one of them.  Check permissions using the
         * credentials from file open to protect against inherited fd attacks.
         */
        cred = of->file->f_cred;
        tcred = get_task_cred(task);
        if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
            !uid_eq(cred->euid, tcred->uid) &&
            !uid_eq(cred->euid, tcred->suid))
                ret = -EACCES;
        put_cred(tcred);
        if (ret)
                goto out_finish;

        ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
        cgroup_procs_write_finish(task, locked);
out_unlock:
        cgroup_kn_unlock(of->kn);

        return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
                                   char *buf, size_t nbytes, loff_t off)
{
        return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
                                   char *buf, size_t nbytes, loff_t off)
{
        return __cgroup1_procs_write(of, buf, nbytes, off, false);
}
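
/*
 * From userspace these two entry points correspond to, e.g. (illustrative
 * paths, assuming a v1 hierarchy mounted at /sys/fs/cgroup/<ctrl>):
 *
 *         echo $$   > /sys/fs/cgroup/<ctrl>/mygrp/cgroup.procs   # whole group
 *         echo $TID > /sys/fs/cgroup/<ctrl>/mygrp/tasks          # one thread
 */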

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
                                          char *buf, size_t nbytes, loff_t off)
{
        struct cgroup *cgrp;
        struct cgroup_file_ctx *ctx;

        BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

        /*
         * Release agent gets called with all capabilities,
         * require capabilities to set release agent.
         */
        ctx = of->priv;
        if ((ctx->ns->user_ns != &init_user_ns) ||
            !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        cgrp = cgroup_kn_lock_live(of->kn, false);
        if (!cgrp)
                return -ENODEV;
        spin_lock(&release_agent_path_lock);
        strscpy(cgrp->root->release_agent_path, strstrip(buf),
                sizeof(cgrp->root->release_agent_path));
        spin_unlock(&release_agent_path_lock);
        cgroup_kn_unlock(of->kn);
        return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
        struct cgroup *cgrp = seq_css(seq)->cgroup;

        spin_lock(&release_agent_path_lock);
        seq_puts(seq, cgrp->root->release_agent_path);
        spin_unlock(&release_agent_path_lock);
        seq_putc(seq, '\n');
        return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
        seq_puts(seq, "0\n");
        return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
                                         struct cftype *cft)
{
        return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
                                          struct cftype *cft, u64 val)
{
        if (val)
                set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        else
                clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
                                      struct cftype *cft)
{
        return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
                                       struct cftype *cft, u64 val)
{
        if (val)
                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        else
                clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
        {
                .name = "cgroup.procs",
                .seq_start = cgroup_pidlist_start,
                .seq_next = cgroup_pidlist_next,
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_PROCS,
                .write = cgroup1_procs_write,
        },
        {
                .name = "cgroup.clone_children",
                .read_u64 = cgroup_clone_children_read,
                .write_u64 = cgroup_clone_children_write,
        },
        {
                .name = "cgroup.sane_behavior",
                .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_sane_behavior_show,
        },
        {
                .name = "tasks",
                .seq_start = cgroup_pidlist_start,
                .seq_next = cgroup_pidlist_next,
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_TASKS,
                .write = cgroup1_tasks_write,
        },
        {
                .name = "notify_on_release",
                .read_u64 = cgroup_read_notify_on_release,
                .write_u64 = cgroup_write_notify_on_release,
        },
        {
                .name = "release_agent",
                .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_release_agent_show,
                .write = cgroup_release_agent_write,
                .max_write_len = PATH_MAX - 1,
        },
        { }     /* terminate */
};
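
/*
 * In each directory of a mounted v1 hierarchy the table above shows up as
 * the files cgroup.procs, cgroup.clone_children, tasks and
 * notify_on_release; cgroup.sane_behavior and release_agent appear on the
 * root cgroup only (CFTYPE_ONLY_ON_ROOT).
 */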

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
        struct cgroup_subsys *ss;
        bool cgrp_v1_visible = false;
        int i;

        seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
        /*
         * Grab the subsystems state racily.  No need to add avenue to
         * cgroup_mutex contention.
         */

        for_each_subsys(ss, i) {
                if (cgroup1_subsys_absent(ss))
                        continue;
                cgrp_v1_visible |= ss->root != &cgrp_dfl_root;

                seq_printf(m, "%s\t%d\t%d\t%d\n",
                           ss->legacy_name, ss->root->hierarchy_id,
                           atomic_read(&ss->root->nr_cgrps),
                           cgroup_ssid_enabled(i));
        }

        if (cgrp_dfl_visible && !cgrp_v1_visible)
                pr_info_once("/proc/cgroups lists only v1 controllers, use cgroup.controllers of root cgroup for v2 info\n");

        return 0;
}
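
/*
 * Example /proc/cgroups output (values illustrative):
 *
 *         #subsys_name    hierarchy       num_cgroups     enabled
 *         cpuset          2               1               1
 *         memory          3               12              1
 */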

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
        struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
        struct cgroup *cgrp;
        struct css_task_iter it;
        struct task_struct *tsk;

        /* it should be a kernfs_node belonging to cgroupfs and a directory */
        if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
            kernfs_type(kn) != KERNFS_DIR)
                return -EINVAL;

        /*
         * We aren't being called from kernfs and there's no guarantee on
         * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
         * @kn->priv is RCU safe.  Let's do the RCU dancing.
         */
        rcu_read_lock();
        cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
        if (!cgrp || !cgroup_tryget(cgrp)) {
                rcu_read_unlock();
                return -ENOENT;
        }
        rcu_read_unlock();

        css_task_iter_start(&cgrp->self, 0, &it);
        while ((tsk = css_task_iter_next(&it))) {
                switch (READ_ONCE(tsk->__state)) {
                case TASK_RUNNING:
                        stats->nr_running++;
                        break;
                case TASK_INTERRUPTIBLE:
                        stats->nr_sleeping++;
                        break;
                case TASK_UNINTERRUPTIBLE:
                        stats->nr_uninterruptible++;
                        break;
                case TASK_STOPPED:
                        stats->nr_stopped++;
                        break;
                default:
                        if (tsk->in_iowait)
                                stats->nr_io_wait++;
                        break;
                }
        }
        css_task_iter_end(&it);

        cgroup_put(cgrp);
        return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
        if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
            !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
                schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
        struct cgroup *cgrp =
                container_of(work, struct cgroup, release_agent_work);
        char *pathbuf, *agentbuf;
        char *argv[3], *envp[3];
        int ret;

        /* snoop agent path and exit early if empty */
        if (!cgrp->root->release_agent_path[0])
                return;

        /* prepare argument buffers */
        pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!pathbuf || !agentbuf)
                goto out_free;

        spin_lock(&release_agent_path_lock);
        strscpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
        spin_unlock(&release_agent_path_lock);
        if (!agentbuf[0])
                goto out_free;

        ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
        if (ret < 0)
                goto out_free;

        argv[0] = agentbuf;
        argv[1] = pathbuf;
        argv[2] = NULL;

        /* minimal command environment */
        envp[0] = "HOME=/";
        envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
        envp[2] = NULL;

        call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
        kfree(agentbuf);
        kfree(pathbuf);
}
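
/*
 * Illustrative invocation: with release_agent set to a hypothetical
 * /sbin/my_release_agent and an emptied cgroup at /mygrp/child, the
 * helper above ends up running the equivalent of:
 *
 *         HOME=/ PATH=/sbin:/bin:/usr/sbin:/usr/bin \
 *                 /sbin/my_release_agent /mygrp/child
 */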

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
                          const char *new_name_str)
{
        struct cgroup *cgrp = kn->priv;
        int ret;

        /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
        if (strchr(new_name_str, '\n'))
                return -EINVAL;

        if (kernfs_type(kn) != KERNFS_DIR)
                return -ENOTDIR;
        if (rcu_access_pointer(kn->__parent) != new_parent)
                return -EIO;

        /*
         * We're gonna grab cgroup_mutex which nests outside kernfs
         * active_ref.  kernfs_rename() doesn't require active_ref
         * protection.  Break them before grabbing cgroup_mutex.
         */
        kernfs_break_active_protection(new_parent);
        kernfs_break_active_protection(kn);

        cgroup_lock();

        ret = kernfs_rename(kn, new_parent, new_name_str);
        if (!ret)
                TRACE_CGROUP_PATH(rename, cgrp);

        cgroup_unlock();

        kernfs_unbreak_active_protection(kn);
        kernfs_unbreak_active_protection(new_parent);
        return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
        struct cgroup_subsys *ss;
        int ssid;

        for_each_subsys(ss, ssid)
                if (root->subsys_mask & (1 << ssid))
                        seq_show_option(seq, ss->legacy_name, NULL);
        if (root->flags & CGRP_ROOT_NOPREFIX)
                seq_puts(seq, ",noprefix");
        if (root->flags & CGRP_ROOT_XATTR)
                seq_puts(seq, ",xattr");
        if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
                seq_puts(seq, ",cpuset_v2_mode");
        if (root->flags & CGRP_ROOT_FAVOR_DYNMODS)
                seq_puts(seq, ",favordynmods");

        spin_lock(&release_agent_path_lock);
        if (strlen(root->release_agent_path))
                seq_show_option(seq, "release_agent",
                                root->release_agent_path);
        spin_unlock(&release_agent_path_lock);

        if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
                seq_puts(seq, ",clone_children");
        if (strlen(root->name))
                seq_show_option(seq, "name", root->name);
        return 0;
}

enum cgroup1_param {
        Opt_all,
        Opt_clone_children,
        Opt_cpuset_v2_mode,
        Opt_name,
        Opt_none,
        Opt_noprefix,
        Opt_release_agent,
        Opt_xattr,
        Opt_favordynmods,
        Opt_nofavordynmods,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
        fsparam_flag  ("all",            Opt_all),
        fsparam_flag  ("clone_children", Opt_clone_children),
        fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
        fsparam_string("name",           Opt_name),
        fsparam_flag  ("none",           Opt_none),
        fsparam_flag  ("noprefix",       Opt_noprefix),
        fsparam_string("release_agent",  Opt_release_agent),
        fsparam_flag  ("xattr",          Opt_xattr),
        fsparam_flag  ("favordynmods",   Opt_favordynmods),
        fsparam_flag  ("nofavordynmods", Opt_nofavordynmods),
        {}
};
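
/*
 * These parameters map onto cgroup v1 mount options, e.g. (illustrative):
 *
 *         mount -t cgroup -o cpuset,noprefix none /sys/fs/cgroup/cpuset
 *         mount -t cgroup -o none,name=mygrp none /sys/fs/cgroup/mygrp
 *
 * Controller names such as "cpuset" are not listed above; they fall
 * through to the -ENOPARAM path in cgroup1_parse_param() below.
 */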

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
        struct cgroup_subsys *ss;
        struct fs_parse_result result;
        int opt, i;

        opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
        if (opt == -ENOPARAM) {
                int ret;

                ret = vfs_parse_fs_param_source(fc, param);
                if (ret != -ENOPARAM)
                        return ret;
                for_each_subsys(ss, i) {
                        if (strcmp(param->key, ss->legacy_name) ||
                            cgroup1_subsys_absent(ss))
                                continue;
                        if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
                                return invalfc(fc, "Disabled controller '%s'",
                                               param->key);
                        ctx->subsys_mask |= (1 << i);
                        return 0;
                }
                return invalfc(fc, "Unknown subsys name '%s'", param->key);
        }
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_none:
                /* Explicitly have no subsystems */
                ctx->none = true;
                break;
        case Opt_all:
                ctx->all_ss = true;
                break;
        case Opt_noprefix:
                ctx->flags |= CGRP_ROOT_NOPREFIX;
                break;
        case Opt_clone_children:
                ctx->cpuset_clone_children = true;
                break;
        case Opt_cpuset_v2_mode:
                ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
                break;
        case Opt_xattr:
                ctx->flags |= CGRP_ROOT_XATTR;
                break;
        case Opt_favordynmods:
                ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;
                break;
        case Opt_nofavordynmods:
                ctx->flags &= ~CGRP_ROOT_FAVOR_DYNMODS;
                break;
        case Opt_release_agent:
                /* Specifying two release agents is forbidden */
                if (ctx->release_agent)
                        return invalfc(fc, "release_agent respecified");
                /*
                 * Release agent gets called with all capabilities,
                 * require capabilities to set release agent.
                 */
                if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
                        return invalfc(fc, "Setting release_agent not allowed");
                ctx->release_agent = param->string;
                param->string = NULL;
                break;
        case Opt_name:
                /* blocked by boot param? */
                if (cgroup_no_v1_named)
                        return -ENOENT;
                /* Can't specify an empty name */
                if (!param->size)
                        return invalfc(fc, "Empty name");
                if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
                        return invalfc(fc, "Name too long");
                /* Must match [\w.-]+ */
                for (i = 0; i < param->size; i++) {
                        char c = param->string[i];
                        if (isalnum(c))
                                continue;
                        if ((c == '.') || (c == '-') || (c == '_'))
                                continue;
                        return invalfc(fc, "Invalid name");
                }
                /* Specifying two names is forbidden */
                if (ctx->name)
                        return invalfc(fc, "name respecified");
                ctx->name = param->string;
                param->string = NULL;
                break;
        }
        return 0;
}

static int check_cgroupfs_options(struct fs_context *fc)
{
        struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
        u16 mask = U16_MAX;
        u16 enabled = 0;
        struct cgroup_subsys *ss;
        int i;

#ifdef CONFIG_CPUSETS
        mask = ~((u16)1 << cpuset_cgrp_id);
#endif
        for_each_subsys(ss, i)
                if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i) &&
                    !cgroup1_subsys_absent(ss))
                        enabled |= 1 << i;

        ctx->subsys_mask &= enabled;

        /*
         * In absence of 'none', 'name=' and subsystem name options,
         * let's default to 'all'.
         */
        if (!ctx->subsys_mask && !ctx->none && !ctx->name)
                ctx->all_ss = true;

        if (ctx->all_ss) {
                /* Mutually exclusive option 'all' + subsystem name */
                if (ctx->subsys_mask)
                        return invalfc(fc, "subsys name conflicts with all");
                /* 'all' => select all the subsystems */
                ctx->subsys_mask = enabled;
        }

        /*
         * We either have to specify by name or by subsystems.  (So all
         * empty hierarchies must have a name).
         */
        if (!ctx->subsys_mask && !ctx->name)
                return invalfc(fc, "Need name or subsystem set");

        /*
         * Option noprefix was introduced just for backward compatibility
         * with the old cpuset, so we allow noprefix only if mounting just
         * the cpuset subsystem.
         */
        if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
                return invalfc(fc, "noprefix used incorrectly");

        /* Can't specify "none" and some subsystems */
        if (ctx->subsys_mask && ctx->none)
                return invalfc(fc, "none used incorrectly");

        return 0;
}
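
/*
 * For example, a named but controller-less hierarchy requested with
 * "-o none,name=mygrp" (illustrative name) passes the checks above:
 * subsys_mask stays empty, but a name is present.
 */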

int cgroup1_reconfigure(struct fs_context *fc)
{
        struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
        struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
        int ret = 0;
        u16 added_mask, removed_mask;

        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

        /* See what subsystems are wanted */
        ret = check_cgroupfs_options(fc);
        if (ret)
                goto out_unlock;

        if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
                pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
                        task_tgid_nr(current), current->comm);

        added_mask = ctx->subsys_mask & ~root->subsys_mask;
        removed_mask = root->subsys_mask & ~ctx->subsys_mask;

        /* Don't allow flags or name to change at remount */
        if ((ctx->flags ^ root->flags) ||
            (ctx->name && strcmp(ctx->name, root->name))) {
                errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
                        ctx->flags, ctx->name ?: "", root->flags, root->name);
                ret = -EINVAL;
                goto out_unlock;
        }

        /* remounting is not allowed for populated hierarchies */
        if (!list_empty(&root->cgrp.self.children)) {
                ret = -EBUSY;
                goto out_unlock;
        }

        ret = rebind_subsystems(root, added_mask);
        if (ret)
                goto out_unlock;

        WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

        if (ctx->release_agent) {
                spin_lock(&release_agent_path_lock);
                strcpy(root->release_agent_path, ctx->release_agent);
                spin_unlock(&release_agent_path_lock);
        }

        trace_cgroup_remount(root);

 out_unlock:
        cgroup_unlock();
        return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
        .rename                 = cgroup1_rename,
        .show_options           = cgroup1_show_options,
        .mkdir                  = cgroup_mkdir,
        .rmdir                  = cgroup_rmdir,
        .show_path              = cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on
 * error and positive - in case when the candidate is busy dying.
 * On success it stashes a reference to cgroup_root into given
 * cgroup_fs_context; that reference is *NOT* counting towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
        struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
        struct cgroup_root *root;
        struct cgroup_subsys *ss;
        int i, ret;

        /* First find the desired set of subsystems */
        ret = check_cgroupfs_options(fc);
        if (ret)
                return ret;

        /*
         * Destruction of cgroup root is asynchronous, so subsystems may
         * still be dying after the previous unmount.  Let's drain the
         * dying subsystems.  We just need to ensure that the ones
         * unmounted previously finish dying and don't care about new ones
         * starting.  Testing ref liveliness is good enough.
         */
        for_each_subsys(ss, i) {
                if (!(ctx->subsys_mask & (1 << i)) ||
                    ss->root == &cgrp_dfl_root)
                        continue;

                if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
                        return 1;       /* restart */
                cgroup_put(&ss->root->cgrp);
        }

        for_each_root(root) {
                bool name_match = false;

                if (root == &cgrp_dfl_root)
                        continue;

                /*
                 * If we asked for a name then it must match.  Also, if
                 * name matches but subsys_mask doesn't, we should fail.
                 * Remember whether name matched.
                 */
                if (ctx->name) {
                        if (strcmp(ctx->name, root->name))
                                continue;
                        name_match = true;
                }

                /*
                 * If we asked for subsystems (or explicitly for no
                 * subsystems) then they must match.
                 */
                if ((ctx->subsys_mask || ctx->none) &&
                    (ctx->subsys_mask != root->subsys_mask)) {
                        if (!name_match)
                                continue;
                        return -EBUSY;
                }

                if (root->flags ^ ctx->flags)
                        pr_warn("new mount options do not match the existing superblock, will be ignored\n");

                ctx->root = root;
                return 0;
        }

        /*
         * No such thing, create a new one.  name= matching without subsys
         * specification is allowed for already existing hierarchies but we
         * can't create a new one without subsys specification.
         */
        if (!ctx->subsys_mask && !ctx->none)
                return invalfc(fc, "No subsys list or none specified");

        /* Hierarchies may only be created in the initial cgroup namespace. */
        if (ctx->ns != &init_cgroup_ns)
                return -EPERM;

        root = kzalloc(sizeof(*root), GFP_KERNEL);
        if (!root)
                return -ENOMEM;

        ctx->root = root;
        init_cgroup_root(ctx);

        ret = cgroup_setup_root(root, ctx->subsys_mask);
        if (!ret)
                cgroup_favor_dynmods(root, ctx->flags & CGRP_ROOT_FAVOR_DYNMODS);
        else
                cgroup_free_root(root);

        return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
        struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
        int ret;

        /* Check if the caller has permission to mount. */
        if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

        ret = cgroup1_root_to_use(fc);
        if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
                ret = 1;        /* restart */

        cgroup_unlock();

        if (!ret)
                ret = cgroup_do_get_tree(fc);

        if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
                fc_drop_locked(fc);
                ret = 1;
        }

        if (unlikely(ret > 0)) {
                msleep(10);
                return restart_syscall();
        }
        return ret;
}

/**
 * task_get_cgroup1 - Acquires the associated cgroup of a task within a
 * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
 * hierarchy ID.
 * @tsk: The target task
 * @hierarchy_id: The ID of a cgroup1 hierarchy
 *
 * On success, the cgroup is returned. On failure, ERR_PTR is returned.
 * We limit it to cgroup1 only.
 */
struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id)
{
        struct cgroup *cgrp = ERR_PTR(-ENOENT);
        struct cgroup_root *root;
        unsigned long flags;

        rcu_read_lock();
        for_each_root(root) {
                /* cgroup1 only */
                if (root == &cgrp_dfl_root)
                        continue;
                if (root->hierarchy_id != hierarchy_id)
                        continue;
                spin_lock_irqsave(&css_set_lock, flags);
                cgrp = task_cgroup_from_root(tsk, root);
                if (!cgrp || !cgroup_tryget(cgrp))
                        cgrp = ERR_PTR(-ENOENT);
                spin_unlock_irqrestore(&css_set_lock, flags);
                break;
        }
        rcu_read_unlock();
        return cgrp;
}

static int __init cgroup1_wq_init(void)
{
        /*
         * Used to destroy pidlists; kept separate to serve as the flush
         * domain.  Cap @max_active to 1 too.
         */
        cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
                                                    0, 1);
        BUG_ON(!cgroup_pidlist_destroy_wq);
        return 0;
}
core_initcall(cgroup1_wq_init);

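/*
 * Parse the cgroup_no_v1= boot parameter, e.g. (illustrative values):
 *
 *         cgroup_no_v1=memory,blkio   - block v1 mounts of those controllers
 *         cgroup_no_v1=all            - block all v1 controllers
 *         cgroup_no_v1=named          - block named v1 hierarchies
 */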
static int __init cgroup_no_v1(char *str)
{
        struct cgroup_subsys *ss;
        char *token;
        int i;

        while ((token = strsep(&str, ",")) != NULL) {
                if (!*token)
                        continue;

                if (!strcmp(token, "all")) {
                        cgroup_no_v1_mask = U16_MAX;
                        continue;
                }

                if (!strcmp(token, "named")) {
                        cgroup_no_v1_named = true;
                        continue;
                }

                for_each_subsys(ss, i) {
                        if (strcmp(token, ss->name) &&
                            strcmp(token, ss->legacy_name))
                                continue;

                        cgroup_no_v1_mask |= 1 << i;
                        break;
                }
        }
        return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);