task cgroups: enable cgroups by default in some configs
[linux-block.git] / kernel / cgroup.c
CommitLineData
ddbcc7e8
PM
1/*
2 * kernel/cgroup.c
3 *
4 * Generic process-grouping system.
5 *
6 * Based originally on the cpuset system, extracted by Paul Menage
7 * Copyright (C) 2006 Google, Inc
8 *
9 * Copyright notices from the original cpuset code:
10 * --------------------------------------------------
11 * Copyright (C) 2003 BULL SA.
12 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
13 *
14 * Portions derived from Patrick Mochel's sysfs code.
15 * sysfs is Copyright (c) 2001-3 Patrick Mochel
16 *
17 * 2003-10-10 Written by Simon Derr.
18 * 2003-10-22 Updates by Stephen Hemminger.
19 * 2004 May-July Rework by Paul Jackson.
20 * ---------------------------------------------------
21 *
22 * This file is subject to the terms and conditions of the GNU General Public
23 * License. See the file COPYING in the main directory of the Linux
24 * distribution for more details.
25 */
26
27#include <linux/cgroup.h>
28#include <linux/errno.h>
29#include <linux/fs.h>
30#include <linux/kernel.h>
31#include <linux/list.h>
32#include <linux/mm.h>
33#include <linux/mutex.h>
34#include <linux/mount.h>
35#include <linux/pagemap.h>
a424316c 36#include <linux/proc_fs.h>
ddbcc7e8
PM
37#include <linux/rcupdate.h>
38#include <linux/sched.h>
817929ec 39#include <linux/backing-dev.h>
ddbcc7e8
PM
40#include <linux/seq_file.h>
41#include <linux/slab.h>
42#include <linux/magic.h>
43#include <linux/spinlock.h>
44#include <linux/string.h>
bbcb81d0 45#include <linux/sort.h>
81a6a5cd 46#include <linux/kmod.h>
ddbcc7e8
PM
47#include <asm/atomic.h>
48
81a6a5cd
PM
49static DEFINE_MUTEX(cgroup_mutex);
50
ddbcc7e8
PM
51/* Generate an array of cgroup subsystem pointers */
52#define SUBSYS(_x) &_x ## _subsys,
53
54static struct cgroup_subsys *subsys[] = {
55#include <linux/cgroup_subsys.h>
56};
57
58/*
59 * A cgroupfs_root represents the root of a cgroup hierarchy,
60 * and may be associated with a superblock to form an active
61 * hierarchy
62 */
63struct cgroupfs_root {
64 struct super_block *sb;
65
66 /*
67 * The bitmask of subsystems intended to be attached to this
68 * hierarchy
69 */
70 unsigned long subsys_bits;
71
72 /* The bitmask of subsystems currently attached to this hierarchy */
73 unsigned long actual_subsys_bits;
74
75 /* A list running through the attached subsystems */
76 struct list_head subsys_list;
77
78 /* The root cgroup for this hierarchy */
79 struct cgroup top_cgroup;
80
81 /* Tracks how many cgroups are currently defined in hierarchy.*/
82 int number_of_cgroups;
83
84 /* A list running through the mounted hierarchies */
85 struct list_head root_list;
86
87 /* Hierarchy-specific flags */
88 unsigned long flags;
81a6a5cd
PM
89
90 /* The path to use for release notifications. No locking
91 * between setting and use - so if userspace updates this
92 * while child cgroups exist, you could miss a
93 * notification. We ensure that it's always a valid
94 * NUL-terminated string */
95 char release_agent_path[PATH_MAX];
ddbcc7e8
PM
96};
97
98
99/*
100 * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
101 * subsystems that are otherwise unattached - it never has more than a
102 * single cgroup, and all tasks are part of that cgroup.
103 */
104static struct cgroupfs_root rootnode;
105
106/* The list of hierarchy roots */
107
108static LIST_HEAD(roots);
817929ec 109static int root_count;
ddbcc7e8
PM
110
111/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
112#define dummytop (&rootnode.top_cgroup)
113
114/* This flag indicates whether tasks in the fork and exit paths should
115 * take callback_mutex and check for fork/exit handlers to call. This
116 * avoids us having to do extra work in the fork/exit path if none of the
117 * subsystems need to be called.
118 */
119static int need_forkexit_callback;
120
121/* bits in struct cgroup flags field */
122enum {
81a6a5cd 123 /* Control Group is dead */
ddbcc7e8 124 CONT_REMOVED,
81a6a5cd
PM
125 /* Control Group has previously had a child cgroup or a task,
126 * but no longer (only if CONT_NOTIFY_ON_RELEASE is set) */
127 CONT_RELEASABLE,
128 /* Control Group requires release notifications to userspace */
129 CONT_NOTIFY_ON_RELEASE,
ddbcc7e8
PM
130};
131
132/* convenient tests for these bits */
133inline int cgroup_is_removed(const struct cgroup *cont)
134{
135 return test_bit(CONT_REMOVED, &cont->flags);
136}
137
138/* bits in struct cgroupfs_root flags field */
139enum {
140 ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
141};
142
81a6a5cd
PM
143inline int cgroup_is_releasable(const struct cgroup *cont)
144{
145 const int bits =
146 (1 << CONT_RELEASABLE) |
147 (1 << CONT_NOTIFY_ON_RELEASE);
148 return (cont->flags & bits) == bits;
149}
150
151inline int notify_on_release(const struct cgroup *cont)
152{
153 return test_bit(CONT_NOTIFY_ON_RELEASE, &cont->flags);
154}
155
ddbcc7e8
PM
156/*
157 * for_each_subsys() allows you to iterate on each subsystem attached to
158 * an active hierarchy
159 */
160#define for_each_subsys(_root, _ss) \
161list_for_each_entry(_ss, &_root->subsys_list, sibling)
162
163/* for_each_root() allows you to iterate across the active hierarchies */
164#define for_each_root(_root) \
165list_for_each_entry(_root, &roots, root_list)
166
81a6a5cd
PM
167/* the list of cgroups eligible for automatic release. Protected by
168 * release_list_lock */
169static LIST_HEAD(release_list);
170static DEFINE_SPINLOCK(release_list_lock);
171static void cgroup_release_agent(struct work_struct *work);
172static DECLARE_WORK(release_agent_work, cgroup_release_agent);
173static void check_for_release(struct cgroup *cont);
174
817929ec
PM
175/* Link structure for associating css_set objects with cgroups */
176struct cg_cgroup_link {
177 /*
178 * List running through cg_cgroup_links associated with a
179 * cgroup, anchored on cgroup->css_sets
180 */
181 struct list_head cont_link_list;
182 /*
183 * List running through cg_cgroup_links pointing at a
184 * single css_set object, anchored on css_set->cg_links
185 */
186 struct list_head cg_link_list;
187 struct css_set *cg;
188};
189
190/* The default css_set - used by init and its children prior to any
191 * hierarchies being mounted. It contains a pointer to the root state
192 * for each subsystem. Also used to anchor the list of css_sets. Not
193 * reference-counted, to improve performance when child cgroups
194 * haven't been created.
195 */
196
197static struct css_set init_css_set;
198static struct cg_cgroup_link init_css_set_link;
199
200/* css_set_lock protects the list of css_set objects, and the
201 * chain of tasks off each css_set. Nests outside task->alloc_lock
202 * due to cgroup_iter_start() */
203static DEFINE_RWLOCK(css_set_lock);
204static int css_set_count;
205
206/* We don't maintain the lists running through each css_set to its
207 * task until after the first call to cgroup_iter_start(). This
208 * reduces the fork()/exit() overhead for people who have cgroups
209 * compiled into their kernel but not actually in use */
210static int use_task_css_set_links;
211
212/* When we create or destroy a css_set, the operation simply
213 * takes/releases a reference count on all the cgroups referenced
214 * by subsystems in this css_set. This can end up multiple-counting
215 * some cgroups, but that's OK - the ref-count is just a
216 * busy/not-busy indicator; ensuring that we only count each cgroup
217 * once would require taking a global lock to ensure that no
b4f48b63
PM
218 * subsystems moved between hierarchies while we were doing so.
219 *
220 * Possible TODO: decide at boot time based on the number of
221 * registered subsystems and the number of CPUs or NUMA nodes whether
222 * it's better for performance to ref-count every subsystem, or to
223 * take a global lock and only add one ref count to each hierarchy.
224 */
817929ec
PM
225
226/*
227 * unlink a css_set from the list and free it
228 */
81a6a5cd 229static void unlink_css_set(struct css_set *cg)
b4f48b63 230{
817929ec
PM
231 write_lock(&css_set_lock);
232 list_del(&cg->list);
233 css_set_count--;
234 while (!list_empty(&cg->cg_links)) {
235 struct cg_cgroup_link *link;
236 link = list_entry(cg->cg_links.next,
237 struct cg_cgroup_link, cg_link_list);
238 list_del(&link->cg_link_list);
239 list_del(&link->cont_link_list);
240 kfree(link);
241 }
242 write_unlock(&css_set_lock);
81a6a5cd
PM
243}
244
245static void __release_css_set(struct kref *k, int taskexit)
246{
247 int i;
248 struct css_set *cg = container_of(k, struct css_set, ref);
249
250 unlink_css_set(cg);
251
252 rcu_read_lock();
253 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
254 struct cgroup *cont = cg->subsys[i]->cgroup;
255 if (atomic_dec_and_test(&cont->count) &&
256 notify_on_release(cont)) {
257 if (taskexit)
258 set_bit(CONT_RELEASABLE, &cont->flags);
259 check_for_release(cont);
260 }
261 }
262 rcu_read_unlock();
817929ec 263 kfree(cg);
b4f48b63
PM
264}
265
81a6a5cd
PM
266static void release_css_set(struct kref *k)
267{
268 __release_css_set(k, 0);
269}
270
271static void release_css_set_taskexit(struct kref *k)
272{
273 __release_css_set(k, 1);
274}
275
817929ec
PM
276/*
277 * refcounted get/put for css_set objects
278 */
279static inline void get_css_set(struct css_set *cg)
280{
281 kref_get(&cg->ref);
282}
283
284static inline void put_css_set(struct css_set *cg)
285{
286 kref_put(&cg->ref, release_css_set);
287}
288
81a6a5cd
PM
289static inline void put_css_set_taskexit(struct css_set *cg)
290{
291 kref_put(&cg->ref, release_css_set_taskexit);
292}
293
817929ec
PM
294/*
295 * find_existing_css_set() is a helper for
296 * find_css_set(), and checks to see whether an existing
297 * css_set is suitable. This currently walks a linked-list for
298 * simplicity; a later patch will use a hash table for better
299 * performance
300 *
301 * oldcg: the cgroup group that we're using before the cgroup
302 * transition
303 *
304 * cont: the cgroup that we're moving into
305 *
306 * template: location in which to build the desired set of subsystem
307 * state objects for the new cgroup group
308 */
309
310static struct css_set *find_existing_css_set(
311 struct css_set *oldcg,
312 struct cgroup *cont,
313 struct cgroup_subsys_state *template[])
b4f48b63
PM
314{
315 int i;
817929ec
PM
316 struct cgroupfs_root *root = cont->root;
317 struct list_head *l = &init_css_set.list;
318
319 /* Built the set of subsystem state objects that we want to
320 * see in the new css_set */
321 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
322 if (root->subsys_bits & (1ull << i)) {
323 /* Subsystem is in this hierarchy. So we want
324 * the subsystem state from the new
325 * cgroup */
326 template[i] = cont->subsys[i];
327 } else {
328 /* Subsystem is not in this hierarchy, so we
329 * don't want to change the subsystem state */
330 template[i] = oldcg->subsys[i];
331 }
332 }
333
334 /* Look through existing cgroup groups to find one to reuse */
335 do {
336 struct css_set *cg =
337 list_entry(l, struct css_set, list);
338
339 if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
340 /* All subsystems matched */
341 return cg;
342 }
343 /* Try the next cgroup group */
344 l = l->next;
345 } while (l != &init_css_set.list);
346
347 /* No existing cgroup group matched */
348 return NULL;
349}
350
351/*
352 * allocate_cg_links() allocates "count" cg_cgroup_link structures
353 * and chains them on tmp through their cont_link_list fields. Returns 0 on
354 * success or a negative error
355 */
356
357static int allocate_cg_links(int count, struct list_head *tmp)
358{
359 struct cg_cgroup_link *link;
360 int i;
361 INIT_LIST_HEAD(tmp);
362 for (i = 0; i < count; i++) {
363 link = kmalloc(sizeof(*link), GFP_KERNEL);
364 if (!link) {
365 while (!list_empty(tmp)) {
366 link = list_entry(tmp->next,
367 struct cg_cgroup_link,
368 cont_link_list);
369 list_del(&link->cont_link_list);
370 kfree(link);
371 }
372 return -ENOMEM;
373 }
374 list_add(&link->cont_link_list, tmp);
375 }
376 return 0;
377}
378
379static void free_cg_links(struct list_head *tmp)
380{
381 while (!list_empty(tmp)) {
382 struct cg_cgroup_link *link;
383 link = list_entry(tmp->next,
384 struct cg_cgroup_link,
385 cont_link_list);
386 list_del(&link->cont_link_list);
387 kfree(link);
388 }
389}
390
391/*
392 * find_css_set() takes an existing cgroup group and a
393 * cgroup object, and returns a css_set object that's
394 * equivalent to the old group, but with the given cgroup
395 * substituted into the appropriate hierarchy. Must be called with
396 * cgroup_mutex held
397 */
398
399static struct css_set *find_css_set(
400 struct css_set *oldcg, struct cgroup *cont)
401{
402 struct css_set *res;
403 struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
404 int i;
405
406 struct list_head tmp_cg_links;
407 struct cg_cgroup_link *link;
408
409 /* First see if we already have a cgroup group that matches
410 * the desired set */
411 write_lock(&css_set_lock);
412 res = find_existing_css_set(oldcg, cont, template);
413 if (res)
414 get_css_set(res);
415 write_unlock(&css_set_lock);
416
417 if (res)
418 return res;
419
420 res = kmalloc(sizeof(*res), GFP_KERNEL);
421 if (!res)
422 return NULL;
423
424 /* Allocate all the cg_cgroup_link objects that we'll need */
425 if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
426 kfree(res);
427 return NULL;
428 }
429
430 kref_init(&res->ref);
431 INIT_LIST_HEAD(&res->cg_links);
432 INIT_LIST_HEAD(&res->tasks);
433
434 /* Copy the set of subsystem state objects generated in
435 * find_existing_css_set() */
436 memcpy(res->subsys, template, sizeof(res->subsys));
437
438 write_lock(&css_set_lock);
439 /* Add reference counts and links from the new css_set. */
440 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
441 struct cgroup *cont = res->subsys[i]->cgroup;
442 struct cgroup_subsys *ss = subsys[i];
443 atomic_inc(&cont->count);
444 /*
445 * We want to add a link once per cgroup, so we
446 * only do it for the first subsystem in each
447 * hierarchy
448 */
449 if (ss->root->subsys_list.next == &ss->sibling) {
450 BUG_ON(list_empty(&tmp_cg_links));
451 link = list_entry(tmp_cg_links.next,
452 struct cg_cgroup_link,
453 cont_link_list);
454 list_del(&link->cont_link_list);
455 list_add(&link->cont_link_list, &cont->css_sets);
456 link->cg = res;
457 list_add(&link->cg_link_list, &res->cg_links);
458 }
459 }
460 if (list_empty(&rootnode.subsys_list)) {
461 link = list_entry(tmp_cg_links.next,
462 struct cg_cgroup_link,
463 cont_link_list);
464 list_del(&link->cont_link_list);
465 list_add(&link->cont_link_list, &dummytop->css_sets);
466 link->cg = res;
467 list_add(&link->cg_link_list, &res->cg_links);
468 }
469
470 BUG_ON(!list_empty(&tmp_cg_links));
471
472 /* Link this cgroup group into the list */
473 list_add(&res->list, &init_css_set.list);
474 css_set_count++;
475 INIT_LIST_HEAD(&res->tasks);
476 write_unlock(&css_set_lock);
477
478 return res;
b4f48b63
PM
479}
480
ddbcc7e8
PM
481/*
482 * There is one global cgroup mutex. We also require taking
483 * task_lock() when dereferencing a task's cgroup subsys pointers.
484 * See "The task_lock() exception", at the end of this comment.
485 *
486 * A task must hold cgroup_mutex to modify cgroups.
487 *
488 * Any task can increment and decrement the count field without lock.
489 * So in general, code holding cgroup_mutex can't rely on the count
490 * field not changing. However, if the count goes to zero, then only
491 * attach_task() can increment it again. Because a count of zero
492 * means that no tasks are currently attached, therefore there is no
493 * way a task attached to that cgroup can fork (the other way to
494 * increment the count). So code holding cgroup_mutex can safely
495 * assume that if the count is zero, it will stay zero. Similarly, if
496 * a task holds cgroup_mutex on a cgroup with zero count, it
497 * knows that the cgroup won't be removed, as cgroup_rmdir()
498 * needs that mutex.
499 *
500 * The cgroup_common_file_write handler for operations that modify
501 * the cgroup hierarchy holds cgroup_mutex across the entire operation,
502 * single threading all such cgroup modifications across the system.
503 *
504 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
505 * (usually) take cgroup_mutex. These are the two most performance
506 * critical pieces of code here. The exception occurs on cgroup_exit(),
507 * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
508 * is taken, and if the cgroup count is zero, a usermode call made
509 * to /sbin/cgroup_release_agent with the name of the cgroup (path
510 * relative to the root of cgroup file system) as the argument.
511 *
512 * A cgroup can only be deleted if both its 'count' of using tasks
513 * is zero, and its list of 'children' cgroups is empty. Since all
514 * tasks in the system use _some_ cgroup, and since there is always at
515 * least one task in the system (init, pid == 1), therefore, top_cgroup
516 * always has either children cgroups and/or using tasks. So we don't
517 * need a special hack to ensure that top_cgroup cannot be deleted.
518 *
519 * The task_lock() exception
520 *
521 * The need for this exception arises from the action of
522 * attach_task(), which overwrites one tasks cgroup pointer with
523 * another. It does so using cgroup_mutexe, however there are
524 * several performance critical places that need to reference
525 * task->cgroup without the expense of grabbing a system global
526 * mutex. Therefore except as noted below, when dereferencing or, as
527 * in attach_task(), modifying a task'ss cgroup pointer we use
528 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
529 * the task_struct routinely used for such matters.
530 *
531 * P.S. One more locking exception. RCU is used to guard the
532 * update of a tasks cgroup pointer by attach_task()
533 */
534
ddbcc7e8
PM
535/**
536 * cgroup_lock - lock out any changes to cgroup structures
537 *
538 */
539
540void cgroup_lock(void)
541{
542 mutex_lock(&cgroup_mutex);
543}
544
545/**
546 * cgroup_unlock - release lock on cgroup changes
547 *
548 * Undo the lock taken in a previous cgroup_lock() call.
549 */
550
551void cgroup_unlock(void)
552{
553 mutex_unlock(&cgroup_mutex);
554}
555
556/*
557 * A couple of forward declarations required, due to cyclic reference loop:
558 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
559 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
560 * -> cgroup_mkdir.
561 */
562
563static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
564static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
565static int cgroup_populate_dir(struct cgroup *cont);
566static struct inode_operations cgroup_dir_inode_operations;
a424316c
PM
567static struct file_operations proc_cgroupstats_operations;
568
569static struct backing_dev_info cgroup_backing_dev_info = {
570 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
571};
ddbcc7e8
PM
572
573static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
574{
575 struct inode *inode = new_inode(sb);
ddbcc7e8
PM
576
577 if (inode) {
578 inode->i_mode = mode;
579 inode->i_uid = current->fsuid;
580 inode->i_gid = current->fsgid;
581 inode->i_blocks = 0;
582 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
583 inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
584 }
585 return inode;
586}
587
588static void cgroup_diput(struct dentry *dentry, struct inode *inode)
589{
590 /* is dentry a directory ? if so, kfree() associated cgroup */
591 if (S_ISDIR(inode->i_mode)) {
592 struct cgroup *cont = dentry->d_fsdata;
593 BUG_ON(!(cgroup_is_removed(cont)));
81a6a5cd
PM
594 /* It's possible for external users to be holding css
595 * reference counts on a cgroup; css_put() needs to
596 * be able to access the cgroup after decrementing
597 * the reference count in order to know if it needs to
598 * queue the cgroup to be handled by the release
599 * agent */
600 synchronize_rcu();
ddbcc7e8
PM
601 kfree(cont);
602 }
603 iput(inode);
604}
605
606static void remove_dir(struct dentry *d)
607{
608 struct dentry *parent = dget(d->d_parent);
609
610 d_delete(d);
611 simple_rmdir(parent->d_inode, d);
612 dput(parent);
613}
614
615static void cgroup_clear_directory(struct dentry *dentry)
616{
617 struct list_head *node;
618
619 BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
620 spin_lock(&dcache_lock);
621 node = dentry->d_subdirs.next;
622 while (node != &dentry->d_subdirs) {
623 struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
624 list_del_init(node);
625 if (d->d_inode) {
626 /* This should never be called on a cgroup
627 * directory with child cgroups */
628 BUG_ON(d->d_inode->i_mode & S_IFDIR);
629 d = dget_locked(d);
630 spin_unlock(&dcache_lock);
631 d_delete(d);
632 simple_unlink(dentry->d_inode, d);
633 dput(d);
634 spin_lock(&dcache_lock);
635 }
636 node = dentry->d_subdirs.next;
637 }
638 spin_unlock(&dcache_lock);
639}
640
641/*
642 * NOTE : the dentry must have been dget()'ed
643 */
644static void cgroup_d_remove_dir(struct dentry *dentry)
645{
646 cgroup_clear_directory(dentry);
647
648 spin_lock(&dcache_lock);
649 list_del_init(&dentry->d_u.d_child);
650 spin_unlock(&dcache_lock);
651 remove_dir(dentry);
652}
653
654static int rebind_subsystems(struct cgroupfs_root *root,
655 unsigned long final_bits)
656{
657 unsigned long added_bits, removed_bits;
658 struct cgroup *cont = &root->top_cgroup;
659 int i;
660
661 removed_bits = root->actual_subsys_bits & ~final_bits;
662 added_bits = final_bits & ~root->actual_subsys_bits;
663 /* Check that any added subsystems are currently free */
664 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
665 unsigned long long bit = 1ull << i;
666 struct cgroup_subsys *ss = subsys[i];
667 if (!(bit & added_bits))
668 continue;
669 if (ss->root != &rootnode) {
670 /* Subsystem isn't free */
671 return -EBUSY;
672 }
673 }
674
675 /* Currently we don't handle adding/removing subsystems when
676 * any child cgroups exist. This is theoretically supportable
677 * but involves complex error handling, so it's being left until
678 * later */
679 if (!list_empty(&cont->children))
680 return -EBUSY;
681
682 /* Process each subsystem */
683 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
684 struct cgroup_subsys *ss = subsys[i];
685 unsigned long bit = 1UL << i;
686 if (bit & added_bits) {
687 /* We're binding this subsystem to this hierarchy */
688 BUG_ON(cont->subsys[i]);
689 BUG_ON(!dummytop->subsys[i]);
690 BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
691 cont->subsys[i] = dummytop->subsys[i];
692 cont->subsys[i]->cgroup = cont;
693 list_add(&ss->sibling, &root->subsys_list);
694 rcu_assign_pointer(ss->root, root);
695 if (ss->bind)
696 ss->bind(ss, cont);
697
698 } else if (bit & removed_bits) {
699 /* We're removing this subsystem */
700 BUG_ON(cont->subsys[i] != dummytop->subsys[i]);
701 BUG_ON(cont->subsys[i]->cgroup != cont);
702 if (ss->bind)
703 ss->bind(ss, dummytop);
704 dummytop->subsys[i]->cgroup = dummytop;
705 cont->subsys[i] = NULL;
706 rcu_assign_pointer(subsys[i]->root, &rootnode);
707 list_del(&ss->sibling);
708 } else if (bit & final_bits) {
709 /* Subsystem state should already exist */
710 BUG_ON(!cont->subsys[i]);
711 } else {
712 /* Subsystem state shouldn't exist */
713 BUG_ON(cont->subsys[i]);
714 }
715 }
716 root->subsys_bits = root->actual_subsys_bits = final_bits;
717 synchronize_rcu();
718
719 return 0;
720}
721
722static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
723{
724 struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
725 struct cgroup_subsys *ss;
726
727 mutex_lock(&cgroup_mutex);
728 for_each_subsys(root, ss)
729 seq_printf(seq, ",%s", ss->name);
730 if (test_bit(ROOT_NOPREFIX, &root->flags))
731 seq_puts(seq, ",noprefix");
81a6a5cd
PM
732 if (strlen(root->release_agent_path))
733 seq_printf(seq, ",release_agent=%s", root->release_agent_path);
ddbcc7e8
PM
734 mutex_unlock(&cgroup_mutex);
735 return 0;
736}
737
738struct cgroup_sb_opts {
739 unsigned long subsys_bits;
740 unsigned long flags;
81a6a5cd 741 char *release_agent;
ddbcc7e8
PM
742};
743
744/* Convert a hierarchy specifier into a bitmask of subsystems and
745 * flags. */
746static int parse_cgroupfs_options(char *data,
747 struct cgroup_sb_opts *opts)
748{
749 char *token, *o = data ?: "all";
750
751 opts->subsys_bits = 0;
752 opts->flags = 0;
81a6a5cd 753 opts->release_agent = NULL;
ddbcc7e8
PM
754
755 while ((token = strsep(&o, ",")) != NULL) {
756 if (!*token)
757 return -EINVAL;
758 if (!strcmp(token, "all")) {
759 opts->subsys_bits = (1 << CGROUP_SUBSYS_COUNT) - 1;
760 } else if (!strcmp(token, "noprefix")) {
761 set_bit(ROOT_NOPREFIX, &opts->flags);
81a6a5cd
PM
762 } else if (!strncmp(token, "release_agent=", 14)) {
763 /* Specifying two release agents is forbidden */
764 if (opts->release_agent)
765 return -EINVAL;
766 opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL);
767 if (!opts->release_agent)
768 return -ENOMEM;
769 strncpy(opts->release_agent, token + 14, PATH_MAX - 1);
770 opts->release_agent[PATH_MAX - 1] = 0;
ddbcc7e8
PM
771 } else {
772 struct cgroup_subsys *ss;
773 int i;
774 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
775 ss = subsys[i];
776 if (!strcmp(token, ss->name)) {
777 set_bit(i, &opts->subsys_bits);
778 break;
779 }
780 }
781 if (i == CGROUP_SUBSYS_COUNT)
782 return -ENOENT;
783 }
784 }
785
786 /* We can't have an empty hierarchy */
787 if (!opts->subsys_bits)
788 return -EINVAL;
789
790 return 0;
791}
792
793static int cgroup_remount(struct super_block *sb, int *flags, char *data)
794{
795 int ret = 0;
796 struct cgroupfs_root *root = sb->s_fs_info;
797 struct cgroup *cont = &root->top_cgroup;
798 struct cgroup_sb_opts opts;
799
800 mutex_lock(&cont->dentry->d_inode->i_mutex);
801 mutex_lock(&cgroup_mutex);
802
803 /* See what subsystems are wanted */
804 ret = parse_cgroupfs_options(data, &opts);
805 if (ret)
806 goto out_unlock;
807
808 /* Don't allow flags to change at remount */
809 if (opts.flags != root->flags) {
810 ret = -EINVAL;
811 goto out_unlock;
812 }
813
814 ret = rebind_subsystems(root, opts.subsys_bits);
815
816 /* (re)populate subsystem files */
817 if (!ret)
818 cgroup_populate_dir(cont);
819
81a6a5cd
PM
820 if (opts.release_agent)
821 strcpy(root->release_agent_path, opts.release_agent);
ddbcc7e8 822 out_unlock:
81a6a5cd
PM
823 if (opts.release_agent)
824 kfree(opts.release_agent);
ddbcc7e8
PM
825 mutex_unlock(&cgroup_mutex);
826 mutex_unlock(&cont->dentry->d_inode->i_mutex);
827 return ret;
828}
829
830static struct super_operations cgroup_ops = {
831 .statfs = simple_statfs,
832 .drop_inode = generic_delete_inode,
833 .show_options = cgroup_show_options,
834 .remount_fs = cgroup_remount,
835};
836
837static void init_cgroup_root(struct cgroupfs_root *root)
838{
839 struct cgroup *cont = &root->top_cgroup;
840 INIT_LIST_HEAD(&root->subsys_list);
841 INIT_LIST_HEAD(&root->root_list);
842 root->number_of_cgroups = 1;
843 cont->root = root;
844 cont->top_cgroup = cont;
845 INIT_LIST_HEAD(&cont->sibling);
846 INIT_LIST_HEAD(&cont->children);
817929ec 847 INIT_LIST_HEAD(&cont->css_sets);
81a6a5cd 848 INIT_LIST_HEAD(&cont->release_list);
ddbcc7e8
PM
849}
850
851static int cgroup_test_super(struct super_block *sb, void *data)
852{
853 struct cgroupfs_root *new = data;
854 struct cgroupfs_root *root = sb->s_fs_info;
855
856 /* First check subsystems */
857 if (new->subsys_bits != root->subsys_bits)
858 return 0;
859
860 /* Next check flags */
861 if (new->flags != root->flags)
862 return 0;
863
864 return 1;
865}
866
867static int cgroup_set_super(struct super_block *sb, void *data)
868{
869 int ret;
870 struct cgroupfs_root *root = data;
871
872 ret = set_anon_super(sb, NULL);
873 if (ret)
874 return ret;
875
876 sb->s_fs_info = root;
877 root->sb = sb;
878
879 sb->s_blocksize = PAGE_CACHE_SIZE;
880 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
881 sb->s_magic = CGROUP_SUPER_MAGIC;
882 sb->s_op = &cgroup_ops;
883
884 return 0;
885}
886
887static int cgroup_get_rootdir(struct super_block *sb)
888{
889 struct inode *inode =
890 cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
891 struct dentry *dentry;
892
893 if (!inode)
894 return -ENOMEM;
895
896 inode->i_op = &simple_dir_inode_operations;
897 inode->i_fop = &simple_dir_operations;
898 inode->i_op = &cgroup_dir_inode_operations;
899 /* directories start off with i_nlink == 2 (for "." entry) */
900 inc_nlink(inode);
901 dentry = d_alloc_root(inode);
902 if (!dentry) {
903 iput(inode);
904 return -ENOMEM;
905 }
906 sb->s_root = dentry;
907 return 0;
908}
909
910static int cgroup_get_sb(struct file_system_type *fs_type,
911 int flags, const char *unused_dev_name,
912 void *data, struct vfsmount *mnt)
913{
914 struct cgroup_sb_opts opts;
915 int ret = 0;
916 struct super_block *sb;
917 struct cgroupfs_root *root;
817929ec
PM
918 struct list_head tmp_cg_links, *l;
919 INIT_LIST_HEAD(&tmp_cg_links);
ddbcc7e8
PM
920
921 /* First find the desired set of subsystems */
922 ret = parse_cgroupfs_options(data, &opts);
81a6a5cd
PM
923 if (ret) {
924 if (opts.release_agent)
925 kfree(opts.release_agent);
ddbcc7e8 926 return ret;
81a6a5cd 927 }
ddbcc7e8
PM
928
929 root = kzalloc(sizeof(*root), GFP_KERNEL);
930 if (!root)
931 return -ENOMEM;
932
933 init_cgroup_root(root);
934 root->subsys_bits = opts.subsys_bits;
935 root->flags = opts.flags;
81a6a5cd
PM
936 if (opts.release_agent) {
937 strcpy(root->release_agent_path, opts.release_agent);
938 kfree(opts.release_agent);
939 }
ddbcc7e8
PM
940
941 sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root);
942
943 if (IS_ERR(sb)) {
944 kfree(root);
945 return PTR_ERR(sb);
946 }
947
948 if (sb->s_fs_info != root) {
949 /* Reusing an existing superblock */
950 BUG_ON(sb->s_root == NULL);
951 kfree(root);
952 root = NULL;
953 } else {
954 /* New superblock */
955 struct cgroup *cont = &root->top_cgroup;
817929ec 956 struct inode *inode;
ddbcc7e8
PM
957
958 BUG_ON(sb->s_root != NULL);
959
960 ret = cgroup_get_rootdir(sb);
961 if (ret)
962 goto drop_new_super;
817929ec 963 inode = sb->s_root->d_inode;
ddbcc7e8 964
817929ec 965 mutex_lock(&inode->i_mutex);
ddbcc7e8
PM
966 mutex_lock(&cgroup_mutex);
967
817929ec
PM
968 /*
969 * We're accessing css_set_count without locking
970 * css_set_lock here, but that's OK - it can only be
971 * increased by someone holding cgroup_lock, and
972 * that's us. The worst that can happen is that we
973 * have some link structures left over
974 */
975 ret = allocate_cg_links(css_set_count, &tmp_cg_links);
976 if (ret) {
977 mutex_unlock(&cgroup_mutex);
978 mutex_unlock(&inode->i_mutex);
979 goto drop_new_super;
980 }
981
ddbcc7e8
PM
982 ret = rebind_subsystems(root, root->subsys_bits);
983 if (ret == -EBUSY) {
984 mutex_unlock(&cgroup_mutex);
817929ec 985 mutex_unlock(&inode->i_mutex);
ddbcc7e8
PM
986 goto drop_new_super;
987 }
988
989 /* EBUSY should be the only error here */
990 BUG_ON(ret);
991
992 list_add(&root->root_list, &roots);
817929ec 993 root_count++;
ddbcc7e8
PM
994
995 sb->s_root->d_fsdata = &root->top_cgroup;
996 root->top_cgroup.dentry = sb->s_root;
997
817929ec
PM
998 /* Link the top cgroup in this hierarchy into all
999 * the css_set objects */
1000 write_lock(&css_set_lock);
1001 l = &init_css_set.list;
1002 do {
1003 struct css_set *cg;
1004 struct cg_cgroup_link *link;
1005 cg = list_entry(l, struct css_set, list);
1006 BUG_ON(list_empty(&tmp_cg_links));
1007 link = list_entry(tmp_cg_links.next,
1008 struct cg_cgroup_link,
1009 cont_link_list);
1010 list_del(&link->cont_link_list);
1011 link->cg = cg;
1012 list_add(&link->cont_link_list,
1013 &root->top_cgroup.css_sets);
1014 list_add(&link->cg_link_list, &cg->cg_links);
1015 l = l->next;
1016 } while (l != &init_css_set.list);
1017 write_unlock(&css_set_lock);
1018
1019 free_cg_links(&tmp_cg_links);
1020
ddbcc7e8
PM
1021 BUG_ON(!list_empty(&cont->sibling));
1022 BUG_ON(!list_empty(&cont->children));
1023 BUG_ON(root->number_of_cgroups != 1);
1024
ddbcc7e8 1025 cgroup_populate_dir(cont);
817929ec 1026 mutex_unlock(&inode->i_mutex);
ddbcc7e8
PM
1027 mutex_unlock(&cgroup_mutex);
1028 }
1029
1030 return simple_set_mnt(mnt, sb);
1031
1032 drop_new_super:
1033 up_write(&sb->s_umount);
1034 deactivate_super(sb);
817929ec 1035 free_cg_links(&tmp_cg_links);
ddbcc7e8
PM
1036 return ret;
1037}
1038
1039static void cgroup_kill_sb(struct super_block *sb) {
1040 struct cgroupfs_root *root = sb->s_fs_info;
1041 struct cgroup *cont = &root->top_cgroup;
1042 int ret;
1043
1044 BUG_ON(!root);
1045
1046 BUG_ON(root->number_of_cgroups != 1);
1047 BUG_ON(!list_empty(&cont->children));
1048 BUG_ON(!list_empty(&cont->sibling));
1049
1050 mutex_lock(&cgroup_mutex);
1051
1052 /* Rebind all subsystems back to the default hierarchy */
1053 ret = rebind_subsystems(root, 0);
1054 /* Shouldn't be able to fail ... */
1055 BUG_ON(ret);
1056
817929ec
PM
1057 /*
1058 * Release all the links from css_sets to this hierarchy's
1059 * root cgroup
1060 */
1061 write_lock(&css_set_lock);
1062 while (!list_empty(&cont->css_sets)) {
1063 struct cg_cgroup_link *link;
1064 link = list_entry(cont->css_sets.next,
1065 struct cg_cgroup_link, cont_link_list);
1066 list_del(&link->cg_link_list);
1067 list_del(&link->cont_link_list);
1068 kfree(link);
1069 }
1070 write_unlock(&css_set_lock);
1071
1072 if (!list_empty(&root->root_list)) {
ddbcc7e8 1073 list_del(&root->root_list);
817929ec
PM
1074 root_count--;
1075 }
ddbcc7e8
PM
1076 mutex_unlock(&cgroup_mutex);
1077
1078 kfree(root);
1079 kill_litter_super(sb);
1080}
1081
1082static struct file_system_type cgroup_fs_type = {
1083 .name = "cgroup",
1084 .get_sb = cgroup_get_sb,
1085 .kill_sb = cgroup_kill_sb,
1086};
1087
1088static inline struct cgroup *__d_cont(struct dentry *dentry)
1089{
1090 return dentry->d_fsdata;
1091}
1092
1093static inline struct cftype *__d_cft(struct dentry *dentry)
1094{
1095 return dentry->d_fsdata;
1096}
1097
1098/*
1099 * Called with cgroup_mutex held. Writes path of cgroup into buf.
1100 * Returns 0 on success, -errno on error.
1101 */
1102int cgroup_path(const struct cgroup *cont, char *buf, int buflen)
1103{
1104 char *start;
1105
1106 if (cont == dummytop) {
1107 /*
1108 * Inactive subsystems have no dentry for their root
1109 * cgroup
1110 */
1111 strcpy(buf, "/");
1112 return 0;
1113 }
1114
1115 start = buf + buflen;
1116
1117 *--start = '\0';
1118 for (;;) {
1119 int len = cont->dentry->d_name.len;
1120 if ((start -= len) < buf)
1121 return -ENAMETOOLONG;
1122 memcpy(start, cont->dentry->d_name.name, len);
1123 cont = cont->parent;
1124 if (!cont)
1125 break;
1126 if (!cont->parent)
1127 continue;
1128 if (--start < buf)
1129 return -ENAMETOOLONG;
1130 *start = '/';
1131 }
1132 memmove(buf, start, buf + buflen - start);
1133 return 0;
1134}
1135
bbcb81d0
PM
1136/*
1137 * Return the first subsystem attached to a cgroup's hierarchy, and
1138 * its subsystem id.
1139 */
1140
1141static void get_first_subsys(const struct cgroup *cont,
1142 struct cgroup_subsys_state **css, int *subsys_id)
1143{
1144 const struct cgroupfs_root *root = cont->root;
1145 const struct cgroup_subsys *test_ss;
1146 BUG_ON(list_empty(&root->subsys_list));
1147 test_ss = list_entry(root->subsys_list.next,
1148 struct cgroup_subsys, sibling);
1149 if (css) {
1150 *css = cont->subsys[test_ss->subsys_id];
1151 BUG_ON(!*css);
1152 }
1153 if (subsys_id)
1154 *subsys_id = test_ss->subsys_id;
1155}
1156
1157/*
1158 * Attach task 'tsk' to cgroup 'cont'
1159 *
1160 * Call holding cgroup_mutex. May take task_lock of
1161 * the task 'pid' during call.
1162 */
1163static int attach_task(struct cgroup *cont, struct task_struct *tsk)
1164{
1165 int retval = 0;
1166 struct cgroup_subsys *ss;
1167 struct cgroup *oldcont;
817929ec
PM
1168 struct css_set *cg = tsk->cgroups;
1169 struct css_set *newcg;
bbcb81d0 1170 struct cgroupfs_root *root = cont->root;
bbcb81d0
PM
1171 int subsys_id;
1172
1173 get_first_subsys(cont, NULL, &subsys_id);
1174
1175 /* Nothing to do if the task is already in that cgroup */
1176 oldcont = task_cgroup(tsk, subsys_id);
1177 if (cont == oldcont)
1178 return 0;
1179
1180 for_each_subsys(root, ss) {
1181 if (ss->can_attach) {
1182 retval = ss->can_attach(ss, cont, tsk);
1183 if (retval) {
1184 return retval;
1185 }
1186 }
1187 }
1188
817929ec
PM
1189 /*
1190 * Locate or allocate a new css_set for this task,
1191 * based on its final set of cgroups
1192 */
1193 newcg = find_css_set(cg, cont);
1194 if (!newcg) {
1195 return -ENOMEM;
1196 }
1197
bbcb81d0
PM
1198 task_lock(tsk);
1199 if (tsk->flags & PF_EXITING) {
1200 task_unlock(tsk);
817929ec 1201 put_css_set(newcg);
bbcb81d0
PM
1202 return -ESRCH;
1203 }
817929ec 1204 rcu_assign_pointer(tsk->cgroups, newcg);
bbcb81d0
PM
1205 task_unlock(tsk);
1206
817929ec
PM
1207 /* Update the css_set linked lists if we're using them */
1208 write_lock(&css_set_lock);
1209 if (!list_empty(&tsk->cg_list)) {
1210 list_del(&tsk->cg_list);
1211 list_add(&tsk->cg_list, &newcg->tasks);
1212 }
1213 write_unlock(&css_set_lock);
1214
bbcb81d0
PM
1215 for_each_subsys(root, ss) {
1216 if (ss->attach) {
1217 ss->attach(ss, cont, oldcont, tsk);
1218 }
1219 }
81a6a5cd 1220 set_bit(CONT_RELEASABLE, &oldcont->flags);
bbcb81d0 1221 synchronize_rcu();
817929ec 1222 put_css_set(cg);
bbcb81d0
PM
1223 return 0;
1224}
1225
1226/*
1227 * Attach task with pid 'pid' to cgroup 'cont'. Call with
1228 * cgroup_mutex, may take task_lock of task
1229 */
1230static int attach_task_by_pid(struct cgroup *cont, char *pidbuf)
1231{
1232 pid_t pid;
1233 struct task_struct *tsk;
1234 int ret;
1235
1236 if (sscanf(pidbuf, "%d", &pid) != 1)
1237 return -EIO;
1238
1239 if (pid) {
1240 rcu_read_lock();
1241 tsk = find_task_by_pid(pid);
1242 if (!tsk || tsk->flags & PF_EXITING) {
1243 rcu_read_unlock();
1244 return -ESRCH;
1245 }
1246 get_task_struct(tsk);
1247 rcu_read_unlock();
1248
1249 if ((current->euid) && (current->euid != tsk->uid)
1250 && (current->euid != tsk->suid)) {
1251 put_task_struct(tsk);
1252 return -EACCES;
1253 }
1254 } else {
1255 tsk = current;
1256 get_task_struct(tsk);
1257 }
1258
1259 ret = attach_task(cont, tsk);
1260 put_task_struct(tsk);
1261 return ret;
1262}
1263
ddbcc7e8
PM
1264/* The various types of files and directories in a cgroup file system */
1265
1266enum cgroup_filetype {
1267 FILE_ROOT,
1268 FILE_DIR,
1269 FILE_TASKLIST,
81a6a5cd
PM
1270 FILE_NOTIFY_ON_RELEASE,
1271 FILE_RELEASABLE,
1272 FILE_RELEASE_AGENT,
ddbcc7e8
PM
1273};
1274
355e0c48
PM
1275static ssize_t cgroup_write_uint(struct cgroup *cont, struct cftype *cft,
1276 struct file *file,
1277 const char __user *userbuf,
1278 size_t nbytes, loff_t *unused_ppos)
1279{
1280 char buffer[64];
1281 int retval = 0;
1282 u64 val;
1283 char *end;
1284
1285 if (!nbytes)
1286 return -EINVAL;
1287 if (nbytes >= sizeof(buffer))
1288 return -E2BIG;
1289 if (copy_from_user(buffer, userbuf, nbytes))
1290 return -EFAULT;
1291
1292 buffer[nbytes] = 0; /* nul-terminate */
1293
1294 /* strip newline if necessary */
1295 if (nbytes && (buffer[nbytes-1] == '\n'))
1296 buffer[nbytes-1] = 0;
1297 val = simple_strtoull(buffer, &end, 0);
1298 if (*end)
1299 return -EINVAL;
1300
1301 /* Pass to subsystem */
1302 retval = cft->write_uint(cont, cft, val);
1303 if (!retval)
1304 retval = nbytes;
1305 return retval;
1306}
1307
bbcb81d0
PM
1308static ssize_t cgroup_common_file_write(struct cgroup *cont,
1309 struct cftype *cft,
1310 struct file *file,
1311 const char __user *userbuf,
1312 size_t nbytes, loff_t *unused_ppos)
1313{
1314 enum cgroup_filetype type = cft->private;
1315 char *buffer;
1316 int retval = 0;
1317
1318 if (nbytes >= PATH_MAX)
1319 return -E2BIG;
1320
1321 /* +1 for nul-terminator */
1322 buffer = kmalloc(nbytes + 1, GFP_KERNEL);
1323 if (buffer == NULL)
1324 return -ENOMEM;
1325
1326 if (copy_from_user(buffer, userbuf, nbytes)) {
1327 retval = -EFAULT;
1328 goto out1;
1329 }
1330 buffer[nbytes] = 0; /* nul-terminate */
1331
1332 mutex_lock(&cgroup_mutex);
1333
1334 if (cgroup_is_removed(cont)) {
1335 retval = -ENODEV;
1336 goto out2;
1337 }
1338
1339 switch (type) {
1340 case FILE_TASKLIST:
1341 retval = attach_task_by_pid(cont, buffer);
1342 break;
81a6a5cd
PM
1343 case FILE_NOTIFY_ON_RELEASE:
1344 clear_bit(CONT_RELEASABLE, &cont->flags);
1345 if (simple_strtoul(buffer, NULL, 10) != 0)
1346 set_bit(CONT_NOTIFY_ON_RELEASE, &cont->flags);
1347 else
1348 clear_bit(CONT_NOTIFY_ON_RELEASE, &cont->flags);
1349 break;
1350 case FILE_RELEASE_AGENT:
1351 {
1352 struct cgroupfs_root *root = cont->root;
1353 /* Strip trailing newline */
1354 if (nbytes && (buffer[nbytes-1] == '\n')) {
1355 buffer[nbytes-1] = 0;
1356 }
1357 if (nbytes < sizeof(root->release_agent_path)) {
1358 /* We never write anything other than '\0'
1359 * into the last char of release_agent_path,
1360 * so it always remains a NUL-terminated
1361 * string */
1362 strncpy(root->release_agent_path, buffer, nbytes);
1363 root->release_agent_path[nbytes] = 0;
1364 } else {
1365 retval = -ENOSPC;
1366 }
1367 break;
1368 }
bbcb81d0
PM
1369 default:
1370 retval = -EINVAL;
1371 goto out2;
1372 }
1373
1374 if (retval == 0)
1375 retval = nbytes;
1376out2:
1377 mutex_unlock(&cgroup_mutex);
1378out1:
1379 kfree(buffer);
1380 return retval;
1381}
1382
ddbcc7e8
PM
1383static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
1384 size_t nbytes, loff_t *ppos)
1385{
1386 struct cftype *cft = __d_cft(file->f_dentry);
1387 struct cgroup *cont = __d_cont(file->f_dentry->d_parent);
1388
1389 if (!cft)
1390 return -ENODEV;
355e0c48
PM
1391 if (cft->write)
1392 return cft->write(cont, cft, file, buf, nbytes, ppos);
1393 if (cft->write_uint)
1394 return cgroup_write_uint(cont, cft, file, buf, nbytes, ppos);
1395 return -EINVAL;
ddbcc7e8
PM
1396}
1397
1398static ssize_t cgroup_read_uint(struct cgroup *cont, struct cftype *cft,
1399 struct file *file,
1400 char __user *buf, size_t nbytes,
1401 loff_t *ppos)
1402{
1403 char tmp[64];
1404 u64 val = cft->read_uint(cont, cft);
1405 int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
1406
1407 return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
1408}
1409
81a6a5cd
PM
1410static ssize_t cgroup_common_file_read(struct cgroup *cont,
1411 struct cftype *cft,
1412 struct file *file,
1413 char __user *buf,
1414 size_t nbytes, loff_t *ppos)
1415{
1416 enum cgroup_filetype type = cft->private;
1417 char *page;
1418 ssize_t retval = 0;
1419 char *s;
1420
1421 if (!(page = (char *)__get_free_page(GFP_KERNEL)))
1422 return -ENOMEM;
1423
1424 s = page;
1425
1426 switch (type) {
1427 case FILE_RELEASE_AGENT:
1428 {
1429 struct cgroupfs_root *root;
1430 size_t n;
1431 mutex_lock(&cgroup_mutex);
1432 root = cont->root;
1433 n = strnlen(root->release_agent_path,
1434 sizeof(root->release_agent_path));
1435 n = min(n, (size_t) PAGE_SIZE);
1436 strncpy(s, root->release_agent_path, n);
1437 mutex_unlock(&cgroup_mutex);
1438 s += n;
1439 break;
1440 }
1441 default:
1442 retval = -EINVAL;
1443 goto out;
1444 }
1445 *s++ = '\n';
1446
1447 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1448out:
1449 free_page((unsigned long)page);
1450 return retval;
1451}
1452
ddbcc7e8
PM
1453static ssize_t cgroup_file_read(struct file *file, char __user *buf,
1454 size_t nbytes, loff_t *ppos)
1455{
1456 struct cftype *cft = __d_cft(file->f_dentry);
1457 struct cgroup *cont = __d_cont(file->f_dentry->d_parent);
1458
1459 if (!cft)
1460 return -ENODEV;
1461
1462 if (cft->read)
1463 return cft->read(cont, cft, file, buf, nbytes, ppos);
1464 if (cft->read_uint)
1465 return cgroup_read_uint(cont, cft, file, buf, nbytes, ppos);
1466 return -EINVAL;
1467}
1468
1469static int cgroup_file_open(struct inode *inode, struct file *file)
1470{
1471 int err;
1472 struct cftype *cft;
1473
1474 err = generic_file_open(inode, file);
1475 if (err)
1476 return err;
1477
1478 cft = __d_cft(file->f_dentry);
1479 if (!cft)
1480 return -ENODEV;
1481 if (cft->open)
1482 err = cft->open(inode, file);
1483 else
1484 err = 0;
1485
1486 return err;
1487}
1488
1489static int cgroup_file_release(struct inode *inode, struct file *file)
1490{
1491 struct cftype *cft = __d_cft(file->f_dentry);
1492 if (cft->release)
1493 return cft->release(inode, file);
1494 return 0;
1495}
1496
1497/*
1498 * cgroup_rename - Only allow simple rename of directories in place.
1499 */
1500static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
1501 struct inode *new_dir, struct dentry *new_dentry)
1502{
1503 if (!S_ISDIR(old_dentry->d_inode->i_mode))
1504 return -ENOTDIR;
1505 if (new_dentry->d_inode)
1506 return -EEXIST;
1507 if (old_dir != new_dir)
1508 return -EIO;
1509 return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
1510}
1511
1512static struct file_operations cgroup_file_operations = {
1513 .read = cgroup_file_read,
1514 .write = cgroup_file_write,
1515 .llseek = generic_file_llseek,
1516 .open = cgroup_file_open,
1517 .release = cgroup_file_release,
1518};
1519
1520static struct inode_operations cgroup_dir_inode_operations = {
1521 .lookup = simple_lookup,
1522 .mkdir = cgroup_mkdir,
1523 .rmdir = cgroup_rmdir,
1524 .rename = cgroup_rename,
1525};
1526
1527static int cgroup_create_file(struct dentry *dentry, int mode,
1528 struct super_block *sb)
1529{
1530 static struct dentry_operations cgroup_dops = {
1531 .d_iput = cgroup_diput,
1532 };
1533
1534 struct inode *inode;
1535
1536 if (!dentry)
1537 return -ENOENT;
1538 if (dentry->d_inode)
1539 return -EEXIST;
1540
1541 inode = cgroup_new_inode(mode, sb);
1542 if (!inode)
1543 return -ENOMEM;
1544
1545 if (S_ISDIR(mode)) {
1546 inode->i_op = &cgroup_dir_inode_operations;
1547 inode->i_fop = &simple_dir_operations;
1548
1549 /* start off with i_nlink == 2 (for "." entry) */
1550 inc_nlink(inode);
1551
1552 /* start with the directory inode held, so that we can
1553 * populate it without racing with another mkdir */
817929ec 1554 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
ddbcc7e8
PM
1555 } else if (S_ISREG(mode)) {
1556 inode->i_size = 0;
1557 inode->i_fop = &cgroup_file_operations;
1558 }
1559 dentry->d_op = &cgroup_dops;
1560 d_instantiate(dentry, inode);
1561 dget(dentry); /* Extra count - pin the dentry in core */
1562 return 0;
1563}
1564
1565/*
1566 * cgroup_create_dir - create a directory for an object.
1567 * cont: the cgroup we create the directory for.
1568 * It must have a valid ->parent field
1569 * And we are going to fill its ->dentry field.
1570 * dentry: dentry of the new container
1571 * mode: mode to set on new directory.
1572 */
1573static int cgroup_create_dir(struct cgroup *cont, struct dentry *dentry,
1574 int mode)
1575{
1576 struct dentry *parent;
1577 int error = 0;
1578
1579 parent = cont->parent->dentry;
1580 error = cgroup_create_file(dentry, S_IFDIR | mode, cont->root->sb);
1581 if (!error) {
1582 dentry->d_fsdata = cont;
1583 inc_nlink(parent->d_inode);
1584 cont->dentry = dentry;
1585 dget(dentry);
1586 }
1587 dput(dentry);
1588
1589 return error;
1590}
1591
1592int cgroup_add_file(struct cgroup *cont,
1593 struct cgroup_subsys *subsys,
1594 const struct cftype *cft)
1595{
1596 struct dentry *dir = cont->dentry;
1597 struct dentry *dentry;
1598 int error;
1599
1600 char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
1601 if (subsys && !test_bit(ROOT_NOPREFIX, &cont->root->flags)) {
1602 strcpy(name, subsys->name);
1603 strcat(name, ".");
1604 }
1605 strcat(name, cft->name);
1606 BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
1607 dentry = lookup_one_len(name, dir, strlen(name));
1608 if (!IS_ERR(dentry)) {
1609 error = cgroup_create_file(dentry, 0644 | S_IFREG,
1610 cont->root->sb);
1611 if (!error)
1612 dentry->d_fsdata = (void *)cft;
1613 dput(dentry);
1614 } else
1615 error = PTR_ERR(dentry);
1616 return error;
1617}
1618
1619int cgroup_add_files(struct cgroup *cont,
1620 struct cgroup_subsys *subsys,
1621 const struct cftype cft[],
1622 int count)
1623{
1624 int i, err;
1625 for (i = 0; i < count; i++) {
1626 err = cgroup_add_file(cont, subsys, &cft[i]);
1627 if (err)
1628 return err;
1629 }
1630 return 0;
1631}
1632
817929ec
PM
1633/* Count the number of tasks in a cgroup. */
1634
1635int cgroup_task_count(const struct cgroup *cont)
bbcb81d0
PM
1636{
1637 int count = 0;
817929ec
PM
1638 struct list_head *l;
1639
1640 read_lock(&css_set_lock);
1641 l = cont->css_sets.next;
1642 while (l != &cont->css_sets) {
1643 struct cg_cgroup_link *link =
1644 list_entry(l, struct cg_cgroup_link, cont_link_list);
1645 count += atomic_read(&link->cg->ref.refcount);
1646 l = l->next;
1647 }
1648 read_unlock(&css_set_lock);
bbcb81d0
PM
1649 return count;
1650}
1651
817929ec
PM
1652/*
1653 * Advance a list_head iterator. The iterator should be positioned at
1654 * the start of a css_set
1655 */
1656static void cgroup_advance_iter(struct cgroup *cont,
1657 struct cgroup_iter *it)
1658{
1659 struct list_head *l = it->cg_link;
1660 struct cg_cgroup_link *link;
1661 struct css_set *cg;
1662
1663 /* Advance to the next non-empty css_set */
1664 do {
1665 l = l->next;
1666 if (l == &cont->css_sets) {
1667 it->cg_link = NULL;
1668 return;
1669 }
1670 link = list_entry(l, struct cg_cgroup_link, cont_link_list);
1671 cg = link->cg;
1672 } while (list_empty(&cg->tasks));
1673 it->cg_link = l;
1674 it->task = cg->tasks.next;
1675}
1676
1677void cgroup_iter_start(struct cgroup *cont, struct cgroup_iter *it)
1678{
1679 /*
1680 * The first time anyone tries to iterate across a cgroup,
1681 * we need to enable the list linking each css_set to its
1682 * tasks, and fix up all existing tasks.
1683 */
1684 if (!use_task_css_set_links) {
1685 struct task_struct *p, *g;
1686 write_lock(&css_set_lock);
1687 use_task_css_set_links = 1;
1688 do_each_thread(g, p) {
1689 task_lock(p);
1690 if (list_empty(&p->cg_list))
1691 list_add(&p->cg_list, &p->cgroups->tasks);
1692 task_unlock(p);
1693 } while_each_thread(g, p);
1694 write_unlock(&css_set_lock);
1695 }
1696 read_lock(&css_set_lock);
1697 it->cg_link = &cont->css_sets;
1698 cgroup_advance_iter(cont, it);
1699}
1700
1701struct task_struct *cgroup_iter_next(struct cgroup *cont,
1702 struct cgroup_iter *it)
1703{
1704 struct task_struct *res;
1705 struct list_head *l = it->task;
1706
1707 /* If the iterator cg is NULL, we have no tasks */
1708 if (!it->cg_link)
1709 return NULL;
1710 res = list_entry(l, struct task_struct, cg_list);
1711 /* Advance iterator to find next entry */
1712 l = l->next;
1713 if (l == &res->cgroups->tasks) {
1714 /* We reached the end of this task list - move on to
1715 * the next cg_cgroup_link */
1716 cgroup_advance_iter(cont, it);
1717 } else {
1718 it->task = l;
1719 }
1720 return res;
1721}
1722
1723void cgroup_iter_end(struct cgroup *cont, struct cgroup_iter *it)
1724{
1725 read_unlock(&css_set_lock);
1726}
1727
bbcb81d0
PM
1728/*
1729 * Stuff for reading the 'tasks' file.
1730 *
1731 * Reading this file can return large amounts of data if a cgroup has
1732 * *lots* of attached tasks. So it may need several calls to read(),
1733 * but we cannot guarantee that the information we produce is correct
1734 * unless we produce it entirely atomically.
1735 *
1736 * Upon tasks file open(), a struct ctr_struct is allocated, that
1737 * will have a pointer to an array (also allocated here). The struct
1738 * ctr_struct * is stored in file->private_data. Its resources will
1739 * be freed by release() when the file is closed. The array is used
1740 * to sprintf the PIDs and then used by read().
1741 */
1742struct ctr_struct {
1743 char *buf;
1744 int bufsz;
1745};
1746
1747/*
1748 * Load into 'pidarray' up to 'npids' of the tasks using cgroup
1749 * 'cont'. Return actual number of pids loaded. No need to
1750 * task_lock(p) when reading out p->cgroup, since we're in an RCU
1751 * read section, so the css_set can't go away, and is
1752 * immutable after creation.
1753 */
1754static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cont)
1755{
1756 int n = 0;
817929ec
PM
1757 struct cgroup_iter it;
1758 struct task_struct *tsk;
1759 cgroup_iter_start(cont, &it);
1760 while ((tsk = cgroup_iter_next(cont, &it))) {
1761 if (unlikely(n == npids))
1762 break;
1763 pidarray[n++] = pid_nr(task_pid(tsk));
1764 }
1765 cgroup_iter_end(cont, &it);
bbcb81d0
PM
1766 return n;
1767}
1768
1769static int cmppid(const void *a, const void *b)
1770{
1771 return *(pid_t *)a - *(pid_t *)b;
1772}
1773
1774/*
1775 * Convert array 'a' of 'npids' pid_t's to a string of newline separated
1776 * decimal pids in 'buf'. Don't write more than 'sz' chars, but return
1777 * count 'cnt' of how many chars would be written if buf were large enough.
1778 */
1779static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
1780{
1781 int cnt = 0;
1782 int i;
1783
1784 for (i = 0; i < npids; i++)
1785 cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
1786 return cnt;
1787}
1788
1789/*
1790 * Handle an open on 'tasks' file. Prepare a buffer listing the
1791 * process id's of tasks currently attached to the cgroup being opened.
1792 *
1793 * Does not require any specific cgroup mutexes, and does not take any.
1794 */
1795static int cgroup_tasks_open(struct inode *unused, struct file *file)
1796{
1797 struct cgroup *cont = __d_cont(file->f_dentry->d_parent);
1798 struct ctr_struct *ctr;
1799 pid_t *pidarray;
1800 int npids;
1801 char c;
1802
1803 if (!(file->f_mode & FMODE_READ))
1804 return 0;
1805
1806 ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
1807 if (!ctr)
1808 goto err0;
1809
1810 /*
1811 * If cgroup gets more users after we read count, we won't have
1812 * enough space - tough. This race is indistinguishable to the
1813 * caller from the case that the additional cgroup users didn't
1814 * show up until sometime later on.
1815 */
1816 npids = cgroup_task_count(cont);
1817 if (npids) {
1818 pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
1819 if (!pidarray)
1820 goto err1;
1821
1822 npids = pid_array_load(pidarray, npids, cont);
1823 sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
1824
1825 /* Call pid_array_to_buf() twice, first just to get bufsz */
1826 ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
1827 ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
1828 if (!ctr->buf)
1829 goto err2;
1830 ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
1831
1832 kfree(pidarray);
1833 } else {
1834 ctr->buf = 0;
1835 ctr->bufsz = 0;
1836 }
1837 file->private_data = ctr;
1838 return 0;
1839
1840err2:
1841 kfree(pidarray);
1842err1:
1843 kfree(ctr);
1844err0:
1845 return -ENOMEM;
1846}
1847
1848static ssize_t cgroup_tasks_read(struct cgroup *cont,
1849 struct cftype *cft,
1850 struct file *file, char __user *buf,
1851 size_t nbytes, loff_t *ppos)
1852{
1853 struct ctr_struct *ctr = file->private_data;
1854
1855 return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
1856}
1857
1858static int cgroup_tasks_release(struct inode *unused_inode,
1859 struct file *file)
1860{
1861 struct ctr_struct *ctr;
1862
1863 if (file->f_mode & FMODE_READ) {
1864 ctr = file->private_data;
1865 kfree(ctr->buf);
1866 kfree(ctr);
1867 }
1868 return 0;
1869}
1870
81a6a5cd
PM
1871static u64 cgroup_read_notify_on_release(struct cgroup *cont,
1872 struct cftype *cft)
1873{
1874 return notify_on_release(cont);
1875}
1876
1877static u64 cgroup_read_releasable(struct cgroup *cont, struct cftype *cft)
1878{
1879 return test_bit(CONT_RELEASABLE, &cont->flags);
1880}
1881
bbcb81d0
PM
1882/*
1883 * for the common functions, 'private' gives the type of file
1884 */
81a6a5cd
PM
1885static struct cftype files[] = {
1886 {
1887 .name = "tasks",
1888 .open = cgroup_tasks_open,
1889 .read = cgroup_tasks_read,
1890 .write = cgroup_common_file_write,
1891 .release = cgroup_tasks_release,
1892 .private = FILE_TASKLIST,
1893 },
1894
1895 {
1896 .name = "notify_on_release",
1897 .read_uint = cgroup_read_notify_on_release,
1898 .write = cgroup_common_file_write,
1899 .private = FILE_NOTIFY_ON_RELEASE,
1900 },
1901
1902 {
1903 .name = "releasable",
1904 .read_uint = cgroup_read_releasable,
1905 .private = FILE_RELEASABLE,
1906 }
1907};
1908
1909static struct cftype cft_release_agent = {
1910 .name = "release_agent",
1911 .read = cgroup_common_file_read,
bbcb81d0 1912 .write = cgroup_common_file_write,
81a6a5cd 1913 .private = FILE_RELEASE_AGENT,
bbcb81d0
PM
1914};
1915
ddbcc7e8
PM
1916static int cgroup_populate_dir(struct cgroup *cont)
1917{
1918 int err;
1919 struct cgroup_subsys *ss;
1920
1921 /* First clear out any existing files */
1922 cgroup_clear_directory(cont->dentry);
1923
81a6a5cd 1924 err = cgroup_add_files(cont, NULL, files, ARRAY_SIZE(files));
bbcb81d0
PM
1925 if (err < 0)
1926 return err;
1927
81a6a5cd
PM
1928 if (cont == cont->top_cgroup) {
1929 if ((err = cgroup_add_file(cont, NULL, &cft_release_agent)) < 0)
1930 return err;
1931 }
1932
ddbcc7e8
PM
1933 for_each_subsys(cont->root, ss) {
1934 if (ss->populate && (err = ss->populate(ss, cont)) < 0)
1935 return err;
1936 }
1937
1938 return 0;
1939}
1940
1941static void init_cgroup_css(struct cgroup_subsys_state *css,
1942 struct cgroup_subsys *ss,
1943 struct cgroup *cont)
1944{
1945 css->cgroup = cont;
1946 atomic_set(&css->refcnt, 0);
1947 css->flags = 0;
1948 if (cont == dummytop)
1949 set_bit(CSS_ROOT, &css->flags);
1950 BUG_ON(cont->subsys[ss->subsys_id]);
1951 cont->subsys[ss->subsys_id] = css;
1952}
1953
1954/*
1955 * cgroup_create - create a cgroup
1956 * parent: cgroup that will be parent of the new cgroup.
1957 * name: name of the new cgroup. Will be strcpy'ed.
1958 * mode: mode to set on new inode
1959 *
1960 * Must be called with the mutex on the parent inode held
1961 */
1962
1963static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
1964 int mode)
1965{
1966 struct cgroup *cont;
1967 struct cgroupfs_root *root = parent->root;
1968 int err = 0;
1969 struct cgroup_subsys *ss;
1970 struct super_block *sb = root->sb;
1971
1972 cont = kzalloc(sizeof(*cont), GFP_KERNEL);
1973 if (!cont)
1974 return -ENOMEM;
1975
1976 /* Grab a reference on the superblock so the hierarchy doesn't
1977 * get deleted on unmount if there are child cgroups. This
1978 * can be done outside cgroup_mutex, since the sb can't
1979 * disappear while someone has an open control file on the
1980 * fs */
1981 atomic_inc(&sb->s_active);
1982
1983 mutex_lock(&cgroup_mutex);
1984
1985 cont->flags = 0;
1986 INIT_LIST_HEAD(&cont->sibling);
1987 INIT_LIST_HEAD(&cont->children);
817929ec 1988 INIT_LIST_HEAD(&cont->css_sets);
81a6a5cd 1989 INIT_LIST_HEAD(&cont->release_list);
ddbcc7e8
PM
1990
1991 cont->parent = parent;
1992 cont->root = parent->root;
1993 cont->top_cgroup = parent->top_cgroup;
1994
1995 for_each_subsys(root, ss) {
1996 struct cgroup_subsys_state *css = ss->create(ss, cont);
1997 if (IS_ERR(css)) {
1998 err = PTR_ERR(css);
1999 goto err_destroy;
2000 }
2001 init_cgroup_css(css, ss, cont);
2002 }
2003
2004 list_add(&cont->sibling, &cont->parent->children);
2005 root->number_of_cgroups++;
2006
2007 err = cgroup_create_dir(cont, dentry, mode);
2008 if (err < 0)
2009 goto err_remove;
2010
2011 /* The cgroup directory was pre-locked for us */
2012 BUG_ON(!mutex_is_locked(&cont->dentry->d_inode->i_mutex));
2013
2014 err = cgroup_populate_dir(cont);
2015 /* If err < 0, we have a half-filled directory - oh well ;) */
2016
2017 mutex_unlock(&cgroup_mutex);
2018 mutex_unlock(&cont->dentry->d_inode->i_mutex);
2019
2020 return 0;
2021
2022 err_remove:
2023
2024 list_del(&cont->sibling);
2025 root->number_of_cgroups--;
2026
2027 err_destroy:
2028
2029 for_each_subsys(root, ss) {
2030 if (cont->subsys[ss->subsys_id])
2031 ss->destroy(ss, cont);
2032 }
2033
2034 mutex_unlock(&cgroup_mutex);
2035
2036 /* Release the reference count that we took on the superblock */
2037 deactivate_super(sb);
2038
2039 kfree(cont);
2040 return err;
2041}
2042
2043static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2044{
2045 struct cgroup *c_parent = dentry->d_parent->d_fsdata;
2046
2047 /* the vfs holds inode->i_mutex already */
2048 return cgroup_create(c_parent, dentry, mode | S_IFDIR);
2049}
2050
81a6a5cd
PM
2051static inline int cgroup_has_css_refs(struct cgroup *cont)
2052{
2053 /* Check the reference count on each subsystem. Since we
2054 * already established that there are no tasks in the
2055 * cgroup, if the css refcount is also 0, then there should
2056 * be no outstanding references, so the subsystem is safe to
2057 * destroy. We scan across all subsystems rather than using
2058 * the per-hierarchy linked list of mounted subsystems since
2059 * we can be called via check_for_release() with no
2060 * synchronization other than RCU, and the subsystem linked
2061 * list isn't RCU-safe */
2062 int i;
2063 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2064 struct cgroup_subsys *ss = subsys[i];
2065 struct cgroup_subsys_state *css;
2066 /* Skip subsystems not in this hierarchy */
2067 if (ss->root != cont->root)
2068 continue;
2069 css = cont->subsys[ss->subsys_id];
2070 /* When called from check_for_release() it's possible
2071 * that by this point the cgroup has been removed
2072 * and the css deleted. But a false-positive doesn't
2073 * matter, since it can only happen if the cgroup
2074 * has been deleted and hence no longer needs the
2075 * release agent to be called anyway. */
2076 if (css && atomic_read(&css->refcnt)) {
2077 return 1;
2078 }
2079 }
2080 return 0;
2081}
2082
ddbcc7e8
PM
2083static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
2084{
2085 struct cgroup *cont = dentry->d_fsdata;
2086 struct dentry *d;
2087 struct cgroup *parent;
2088 struct cgroup_subsys *ss;
2089 struct super_block *sb;
2090 struct cgroupfs_root *root;
ddbcc7e8
PM
2091
2092 /* the vfs holds both inode->i_mutex already */
2093
2094 mutex_lock(&cgroup_mutex);
2095 if (atomic_read(&cont->count) != 0) {
2096 mutex_unlock(&cgroup_mutex);
2097 return -EBUSY;
2098 }
2099 if (!list_empty(&cont->children)) {
2100 mutex_unlock(&cgroup_mutex);
2101 return -EBUSY;
2102 }
2103
2104 parent = cont->parent;
2105 root = cont->root;
2106 sb = root->sb;
2107
81a6a5cd 2108 if (cgroup_has_css_refs(cont)) {
ddbcc7e8
PM
2109 mutex_unlock(&cgroup_mutex);
2110 return -EBUSY;
2111 }
2112
2113 for_each_subsys(root, ss) {
2114 if (cont->subsys[ss->subsys_id])
2115 ss->destroy(ss, cont);
2116 }
2117
81a6a5cd 2118 spin_lock(&release_list_lock);
ddbcc7e8 2119 set_bit(CONT_REMOVED, &cont->flags);
81a6a5cd
PM
2120 if (!list_empty(&cont->release_list))
2121 list_del(&cont->release_list);
2122 spin_unlock(&release_list_lock);
ddbcc7e8
PM
2123 /* delete my sibling from parent->children */
2124 list_del(&cont->sibling);
2125 spin_lock(&cont->dentry->d_lock);
2126 d = dget(cont->dentry);
2127 cont->dentry = NULL;
2128 spin_unlock(&d->d_lock);
2129
2130 cgroup_d_remove_dir(d);
2131 dput(d);
2132 root->number_of_cgroups--;
2133
81a6a5cd
PM
2134 set_bit(CONT_RELEASABLE, &parent->flags);
2135 check_for_release(parent);
2136
ddbcc7e8
PM
2137 mutex_unlock(&cgroup_mutex);
2138 /* Drop the active superblock reference that we took when we
2139 * created the cgroup */
2140 deactivate_super(sb);
2141 return 0;
2142}
2143
2144static void cgroup_init_subsys(struct cgroup_subsys *ss)
2145{
ddbcc7e8 2146 struct cgroup_subsys_state *css;
817929ec 2147 struct list_head *l;
ddbcc7e8
PM
2148 printk(KERN_ERR "Initializing cgroup subsys %s\n", ss->name);
2149
2150 /* Create the top cgroup state for this subsystem */
2151 ss->root = &rootnode;
2152 css = ss->create(ss, dummytop);
2153 /* We don't handle early failures gracefully */
2154 BUG_ON(IS_ERR(css));
2155 init_cgroup_css(css, ss, dummytop);
2156
817929ec
PM
2157 /* Update all cgroup groups to contain a subsys
2158 * pointer to this state - since the subsystem is
2159 * newly registered, all tasks and hence all cgroup
2160 * groups are in the subsystem's top cgroup. */
2161 write_lock(&css_set_lock);
2162 l = &init_css_set.list;
2163 do {
2164 struct css_set *cg =
2165 list_entry(l, struct css_set, list);
2166 cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
2167 l = l->next;
2168 } while (l != &init_css_set.list);
2169 write_unlock(&css_set_lock);
ddbcc7e8
PM
2170
2171 /* If this subsystem requested that it be notified with fork
2172 * events, we should send it one now for every process in the
2173 * system */
81a6a5cd
PM
2174 if (ss->fork) {
2175 struct task_struct *g, *p;
2176
2177 read_lock(&tasklist_lock);
2178 do_each_thread(g, p) {
2179 ss->fork(ss, p);
2180 } while_each_thread(g, p);
2181 read_unlock(&tasklist_lock);
2182 }
ddbcc7e8
PM
2183
2184 need_forkexit_callback |= ss->fork || ss->exit;
2185
2186 ss->active = 1;
2187}
2188
2189/**
2190 * cgroup_init_early - initialize cgroups at system boot, and
2191 * initialize any subsystems that request early init.
2192 */
2193int __init cgroup_init_early(void)
2194{
2195 int i;
817929ec
PM
2196 kref_init(&init_css_set.ref);
2197 kref_get(&init_css_set.ref);
2198 INIT_LIST_HEAD(&init_css_set.list);
2199 INIT_LIST_HEAD(&init_css_set.cg_links);
2200 INIT_LIST_HEAD(&init_css_set.tasks);
2201 css_set_count = 1;
ddbcc7e8
PM
2202 init_cgroup_root(&rootnode);
2203 list_add(&rootnode.root_list, &roots);
817929ec
PM
2204 root_count = 1;
2205 init_task.cgroups = &init_css_set;
2206
2207 init_css_set_link.cg = &init_css_set;
2208 list_add(&init_css_set_link.cont_link_list,
2209 &rootnode.top_cgroup.css_sets);
2210 list_add(&init_css_set_link.cg_link_list,
2211 &init_css_set.cg_links);
ddbcc7e8
PM
2212
2213 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2214 struct cgroup_subsys *ss = subsys[i];
2215
2216 BUG_ON(!ss->name);
2217 BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
2218 BUG_ON(!ss->create);
2219 BUG_ON(!ss->destroy);
2220 if (ss->subsys_id != i) {
2221 printk(KERN_ERR "Subsys %s id == %d\n",
2222 ss->name, ss->subsys_id);
2223 BUG();
2224 }
2225
2226 if (ss->early_init)
2227 cgroup_init_subsys(ss);
2228 }
2229 return 0;
2230}
2231
2232/**
2233 * cgroup_init - register cgroup filesystem and /proc file, and
2234 * initialize any subsystems that didn't request early init.
2235 */
2236int __init cgroup_init(void)
2237{
2238 int err;
2239 int i;
a424316c
PM
2240 struct proc_dir_entry *entry;
2241
2242 err = bdi_init(&cgroup_backing_dev_info);
2243 if (err)
2244 return err;
ddbcc7e8
PM
2245
2246 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2247 struct cgroup_subsys *ss = subsys[i];
2248 if (!ss->early_init)
2249 cgroup_init_subsys(ss);
2250 }
2251
2252 err = register_filesystem(&cgroup_fs_type);
2253 if (err < 0)
2254 goto out;
2255
a424316c
PM
2256 entry = create_proc_entry("cgroups", 0, NULL);
2257 if (entry)
2258 entry->proc_fops = &proc_cgroupstats_operations;
2259
ddbcc7e8 2260out:
a424316c
PM
2261 if (err)
2262 bdi_destroy(&cgroup_backing_dev_info);
2263
ddbcc7e8
PM
2264 return err;
2265}
b4f48b63 2266
a424316c
PM
2267/*
2268 * proc_cgroup_show()
2269 * - Print task's cgroup paths into seq_file, one line for each hierarchy
2270 * - Used for /proc/<pid>/cgroup.
2271 * - No need to task_lock(tsk) on this tsk->cgroup reference, as it
2272 * doesn't really matter if tsk->cgroup changes after we read it,
2273 * and we take cgroup_mutex, keeping attach_task() from changing it
2274 * anyway. No need to check that tsk->cgroup != NULL, thanks to
2275 * the_top_cgroup_hack in cgroup_exit(), which sets an exiting tasks
2276 * cgroup to top_cgroup.
2277 */
2278
2279/* TODO: Use a proper seq_file iterator */
2280static int proc_cgroup_show(struct seq_file *m, void *v)
2281{
2282 struct pid *pid;
2283 struct task_struct *tsk;
2284 char *buf;
2285 int retval;
2286 struct cgroupfs_root *root;
2287
2288 retval = -ENOMEM;
2289 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2290 if (!buf)
2291 goto out;
2292
2293 retval = -ESRCH;
2294 pid = m->private;
2295 tsk = get_pid_task(pid, PIDTYPE_PID);
2296 if (!tsk)
2297 goto out_free;
2298
2299 retval = 0;
2300
2301 mutex_lock(&cgroup_mutex);
2302
2303 for_each_root(root) {
2304 struct cgroup_subsys *ss;
2305 struct cgroup *cont;
2306 int subsys_id;
2307 int count = 0;
2308
2309 /* Skip this hierarchy if it has no active subsystems */
2310 if (!root->actual_subsys_bits)
2311 continue;
2312 for_each_subsys(root, ss)
2313 seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
2314 seq_putc(m, ':');
2315 get_first_subsys(&root->top_cgroup, NULL, &subsys_id);
2316 cont = task_cgroup(tsk, subsys_id);
2317 retval = cgroup_path(cont, buf, PAGE_SIZE);
2318 if (retval < 0)
2319 goto out_unlock;
2320 seq_puts(m, buf);
2321 seq_putc(m, '\n');
2322 }
2323
2324out_unlock:
2325 mutex_unlock(&cgroup_mutex);
2326 put_task_struct(tsk);
2327out_free:
2328 kfree(buf);
2329out:
2330 return retval;
2331}
2332
2333static int cgroup_open(struct inode *inode, struct file *file)
2334{
2335 struct pid *pid = PROC_I(inode)->pid;
2336 return single_open(file, proc_cgroup_show, pid);
2337}
2338
2339struct file_operations proc_cgroup_operations = {
2340 .open = cgroup_open,
2341 .read = seq_read,
2342 .llseek = seq_lseek,
2343 .release = single_release,
2344};
2345
2346/* Display information about each subsystem and each hierarchy */
2347static int proc_cgroupstats_show(struct seq_file *m, void *v)
2348{
2349 int i;
2350 struct cgroupfs_root *root;
2351
817929ec 2352 seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\n");
a424316c 2353 mutex_lock(&cgroup_mutex);
a424316c
PM
2354 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2355 struct cgroup_subsys *ss = subsys[i];
817929ec
PM
2356 seq_printf(m, "%s\t%lu\t%d\n",
2357 ss->name, ss->root->subsys_bits,
2358 ss->root->number_of_cgroups);
a424316c
PM
2359 }
2360 mutex_unlock(&cgroup_mutex);
2361 return 0;
2362}
2363
2364static int cgroupstats_open(struct inode *inode, struct file *file)
2365{
2366 return single_open(file, proc_cgroupstats_show, 0);
2367}
2368
2369static struct file_operations proc_cgroupstats_operations = {
2370 .open = cgroupstats_open,
2371 .read = seq_read,
2372 .llseek = seq_lseek,
2373 .release = single_release,
2374};
2375
b4f48b63
PM
2376/**
2377 * cgroup_fork - attach newly forked task to its parents cgroup.
2378 * @tsk: pointer to task_struct of forking parent process.
2379 *
2380 * Description: A task inherits its parent's cgroup at fork().
2381 *
2382 * A pointer to the shared css_set was automatically copied in
2383 * fork.c by dup_task_struct(). However, we ignore that copy, since
2384 * it was not made under the protection of RCU or cgroup_mutex, so
2385 * might no longer be a valid cgroup pointer. attach_task() might
817929ec
PM
2386 * have already changed current->cgroups, allowing the previously
2387 * referenced cgroup group to be removed and freed.
b4f48b63
PM
2388 *
2389 * At the point that cgroup_fork() is called, 'current' is the parent
2390 * task, and the passed argument 'child' points to the child task.
2391 */
2392void cgroup_fork(struct task_struct *child)
2393{
817929ec
PM
2394 task_lock(current);
2395 child->cgroups = current->cgroups;
2396 get_css_set(child->cgroups);
2397 task_unlock(current);
2398 INIT_LIST_HEAD(&child->cg_list);
b4f48b63
PM
2399}
2400
2401/**
2402 * cgroup_fork_callbacks - called on a new task very soon before
2403 * adding it to the tasklist. No need to take any locks since no-one
2404 * can be operating on this task
2405 */
2406void cgroup_fork_callbacks(struct task_struct *child)
2407{
2408 if (need_forkexit_callback) {
2409 int i;
2410 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2411 struct cgroup_subsys *ss = subsys[i];
2412 if (ss->fork)
2413 ss->fork(ss, child);
2414 }
2415 }
2416}
2417
817929ec
PM
2418/**
2419 * cgroup_post_fork - called on a new task after adding it to the
2420 * task list. Adds the task to the list running through its css_set
2421 * if necessary. Has to be after the task is visible on the task list
2422 * in case we race with the first call to cgroup_iter_start() - to
2423 * guarantee that the new task ends up on its list. */
2424void cgroup_post_fork(struct task_struct *child)
2425{
2426 if (use_task_css_set_links) {
2427 write_lock(&css_set_lock);
2428 if (list_empty(&child->cg_list))
2429 list_add(&child->cg_list, &child->cgroups->tasks);
2430 write_unlock(&css_set_lock);
2431 }
2432}
b4f48b63
PM
2433/**
2434 * cgroup_exit - detach cgroup from exiting task
2435 * @tsk: pointer to task_struct of exiting process
2436 *
2437 * Description: Detach cgroup from @tsk and release it.
2438 *
2439 * Note that cgroups marked notify_on_release force every task in
2440 * them to take the global cgroup_mutex mutex when exiting.
2441 * This could impact scaling on very large systems. Be reluctant to
2442 * use notify_on_release cgroups where very high task exit scaling
2443 * is required on large systems.
2444 *
2445 * the_top_cgroup_hack:
2446 *
2447 * Set the exiting tasks cgroup to the root cgroup (top_cgroup).
2448 *
2449 * We call cgroup_exit() while the task is still competent to
2450 * handle notify_on_release(), then leave the task attached to the
2451 * root cgroup in each hierarchy for the remainder of its exit.
2452 *
2453 * To do this properly, we would increment the reference count on
2454 * top_cgroup, and near the very end of the kernel/exit.c do_exit()
2455 * code we would add a second cgroup function call, to drop that
2456 * reference. This would just create an unnecessary hot spot on
2457 * the top_cgroup reference count, to no avail.
2458 *
2459 * Normally, holding a reference to a cgroup without bumping its
2460 * count is unsafe. The cgroup could go away, or someone could
2461 * attach us to a different cgroup, decrementing the count on
2462 * the first cgroup that we never incremented. But in this case,
2463 * top_cgroup isn't going away, and either task has PF_EXITING set,
2464 * which wards off any attach_task() attempts, or task is a failed
2465 * fork, never visible to attach_task.
2466 *
2467 */
2468void cgroup_exit(struct task_struct *tsk, int run_callbacks)
2469{
2470 int i;
817929ec 2471 struct css_set *cg;
b4f48b63
PM
2472
2473 if (run_callbacks && need_forkexit_callback) {
2474 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
2475 struct cgroup_subsys *ss = subsys[i];
2476 if (ss->exit)
2477 ss->exit(ss, tsk);
2478 }
2479 }
817929ec
PM
2480
2481 /*
2482 * Unlink from the css_set task list if necessary.
2483 * Optimistically check cg_list before taking
2484 * css_set_lock
2485 */
2486 if (!list_empty(&tsk->cg_list)) {
2487 write_lock(&css_set_lock);
2488 if (!list_empty(&tsk->cg_list))
2489 list_del(&tsk->cg_list);
2490 write_unlock(&css_set_lock);
2491 }
2492
b4f48b63
PM
2493 /* Reassign the task to the init_css_set. */
2494 task_lock(tsk);
817929ec
PM
2495 cg = tsk->cgroups;
2496 tsk->cgroups = &init_css_set;
b4f48b63 2497 task_unlock(tsk);
817929ec 2498 if (cg)
81a6a5cd 2499 put_css_set_taskexit(cg);
b4f48b63 2500}
697f4161
PM
2501
2502/**
2503 * cgroup_clone - duplicate the current cgroup in the hierarchy
2504 * that the given subsystem is attached to, and move this task into
2505 * the new child
2506 */
2507int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
2508{
2509 struct dentry *dentry;
2510 int ret = 0;
2511 char nodename[MAX_CGROUP_TYPE_NAMELEN];
2512 struct cgroup *parent, *child;
2513 struct inode *inode;
2514 struct css_set *cg;
2515 struct cgroupfs_root *root;
2516 struct cgroup_subsys *ss;
2517
2518 /* We shouldn't be called by an unregistered subsystem */
2519 BUG_ON(!subsys->active);
2520
2521 /* First figure out what hierarchy and cgroup we're dealing
2522 * with, and pin them so we can drop cgroup_mutex */
2523 mutex_lock(&cgroup_mutex);
2524 again:
2525 root = subsys->root;
2526 if (root == &rootnode) {
2527 printk(KERN_INFO
2528 "Not cloning cgroup for unused subsystem %s\n",
2529 subsys->name);
2530 mutex_unlock(&cgroup_mutex);
2531 return 0;
2532 }
817929ec 2533 cg = tsk->cgroups;
697f4161
PM
2534 parent = task_cgroup(tsk, subsys->subsys_id);
2535
2536 snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid);
2537
2538 /* Pin the hierarchy */
2539 atomic_inc(&parent->root->sb->s_active);
2540
817929ec
PM
2541 /* Keep the cgroup alive */
2542 get_css_set(cg);
697f4161
PM
2543 mutex_unlock(&cgroup_mutex);
2544
2545 /* Now do the VFS work to create a cgroup */
2546 inode = parent->dentry->d_inode;
2547
2548 /* Hold the parent directory mutex across this operation to
2549 * stop anyone else deleting the new cgroup */
2550 mutex_lock(&inode->i_mutex);
2551 dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
2552 if (IS_ERR(dentry)) {
2553 printk(KERN_INFO
2554 "Couldn't allocate dentry for %s: %ld\n", nodename,
2555 PTR_ERR(dentry));
2556 ret = PTR_ERR(dentry);
2557 goto out_release;
2558 }
2559
2560 /* Create the cgroup directory, which also creates the cgroup */
2561 ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755);
2562 child = __d_cont(dentry);
2563 dput(dentry);
2564 if (ret) {
2565 printk(KERN_INFO
2566 "Failed to create cgroup %s: %d\n", nodename,
2567 ret);
2568 goto out_release;
2569 }
2570
2571 if (!child) {
2572 printk(KERN_INFO
2573 "Couldn't find new cgroup %s\n", nodename);
2574 ret = -ENOMEM;
2575 goto out_release;
2576 }
2577
2578 /* The cgroup now exists. Retake cgroup_mutex and check
2579 * that we're still in the same state that we thought we
2580 * were. */
2581 mutex_lock(&cgroup_mutex);
2582 if ((root != subsys->root) ||
2583 (parent != task_cgroup(tsk, subsys->subsys_id))) {
2584 /* Aargh, we raced ... */
2585 mutex_unlock(&inode->i_mutex);
817929ec 2586 put_css_set(cg);
697f4161
PM
2587
2588 deactivate_super(parent->root->sb);
2589 /* The cgroup is still accessible in the VFS, but
2590 * we're not going to try to rmdir() it at this
2591 * point. */
2592 printk(KERN_INFO
2593 "Race in cgroup_clone() - leaking cgroup %s\n",
2594 nodename);
2595 goto again;
2596 }
2597
2598 /* do any required auto-setup */
2599 for_each_subsys(root, ss) {
2600 if (ss->post_clone)
2601 ss->post_clone(ss, child);
2602 }
2603
2604 /* All seems fine. Finish by moving the task into the new cgroup */
2605 ret = attach_task(child, tsk);
2606 mutex_unlock(&cgroup_mutex);
2607
2608 out_release:
2609 mutex_unlock(&inode->i_mutex);
81a6a5cd
PM
2610
2611 mutex_lock(&cgroup_mutex);
817929ec 2612 put_css_set(cg);
81a6a5cd 2613 mutex_unlock(&cgroup_mutex);
697f4161
PM
2614 deactivate_super(parent->root->sb);
2615 return ret;
2616}
2617
2618/*
2619 * See if "cont" is a descendant of the current task's cgroup in
2620 * the appropriate hierarchy
2621 *
2622 * If we are sending in dummytop, then presumably we are creating
2623 * the top cgroup in the subsystem.
2624 *
2625 * Called only by the ns (nsproxy) cgroup.
2626 */
2627int cgroup_is_descendant(const struct cgroup *cont)
2628{
2629 int ret;
2630 struct cgroup *target;
2631 int subsys_id;
2632
2633 if (cont == dummytop)
2634 return 1;
2635
2636 get_first_subsys(cont, NULL, &subsys_id);
2637 target = task_cgroup(current, subsys_id);
2638 while (cont != target && cont!= cont->top_cgroup)
2639 cont = cont->parent;
2640 ret = (cont == target);
2641 return ret;
2642}
81a6a5cd
PM
2643
2644static void check_for_release(struct cgroup *cont)
2645{
2646 /* All of these checks rely on RCU to keep the cgroup
2647 * structure alive */
2648 if (cgroup_is_releasable(cont) && !atomic_read(&cont->count)
2649 && list_empty(&cont->children) && !cgroup_has_css_refs(cont)) {
2650 /* Control Group is currently removeable. If it's not
2651 * already queued for a userspace notification, queue
2652 * it now */
2653 int need_schedule_work = 0;
2654 spin_lock(&release_list_lock);
2655 if (!cgroup_is_removed(cont) &&
2656 list_empty(&cont->release_list)) {
2657 list_add(&cont->release_list, &release_list);
2658 need_schedule_work = 1;
2659 }
2660 spin_unlock(&release_list_lock);
2661 if (need_schedule_work)
2662 schedule_work(&release_agent_work);
2663 }
2664}
2665
2666void __css_put(struct cgroup_subsys_state *css)
2667{
2668 struct cgroup *cont = css->cgroup;
2669 rcu_read_lock();
2670 if (atomic_dec_and_test(&css->refcnt) && notify_on_release(cont)) {
2671 set_bit(CONT_RELEASABLE, &cont->flags);
2672 check_for_release(cont);
2673 }
2674 rcu_read_unlock();
2675}
2676
2677/*
2678 * Notify userspace when a cgroup is released, by running the
2679 * configured release agent with the name of the cgroup (path
2680 * relative to the root of cgroup file system) as the argument.
2681 *
2682 * Most likely, this user command will try to rmdir this cgroup.
2683 *
2684 * This races with the possibility that some other task will be
2685 * attached to this cgroup before it is removed, or that some other
2686 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
2687 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
2688 * unused, and this cgroup will be reprieved from its death sentence,
2689 * to continue to serve a useful existence. Next time it's released,
2690 * we will get notified again, if it still has 'notify_on_release' set.
2691 *
2692 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
2693 * means only wait until the task is successfully execve()'d. The
2694 * separate release agent task is forked by call_usermodehelper(),
2695 * then control in this thread returns here, without waiting for the
2696 * release agent task. We don't bother to wait because the caller of
2697 * this routine has no use for the exit status of the release agent
2698 * task, so no sense holding our caller up for that.
2699 *
2700 */
2701
2702static void cgroup_release_agent(struct work_struct *work)
2703{
2704 BUG_ON(work != &release_agent_work);
2705 mutex_lock(&cgroup_mutex);
2706 spin_lock(&release_list_lock);
2707 while (!list_empty(&release_list)) {
2708 char *argv[3], *envp[3];
2709 int i;
2710 char *pathbuf;
2711 struct cgroup *cont = list_entry(release_list.next,
2712 struct cgroup,
2713 release_list);
2714 list_del_init(&cont->release_list);
2715 spin_unlock(&release_list_lock);
2716 pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2717 if (!pathbuf) {
2718 spin_lock(&release_list_lock);
2719 continue;
2720 }
2721
2722 if (cgroup_path(cont, pathbuf, PAGE_SIZE) < 0) {
2723 kfree(pathbuf);
2724 spin_lock(&release_list_lock);
2725 continue;
2726 }
2727
2728 i = 0;
2729 argv[i++] = cont->root->release_agent_path;
2730 argv[i++] = (char *)pathbuf;
2731 argv[i] = NULL;
2732
2733 i = 0;
2734 /* minimal command environment */
2735 envp[i++] = "HOME=/";
2736 envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
2737 envp[i] = NULL;
2738
2739 /* Drop the lock while we invoke the usermode helper,
2740 * since the exec could involve hitting disk and hence
2741 * be a slow process */
2742 mutex_unlock(&cgroup_mutex);
2743 call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
2744 kfree(pathbuf);
2745 mutex_lock(&cgroup_mutex);
2746 spin_lock(&release_list_lock);
2747 }
2748 spin_unlock(&release_list_lock);
2749 mutex_unlock(&cgroup_mutex);
2750}