[PATCH] undo partial cpu_exclusive sched domain disabling
[linux-2.6-block.git] / kernel / cpuset.c
/*
 * kernel/cpuset.c
 *
 * Processor and Memory placement constraints for sets of tasks.
 *
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 * Portions Copyright (c) 2004 Silicon Graphics, Inc.
 *
 * 2003-10-10 Written by Simon Derr <simon.derr@bull.net>
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson <pj@sgi.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
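
/*
 * Illustrative usage from a shell, to sketch the interface this file
 * implements (the mount point and the cpuset name "set1" below are
 * examples, not mandated by the kernel):
 *
 *	mount -t cpuset cpuset /dev/cpuset
 *	mkdir /dev/cpuset/set1			# create child cpuset "set1"
 *	echo 2-3 > /dev/cpuset/set1/cpus	# allow CPUs 2 and 3
 *	echo 1 > /dev/cpuset/set1/mems		# allow memory node 1
 *	echo $$ > /dev/cpuset/set1/tasks	# attach this shell
 *	cat /dev/cpuset/set1/tasks		# list attached pids
 *	echo 1 > /dev/cpuset/set1/notify_on_release
 */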

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>

#define CPUSET_SUPER_MAGIC	0x27e0eb

struct cpuset {
	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	atomic_t count;			/* count tasks using this cpuset */

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */

	struct cpuset *parent;		/* my parent */
	struct dentry *dentry;		/* cpuset fs entry */

	/*
	 * Copy of global cpuset_mems_generation as of the most
	 * recent time this cpuset changed its mems_allowed.
	 */
	int mems_generation;
};

/* bits in struct cpuset flags field */
typedef enum {
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_REMOVED,
	CS_NOTIFY_ON_RELEASE
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return !!test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return !!test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_removed(const struct cpuset *cs)
{
	return !!test_bit(CS_REMOVED, &cs->flags);
}

static inline int notify_on_release(const struct cpuset *cs)
{
	return !!test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
}

/*
 * Increment this atomic integer every time any cpuset changes its
 * mems_allowed value.  Users of cpusets can track this generation
 * number, and avoid having to lock and reload mems_allowed unless
 * the cpuset they're using changes generation.
 *
 * A single, global generation is needed because attach_task() could
 * reattach a task to a different cpuset, which must not have its
 * generation numbers aliased with those of that task's previous cpuset.
 *
 * Generations are needed for mems_allowed because one task cannot
 * modify another's memory placement.  So we must enable every task,
 * on every visit to __alloc_pages(), to efficiently check whether
 * its current->cpuset->mems_allowed has changed, requiring an update
 * of its current->mems_allowed.
 */
static atomic_t cpuset_mems_generation = ATOMIC_INIT(1);

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
	.cpus_allowed = CPU_MASK_ALL,
	.mems_allowed = NODE_MASK_ALL,
	.count = ATOMIC_INIT(0),
	.sibling = LIST_HEAD_INIT(top_cpuset.sibling),
	.children = LIST_HEAD_INIT(top_cpuset.children),
	.parent = NULL,
	.dentry = NULL,
	.mems_generation = 0,
};

static struct vfsmount *cpuset_mount;
static struct super_block *cpuset_sb = NULL;

/*
 * cpuset_sem should be held by anyone who is depending on the children
 * or sibling lists of any cpuset, or performing non-atomic operations
 * on the flags or *_allowed values of a cpuset, such as raising the
 * CS_REMOVED flag bit iff it is not already raised, or reading and
 * conditionally modifying the *_allowed values.  One kernel global
 * cpuset semaphore should be sufficient - these things don't change
 * that much.
 *
 * The code that modifies cpusets holds cpuset_sem across the entire
 * operation, from cpuset_common_file_write() down, single threading
 * all cpuset modifications (except for counter manipulations from
 * fork and exit) across the system.  This presumes that cpuset
 * modifications are rare - better kept simple and safe, even if slow.
 *
 * The code that reads cpusets, such as in cpuset_common_file_read()
 * and below, only holds cpuset_sem across small pieces of code, such
 * as when reading out possibly multi-word cpumasks and nodemasks, as
 * the risks are less, and the desire for performance a little greater.
 * The proc_cpuset_show() routine needs to hold cpuset_sem to ensure
 * that no cs->dentry is NULL, as it walks up the cpuset tree to root.
 *
 * The hooks from fork and exit, cpuset_fork() and cpuset_exit(), don't
 * (usually) grab cpuset_sem.  These are the two most performance
 * critical pieces of code here.  The exception occurs on exit(),
 * when a task in a notify_on_release cpuset exits.  Then cpuset_sem
 * is taken, and if the cpuset count is zero, a usermode call is made
 * to /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of the cpuset file system) as the argument.
 *
 * A cpuset can only be deleted if both its 'count' of using tasks is
 * zero, and its list of 'children' cpusets is empty.  Since all tasks
 * in the system use _some_ cpuset, and since there is always at least
 * one task in the system (init, pid == 1), therefore, top_cpuset
 * always has either children cpusets and/or using tasks.  So no need
 * for any special hack to ensure that top_cpuset cannot be deleted.
 */

static DECLARE_MUTEX(cpuset_sem);

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
 * -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
 */

static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);

static struct backing_dev_info cpuset_backing_dev_info = {
	.ra_pages = 0,		/* No readahead */
	.capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
};

static struct inode *cpuset_new_inode(mode_t mode)
{
	struct inode *inode = new_inode(cpuset_sb);

	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
	}
	return inode;
}

static void cpuset_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cpuset */
	if (S_ISDIR(inode->i_mode)) {
		struct cpuset *cs = dentry->d_fsdata;
		BUG_ON(!(is_removed(cs)));
		kfree(cs);
	}
	iput(inode);
}

static struct dentry_operations cpuset_dops = {
	.d_iput = cpuset_diput,
};

static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
{
	struct dentry *d = lookup_one_len(name, parent, strlen(name));
	if (!IS_ERR(d))
		d->d_op = &cpuset_dops;
	return d;
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cpuset_d_remove_dir(struct dentry *dentry)
{
	struct list_head *node;

	spin_lock(&dcache_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_child);
		list_del_init(node);
		if (d->d_inode) {
			d = dget_locked(d);
			spin_unlock(&dcache_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dcache_lock);
		}
		node = dentry->d_subdirs.next;
	}
	list_del_init(&dentry->d_child);
	spin_unlock(&dcache_lock);
	remove_dir(dentry);
}

static struct super_operations cpuset_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
};

static int cpuset_fill_super(struct super_block *sb, void *unused_data,
							int unused_silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CPUSET_SUPER_MAGIC;
	sb->s_op = &cpuset_ops;
	cpuset_sb = sb;

	inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
	if (inode) {
		inode->i_op = &simple_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directories start off with i_nlink == 2 (for "." entry) */
		inode->i_nlink++;
	} else {
		return -ENOMEM;
	}

	root = d_alloc_root(inode);
	if (!root) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = root;
	return 0;
}

static struct super_block *cpuset_get_sb(struct file_system_type *fs_type,
					int flags, const char *unused_dev_name,
					void *data)
{
	return get_sb_single(fs_type, flags, data, cpuset_fill_super);
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.get_sb = cpuset_get_sb,
	.kill_sb = kill_litter_super,
};

/* struct cftype:
 *
 * The files in the cpuset filesystem mostly have a very simple read/write
 * handling, some common function will take care of it. Nevertheless some cases
 * (read tasks) are special and therefore I define this structure for every
 * kind of file.
 *
 *
 * When reading/writing to a file:
 *	- the cpuset to use is in file->f_dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_dentry->d_fsdata
 */

struct cftype {
	char *name;
	int private;
	int (*open) (struct inode *inode, struct file *file);
	ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
							loff_t *ppos);
	int (*write) (struct file *file, const char __user *buf, size_t nbytes,
							loff_t *ppos);
	int (*release) (struct inode *inode, struct file *file);
};

static inline struct cpuset *__d_cs(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/*
 * Call with cpuset_sem held.  Writes path of cpuset into buf.
 * Returns 0 on success, -errno on error.
 */

static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
{
	char *start;

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = cs->dentry->d_name.len;
		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, cs->dentry->d_name.name, len);
		cs = cs->parent;
		if (!cs)
			break;
		if (!cs->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}
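
/*
 * For example (names illustrative): for a cpuset whose directory is
 * <mountpoint>/set1/sub2, cpuset_path() fills buf with "/set1/sub2".
 * The top cpuset's dentry is the filesystem root "/", and each level
 * below it contributes "/<name>" as the loop walks up the parent chain.
 */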

/*
 * Notify userspace when a cpuset is released, by running
 * /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of cpuset file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cpuset.
 *
 * This races with the possibility that some other task will be
 * attached to this cpuset before it is removed, or that some other
 * user task will 'mkdir' a child cpuset of this cpuset.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cpuset is no longer
 * unused, and this cpuset will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is 0, which means don't
 * wait.  The separate /sbin/cpuset_release_agent task is forked by
 * call_usermodehelper(), then control in this thread returns here,
 * without waiting for the release agent task.  We don't bother to
 * wait because the caller of this routine has no use for the exit
 * status of the /sbin/cpuset_release_agent task, so no sense holding
 * our caller up for that.
 *
 * The simple act of forking that task might require more memory,
 * which might need cpuset_sem.  So this routine must be called while
 * cpuset_sem is not held, to avoid a possible deadlock.  See also
 * comments for check_for_release(), below.
 */

static void cpuset_release_agent(const char *pathbuf)
{
	char *argv[3], *envp[3];
	int i;

	if (!pathbuf)
		return;

	i = 0;
	argv[i++] = "/sbin/cpuset_release_agent";
	argv[i++] = (char *)pathbuf;
	argv[i] = NULL;

	i = 0;
	/* minimal command environment */
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[i] = NULL;

	call_usermodehelper(argv[0], argv, envp, 0);
	kfree(pathbuf);
}
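
/*
 * So when, say, the last task leaves a notify_on_release cpuset at
 * <mountpoint>/set1/sub2 (an illustrative name), the effect is as if
 * userspace had run:
 *
 *	HOME=/ PATH=/sbin:/bin:/usr/sbin:/usr/bin \
 *		/sbin/cpuset_release_agent /set1/sub2
 */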

/*
 * Either cs->count of using tasks transitioned to zero, or the
 * cs->children list of child cpusets just became empty.  If this
 * cs is notify_on_release() and now both the user count is zero and
 * the list of children is empty, prepare cpuset path in a kmalloc'd
 * buffer, to be returned via ppathbuf, so that the caller can invoke
 * cpuset_release_agent() with it later on, once cpuset_sem is dropped.
 * Call here with cpuset_sem held.
 *
 * This check_for_release() routine is responsible for kmalloc'ing
 * pathbuf.  The above cpuset_release_agent() is responsible for
 * kfree'ing pathbuf.  The caller of these routines is responsible
 * for providing a pathbuf pointer, initialized to NULL, then
 * calling check_for_release() with cpuset_sem held and the address
 * of the pathbuf pointer, then dropping cpuset_sem, then calling
 * cpuset_release_agent() with pathbuf, as set by check_for_release().
 */

static void check_for_release(struct cpuset *cs, char **ppathbuf)
{
	if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
	    list_empty(&cs->children)) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return;
		if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
			kfree(buf);
		else
			*ppathbuf = buf;
	}
}
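
/*
 * The caller pattern described above, in sketch form (this is the
 * shape used by cpuset_rmdir() and cpuset_exit() below):
 *
 *	char *pathbuf = NULL;
 *
 *	down(&cpuset_sem);
 *	...
 *	check_for_release(cs, &pathbuf);
 *	...
 *	up(&cpuset_sem);
 *	cpuset_release_agent(pathbuf);	(safe: cpuset_sem now dropped)
 */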

/*
 * Return in *pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  If we get
 * all the way to the top and still haven't found any online cpus,
 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 * task, return cpu_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_map.
 *
 * Call with cpuset_sem held.
 */

static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
{
	while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
		cs = cs->parent;
	if (cs)
		cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
	else
		*pmask = cpu_online_map;
	BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
}
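
/*
 * For example (hypothetical masks): if cs->cpus_allowed is 4-7 but
 * only cpus 0-3 are online, the loop walks up to the first ancestor
 * whose mask intersects the online map - reaching top_cpuset if need
 * be - so *pmask always comes back non-empty.
 */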

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online mems.  If we get
 * all the way to the top and still haven't found any online mems,
 * return node_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_online_map.
 *
 * Call with cpuset_sem held.
 */

static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
		cs = cs->parent;
	if (cs)
		nodes_and(*pmask, cs->mems_allowed, node_online_map);
	else
		*pmask = node_online_map;
	BUG_ON(!nodes_intersects(*pmask, node_online_map));
}

/*
 * Refresh current task's mems_allowed and mems_generation from its
 * cpuset.  Call with cpuset_sem held.
 *
 * Be sure to call refresh_mems() on any cpuset operation which
 * (1) holds cpuset_sem, and (2) might possibly alloc memory.
 * Call after obtaining cpuset_sem lock, before any possible
 * allocation.  Otherwise one risks trying to allocate memory
 * while the task cpuset_mems_generation is not the same as
 * the mems_generation in its cpuset, which would deadlock on
 * cpuset_sem in cpuset_update_current_mems_allowed().
 *
 * Since we hold cpuset_sem, once refresh_mems() is called, the
 * test (current->cpuset_mems_generation != cs->mems_generation)
 * in cpuset_update_current_mems_allowed() will remain false,
 * until we drop cpuset_sem.  Anyone else who would change our
 * cpuset's mems_generation needs to lock cpuset_sem first.
 */

static void refresh_mems(void)
{
	struct cpuset *cs = current->cpuset;

	if (current->cpuset_mems_generation != cs->mems_generation) {
		guarantee_online_mems(cs, &current->mems_allowed);
		current->cpuset_mems_generation = cs->mems_generation;
	}
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_sem held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cpuset *c, *par;

	/* Each of our child cpusets must be a subset of us */
	list_for_each_entry(c, &cur->children, sibling) {
		if (!is_cpuset_subset(c, trial))
			return -EBUSY;
	}

	/* Remaining checks don't apply to root cpuset */
	if ((par = cur->parent) == NULL)
		return 0;

	/* We must be a subset of our parent cpuset */
	if (!is_cpuset_subset(trial, par))
		return -EACCES;

	/* If either I or some sibling (!= me) is exclusive, we can't overlap */
	list_for_each_entry(c, &par->children, sibling) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
			return -EINVAL;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			return -EINVAL;
	}

	return 0;
}
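
/*
 * For example (hypothetical layout): with sibling cpusets A (cpus 0-3)
 * and B (cpus 2-5), a trial that turns on A's cpu_exclusive flag fails
 * with -EINVAL, since A's cpus_allowed still intersects B's.  The
 * writer would first have to shrink one mask so they no longer overlap.
 */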

/*
 * For a given cpuset cur, partition the system as follows
 * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
 *    exclusive child cpusets
 * b. All cpus in the current cpuset's cpus_allowed that are not part of any
 *    exclusive child cpusets
 * Build these two partitions by calling partition_sched_domains
 *
 * Call with cpuset_sem held.  May nest a call to the
 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
 */
static void update_cpu_domains(struct cpuset *cur)
{
	struct cpuset *c, *par = cur->parent;
	cpumask_t pspan, cspan;

	if (par == NULL || cpus_empty(cur->cpus_allowed))
		return;

	/*
	 * Get all cpus from parent's cpus_allowed not part of exclusive
	 * children
	 */
	pspan = par->cpus_allowed;
	list_for_each_entry(c, &par->children, sibling) {
		if (is_cpu_exclusive(c))
			cpus_andnot(pspan, pspan, c->cpus_allowed);
	}
	if (is_removed(cur) || !is_cpu_exclusive(cur)) {
		cpus_or(pspan, pspan, cur->cpus_allowed);
		if (cpus_equal(pspan, cur->cpus_allowed))
			return;
		cspan = CPU_MASK_NONE;
	} else {
		if (cpus_empty(pspan))
			return;
		cspan = cur->cpus_allowed;
		/*
		 * Get all cpus from current cpuset's cpus_allowed not part
		 * of exclusive children
		 */
		list_for_each_entry(c, &cur->children, sibling) {
			if (is_cpu_exclusive(c))
				cpus_andnot(cspan, cspan, c->cpus_allowed);
		}
	}

	lock_cpu_hotplug();
	partition_sched_domains(&pspan, &cspan);
	unlock_cpu_hotplug();
}
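
/*
 * Worked example (hypothetical masks): the parent allows cpus 0-7 and
 * has one cpu_exclusive child, cur, allowing cpus 4-7.  Then pspan
 * becomes 0-3 (the parent's cpus minus those of exclusive children)
 * and cspan becomes 4-7 (cur's cpus, minus any of cur's own exclusive
 * children), and partition_sched_domains() builds separate sched
 * domains over those two disjoint spans.
 */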

static int update_cpumask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	int retval, cpus_unchanged;

	trialcs = *cs;
	retval = cpulist_parse(buf, trialcs.cpus_allowed);
	if (retval < 0)
		return retval;
	cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
	if (cpus_empty(trialcs.cpus_allowed))
		return -ENOSPC;
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		return retval;
	cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
	cs->cpus_allowed = trialcs.cpus_allowed;
	if (is_cpu_exclusive(cs) && !cpus_unchanged)
		update_cpu_domains(cs);
	return 0;
}

static int update_nodemask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	int retval;

	trialcs = *cs;
	retval = nodelist_parse(buf, trialcs.mems_allowed);
	if (retval < 0)
		return retval;
	nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
	if (nodes_empty(trialcs.mems_allowed))
		return -ENOSPC;
	retval = validate_change(cs, &trialcs);
	if (retval == 0) {
		cs->mems_allowed = trialcs.mems_allowed;
		atomic_inc(&cpuset_mems_generation);
		cs->mems_generation = atomic_read(&cpuset_mems_generation);
	}
	return retval;
}

/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:	the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
 *				CS_NOTIFY_ON_RELEASE)
 * cs:	the cpuset to update
 * buf:	the buffer where we read the 0 or 1
 */

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
{
	int turning_on;
	struct cpuset trialcs;
	int err, cpu_exclusive_changed;

	turning_on = (simple_strtoul(buf, NULL, 10) != 0);

	trialcs = *cs;
	if (turning_on)
		set_bit(bit, &trialcs.flags);
	else
		clear_bit(bit, &trialcs.flags);

	err = validate_change(cs, &trialcs);
	if (err < 0)
		return err;
	cpu_exclusive_changed =
		(is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
	if (turning_on)
		set_bit(bit, &cs->flags);
	else
		clear_bit(bit, &cs->flags);

	if (cpu_exclusive_changed)
		update_cpu_domains(cs);
	return 0;
}

static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
{
	pid_t pid;
	struct task_struct *tsk;
	struct cpuset *oldcs;
	cpumask_t cpus;

	if (sscanf(pidbuf, "%d", &pid) != 1)
		return -EIO;
	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
		return -ENOSPC;

	if (pid) {
		read_lock(&tasklist_lock);

		tsk = find_task_by_pid(pid);
		if (!tsk) {
			read_unlock(&tasklist_lock);
			return -ESRCH;
		}

		get_task_struct(tsk);
		read_unlock(&tasklist_lock);

		if ((current->euid) && (current->euid != tsk->uid)
		    && (current->euid != tsk->suid)) {
			put_task_struct(tsk);
			return -EACCES;
		}
	} else {
		tsk = current;
		get_task_struct(tsk);
	}

	task_lock(tsk);
	oldcs = tsk->cpuset;
	if (!oldcs) {
		task_unlock(tsk);
		put_task_struct(tsk);
		return -ESRCH;
	}
	atomic_inc(&cs->count);
	tsk->cpuset = cs;
	task_unlock(tsk);

	guarantee_online_cpus(cs, &cpus);
	set_cpus_allowed(tsk, cpus);

	put_task_struct(tsk);
	if (atomic_dec_and_test(&oldcs->count))
		check_for_release(oldcs, ppathbuf);
	return 0;
}
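
/*
 * Note the pid == 0 case above: writing "0" to a cpuset's tasks file
 * attaches the writing task itself, e.g. (illustrative path):
 *
 *	echo 0 > /dev/cpuset/set1/tasks
 */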

/* The various types of files and directories in a cpuset file system */

typedef enum {
	FILE_ROOT,
	FILE_DIR,
	FILE_CPULIST,
	FILE_MEMLIST,
	FILE_CPU_EXCLUSIVE,
	FILE_MEM_EXCLUSIVE,
	FILE_NOTIFY_ON_RELEASE,
	FILE_TASKLIST,
} cpuset_filetype_t;

static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf,
					size_t nbytes, loff_t *unused_ppos)
{
	struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
	struct cftype *cft = __d_cft(file->f_dentry);
	cpuset_filetype_t type = cft->private;
	char *buffer;
	char *pathbuf = NULL;
	int retval = 0;

	/* Crude upper limit on largest legitimate cpulist user might write. */
	if (nbytes > 100 + 6 * NR_CPUS)
		return -E2BIG;

	/* +1 for nul-terminator */
	if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
		return -ENOMEM;

	if (copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out1;
	}
	buffer[nbytes] = 0;	/* nul-terminate */

	down(&cpuset_sem);

	if (is_removed(cs)) {
		retval = -ENODEV;
		goto out2;
	}

	switch (type) {
	case FILE_CPULIST:
		retval = update_cpumask(cs, buffer);
		break;
	case FILE_MEMLIST:
		retval = update_nodemask(cs, buffer);
		break;
	case FILE_CPU_EXCLUSIVE:
		retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
		break;
	case FILE_MEM_EXCLUSIVE:
		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
		break;
	case FILE_NOTIFY_ON_RELEASE:
		retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
		break;
	case FILE_TASKLIST:
		retval = attach_task(cs, buffer, &pathbuf);
		break;
	default:
		retval = -EINVAL;
		goto out2;
	}

	if (retval == 0)
		retval = nbytes;
out2:
	up(&cpuset_sem);
	cpuset_release_agent(pathbuf);
out1:
	kfree(buffer);
	return retval;
}

static ssize_t cpuset_file_write(struct file *file, const char __user *buf,
						size_t nbytes, loff_t *ppos)
{
	ssize_t retval = 0;
	struct cftype *cft = __d_cft(file->f_dentry);
	if (!cft)
		return -ENODEV;

	/* special function ? */
	if (cft->write)
		retval = cft->write(file, buf, nbytes, ppos);
	else
		retval = cpuset_common_file_write(file, buf, nbytes, ppos);

	return retval;
}

/*
 * These ascii lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map.  If read in smaller
 * chunks, there is no guarantee of atomicity.  Since the display format
 * used, list of ranges of sequential numbers, is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
 * A single large read to a buffer that crosses a page boundary is
 * ok, because the result being copied to user land is not recomputed
 * across a page fault.
 */

static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
{
	cpumask_t mask;

	down(&cpuset_sem);
	mask = cs->cpus_allowed;
	up(&cpuset_sem);

	return cpulist_scnprintf(page, PAGE_SIZE, mask);
}

static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
{
	nodemask_t mask;

	down(&cpuset_sem);
	mask = cs->mems_allowed;
	up(&cpuset_sem);

	return nodelist_scnprintf(page, PAGE_SIZE, mask);
}

static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
					size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
	cpuset_filetype_t type = cft->private;
	char *page;
	ssize_t retval = 0;
	char *s;
	char *start;
	size_t n;

	if (!(page = (char *)__get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	s = page;

	switch (type) {
	case FILE_CPULIST:
		s += cpuset_sprintf_cpulist(s, cs);
		break;
	case FILE_MEMLIST:
		s += cpuset_sprintf_memlist(s, cs);
		break;
	case FILE_CPU_EXCLUSIVE:
		*s++ = is_cpu_exclusive(cs) ? '1' : '0';
		break;
	case FILE_MEM_EXCLUSIVE:
		*s++ = is_mem_exclusive(cs) ? '1' : '0';
		break;
	case FILE_NOTIFY_ON_RELEASE:
		*s++ = notify_on_release(cs) ? '1' : '0';
		break;
	default:
		retval = -EINVAL;
		goto out;
	}
	*s++ = '\n';
	*s = '\0';

	start = page + *ppos;
	n = s - start;
	retval = n - copy_to_user(buf, start, min(n, nbytes));
	*ppos += retval;
out:
	free_page((unsigned long)page);
	return retval;
}

static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes,
								loff_t *ppos)
{
	ssize_t retval = 0;
	struct cftype *cft = __d_cft(file->f_dentry);
	if (!cft)
		return -ENODEV;

	/* special function ? */
	if (cft->read)
		retval = cft->read(file, buf, nbytes, ppos);
	else
		retval = cpuset_common_file_read(file, buf, nbytes, ppos);

	return retval;
}

static int cpuset_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct cftype *cft;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	cft = __d_cft(file->f_dentry);
	if (!cft)
		return -ENODEV;
	if (cft->open)
		err = cft->open(inode, file);
	else
		err = 0;

	return err;
}

static int cpuset_file_release(struct inode *inode, struct file *file)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	if (cft->release)
		return cft->release(inode, file);
	return 0;
}

static struct file_operations cpuset_file_operations = {
	.read = cpuset_file_read,
	.write = cpuset_file_write,
	.llseek = generic_file_llseek,
	.open = cpuset_file_open,
	.release = cpuset_file_release,
};

static struct inode_operations cpuset_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = cpuset_mkdir,
	.rmdir = cpuset_rmdir,
};

static int cpuset_create_file(struct dentry *dentry, int mode)
{
	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cpuset_new_inode(mode);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cpuset_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inode->i_nlink++;
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cpuset_file_operations;
	}

	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

/*
 * cpuset_create_dir - create a directory for an object.
 * cs:	the cpuset we create the directory for.
 *	It must have a valid ->parent field
 *	And we are going to fill its ->dentry field.
 * name:	The name to give to the cpuset directory.  Will be copied.
 * mode:	mode to set on new directory.
 */

static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
{
	struct dentry *dentry = NULL;
	struct dentry *parent;
	int error = 0;

	parent = cs->parent->dentry;
	dentry = cpuset_get_dentry(parent, name);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	error = cpuset_create_file(dentry, S_IFDIR | mode);
	if (!error) {
		dentry->d_fsdata = cs;
		parent->d_inode->i_nlink++;
		cs->dentry = dentry;
	}
	dput(dentry);

	return error;
}

static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
{
	struct dentry *dentry;
	int error;

	down(&dir->d_inode->i_sem);
	dentry = cpuset_get_dentry(dir, cft->name);
	if (!IS_ERR(dentry)) {
		error = cpuset_create_file(dentry, 0644 | S_IFREG);
		if (!error)
			dentry->d_fsdata = (void *)cft;
		dput(dentry);
	} else
		error = PTR_ERR(dentry);
	up(&dir->d_inode->i_sem);
	return error;
}

/*
 * Stuff for reading the 'tasks' file.
 *
 * Reading this file can return large amounts of data if a cpuset has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 * Upon tasks file open(), a struct ctr_struct is allocated, that
 * will have a pointer to an array (also allocated here).  The struct
 * ctr_struct * is stored in file->private_data.  Its resources will
 * be freed by release() when the file is closed.  The array is used
 * to sprintf the PIDs and then used by read().
 */

/* cpusets_tasks_read array */

struct ctr_struct {
	char *buf;
	int bufsz;
};

/*
 * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
 * Return actual number of pids loaded.
 */
static inline int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
{
	int n = 0;
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);

	do_each_thread(g, p) {
		if (p->cpuset == cs) {
			pidarray[n++] = p->pid;
			if (unlikely(n == npids))
				goto array_full;
		}
	} while_each_thread(g, p);

array_full:
	read_unlock(&tasklist_lock);
	return n;
}

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

/*
 * Convert array 'a' of 'npids' pid_t's to a string of newline separated
 * decimal pids in 'buf'.  Don't write more than 'sz' chars, but return
 * count 'cnt' of how many chars would be written if buf were large enough.
 */
static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
{
	int cnt = 0;
	int i;

	for (i = 0; i < npids; i++)
		cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
	return cnt;
}

static int cpuset_tasks_open(struct inode *unused, struct file *file)
{
	struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
	struct ctr_struct *ctr;
	pid_t *pidarray;
	int npids;
	char c;

	if (!(file->f_mode & FMODE_READ))
		return 0;

	ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
	if (!ctr)
		goto err0;

	/*
	 * If cpuset gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cpuset users didn't
	 * show up until sometime later on.
	 */
	npids = atomic_read(&cs->count);
	pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
	if (!pidarray)
		goto err1;

	npids = pid_array_load(pidarray, npids, cs);
	sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);

	/* Call pid_array_to_buf() twice, first just to get bufsz */
	ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
	ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
	if (!ctr->buf)
		goto err2;
	ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);

	kfree(pidarray);
	file->private_data = ctr;
	return 0;

err2:
	kfree(pidarray);
err1:
	kfree(ctr);
err0:
	return -ENOMEM;
}

static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
						size_t nbytes, loff_t *ppos)
{
	struct ctr_struct *ctr = file->private_data;

	if (*ppos + nbytes > ctr->bufsz)
		nbytes = ctr->bufsz - *ppos;
	if (copy_to_user(buf, ctr->buf + *ppos, nbytes))
		return -EFAULT;
	*ppos += nbytes;
	return nbytes;
}

static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
{
	struct ctr_struct *ctr;

	if (file->f_mode & FMODE_READ) {
		ctr = file->private_data;
		kfree(ctr->buf);
		kfree(ctr);
	}
	return 0;
}

/*
 * for the common functions, 'private' gives the type of file
 */

static struct cftype cft_tasks = {
	.name = "tasks",
	.open = cpuset_tasks_open,
	.read = cpuset_tasks_read,
	.release = cpuset_tasks_release,
	.private = FILE_TASKLIST,
};

static struct cftype cft_cpus = {
	.name = "cpus",
	.private = FILE_CPULIST,
};

static struct cftype cft_mems = {
	.name = "mems",
	.private = FILE_MEMLIST,
};

static struct cftype cft_cpu_exclusive = {
	.name = "cpu_exclusive",
	.private = FILE_CPU_EXCLUSIVE,
};

static struct cftype cft_mem_exclusive = {
	.name = "mem_exclusive",
	.private = FILE_MEM_EXCLUSIVE,
};

static struct cftype cft_notify_on_release = {
	.name = "notify_on_release",
	.private = FILE_NOTIFY_ON_RELEASE,
};

static int cpuset_populate_dir(struct dentry *cs_dentry)
{
	int err;

	if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
		return err;
	if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
		return err;
	return 0;
}

/*
 * cpuset_create - create a cpuset
 * parent:	cpuset that will be parent of the new cpuset.
 * name:	name of the new cpuset.  Will be strcpy'ed.
 * mode:	mode to set on new inode
 *
 * Must be called with the semaphore on the parent inode held
 */

static long cpuset_create(struct cpuset *parent, const char *name, int mode)
{
	struct cpuset *cs;
	int err;

	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return -ENOMEM;

	down(&cpuset_sem);
	refresh_mems();
	cs->flags = 0;
	if (notify_on_release(parent))
		set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
	cs->cpus_allowed = CPU_MASK_NONE;
	cs->mems_allowed = NODE_MASK_NONE;
	atomic_set(&cs->count, 0);
	INIT_LIST_HEAD(&cs->sibling);
	INIT_LIST_HEAD(&cs->children);
	atomic_inc(&cpuset_mems_generation);
	cs->mems_generation = atomic_read(&cpuset_mems_generation);

	cs->parent = parent;

	list_add(&cs->sibling, &cs->parent->children);

	err = cpuset_create_dir(cs, name, mode);
	if (err < 0)
		goto err;

	/*
	 * Release cpuset_sem before cpuset_populate_dir() because it
	 * will down() this new directory's i_sem and if we race with
	 * another mkdir, we might deadlock.
	 */
	up(&cpuset_sem);

	err = cpuset_populate_dir(cs->dentry);
	/* If err < 0, we have a half-filled directory - oh well ;) */
	return 0;
err:
	list_del(&cs->sibling);
	up(&cpuset_sem);
	kfree(cs);
	return err;
}

static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct cpuset *c_parent = dentry->d_parent->d_fsdata;

	/* the vfs holds inode->i_sem already */
	return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
}

static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	struct cpuset *cs = dentry->d_fsdata;
	struct dentry *d;
	struct cpuset *parent;
	char *pathbuf = NULL;

	/* the vfs holds both inode->i_sem already */

	down(&cpuset_sem);
	refresh_mems();
	if (atomic_read(&cs->count) > 0) {
		up(&cpuset_sem);
		return -EBUSY;
	}
	if (!list_empty(&cs->children)) {
		up(&cpuset_sem);
		return -EBUSY;
	}
	parent = cs->parent;
	set_bit(CS_REMOVED, &cs->flags);
	if (is_cpu_exclusive(cs))
		update_cpu_domains(cs);
	list_del(&cs->sibling);	/* delete my sibling from parent->children */
	if (list_empty(&parent->children))
		check_for_release(parent, &pathbuf);
	spin_lock(&cs->dentry->d_lock);
	d = dget(cs->dentry);
	cs->dentry = NULL;
	spin_unlock(&d->d_lock);
	cpuset_d_remove_dir(d);
	dput(d);
	up(&cpuset_sem);
	cpuset_release_agent(pathbuf);
	return 0;
}

/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system.
 **/

int __init cpuset_init(void)
{
	struct dentry *root;
	int err;

	top_cpuset.cpus_allowed = CPU_MASK_ALL;
	top_cpuset.mems_allowed = NODE_MASK_ALL;

	atomic_inc(&cpuset_mems_generation);
	top_cpuset.mems_generation = atomic_read(&cpuset_mems_generation);

	init_task.cpuset = &top_cpuset;

	err = register_filesystem(&cpuset_fs_type);
	if (err < 0)
		goto out;
	cpuset_mount = kern_mount(&cpuset_fs_type);
	if (IS_ERR(cpuset_mount)) {
		printk(KERN_ERR "cpuset: could not mount!\n");
		err = PTR_ERR(cpuset_mount);
		cpuset_mount = NULL;
		goto out;
	}
	root = cpuset_mount->mnt_sb->s_root;
	root->d_fsdata = &top_cpuset;
	root->d_inode->i_nlink++;
	top_cpuset.dentry = root;
	root->d_inode->i_op = &cpuset_dir_inode_operations;
	err = cpuset_populate_dir(root);
out:
	return err;
}

/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 **/

void __init cpuset_init_smp(void)
{
	top_cpuset.cpus_allowed = cpu_online_map;
	top_cpuset.mems_allowed = node_online_map;
}

/**
 * cpuset_fork - attach newly forked task to its parent's cpuset.
 * @tsk: pointer to task_struct of forking parent process.
 *
 * Description: By default, on fork, a task inherits its
 * parent's cpuset.  The pointer to the shared cpuset is
 * automatically copied in fork.c by dup_task_struct().
 * This cpuset_fork() routine need only increment the usage
 * counter in that cpuset.
 **/

void cpuset_fork(struct task_struct *tsk)
{
	atomic_inc(&tsk->cpuset->count);
}

/**
 * cpuset_exit - detach cpuset from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cpuset from @tsk and release it.
 *
 * Note that cpusets marked notify_on_release force every task
 * in them to take the global cpuset_sem semaphore when exiting.
 * This could impact scaling on very large systems.  Be reluctant
 * to use notify_on_release cpusets where very high task exit
 * scaling is required on large systems.
 *
 * Don't even think about dereferencing 'cs' after the cpuset use
 * count goes to zero, except inside a critical section guarded
 * by the cpuset_sem semaphore.  If you don't hold cpuset_sem,
 * then a zero cpuset use count is a license to any other task to
 * nuke the cpuset immediately.
 **/

void cpuset_exit(struct task_struct *tsk)
{
	struct cpuset *cs;

	task_lock(tsk);
	cs = tsk->cpuset;
	tsk->cpuset = NULL;
	task_unlock(tsk);

	if (notify_on_release(cs)) {
		char *pathbuf = NULL;

		down(&cpuset_sem);
		if (atomic_dec_and_test(&cs->count))
			check_for_release(cs, &pathbuf);
		up(&cpuset_sem);
		cpuset_release_agent(pathbuf);
	} else {
		atomic_dec(&cs->count);
	}
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 *
 * Description: Returns the cpumask_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_map, even if this means going outside the
 * task's cpuset.
 **/

cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk)
{
	cpumask_t mask;

	down(&cpuset_sem);
	task_lock((struct task_struct *)tsk);
	guarantee_online_cpus(tsk->cpuset, &mask);
	task_unlock((struct task_struct *)tsk);
	up(&cpuset_sem);

	return mask;
}

void cpuset_init_current_mems_allowed(void)
{
	current->mems_allowed = NODE_MASK_ALL;
}

/**
 * cpuset_update_current_mems_allowed - update mems parameters to new values
 *
 * If the current task's cpuset's mems_allowed changed behind our backs,
 * update current->mems_allowed and mems_generation to the new values.
 * Do not call this routine if in_interrupt().
 */

void cpuset_update_current_mems_allowed(void)
{
	struct cpuset *cs = current->cpuset;

	if (!cs)
		return;		/* task is exiting */
	if (current->cpuset_mems_generation != cs->mems_generation) {
		down(&cpuset_sem);
		refresh_mems();
		up(&cpuset_sem);
	}
}

/**
 * cpuset_restrict_to_mems_allowed - limit nodes to current mems_allowed
 * @nodes: pointer to a node bitmap that is and-ed with mems_allowed
 */
void cpuset_restrict_to_mems_allowed(unsigned long *nodes)
{
	bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed),
							MAX_NUMNODES);
}

/**
 * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
 * @zl: the zonelist to be checked
 *
 * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
 */
int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
{
	int i;

	for (i = 0; zl->zones[i]; i++) {
		int nid = zl->zones[i]->zone_pgdat->node_id;

		if (node_isset(nid, current->mems_allowed))
			return 1;
	}
	return 0;
}

/**
 * cpuset_zone_allowed - is zone z allowed in current->mems_allowed
 * @z: zone in question
 *
 * Is zone z allowed in current->mems_allowed, or is
 * the CPU in interrupt context? (zone is always allowed in this case)
 */
int cpuset_zone_allowed(struct zone *z)
{
	return in_interrupt() ||
		node_isset(z->zone_pgdat->node_id, current->mems_allowed);
}
/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 */

static int proc_cpuset_show(struct seq_file *m, void *v)
{
	struct cpuset *cs;
	struct task_struct *tsk;
	char *buf;
	int retval = 0;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	tsk = m->private;
	down(&cpuset_sem);
	task_lock(tsk);
	cs = tsk->cpuset;
	task_unlock(tsk);
	if (!cs) {
		retval = -EINVAL;
		goto out;
	}

	retval = cpuset_path(cs, buf, PAGE_SIZE);
	if (retval < 0)
		goto out;
	seq_puts(m, buf);
	seq_putc(m, '\n');
out:
	up(&cpuset_sem);
	kfree(buf);
	return retval;
}

static int cpuset_open(struct inode *inode, struct file *file)
{
	struct task_struct *tsk = PROC_I(inode)->task;
	return single_open(file, proc_cpuset_show, tsk);
}

struct file_operations proc_cpuset_operations = {
	.open = cpuset_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
{
	buffer += sprintf(buffer, "Cpus_allowed:\t");
	buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed);
	buffer += sprintf(buffer, "\n");
	buffer += sprintf(buffer, "Mems_allowed:\t");
	buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed);
	buffer += sprintf(buffer, "\n");
	return buffer;
}
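
/*
 * For example, on an unrestricted 4-cpu, 1-node machine, the two
 * /proc/<pid>/status lines produced above would look something like
 * the following (the hex mask widths vary with NR_CPUS/MAX_NUMNODES):
 *
 *	Cpus_allowed:	0000000f
 *	Mems_allowed:	00000001
 */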