/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <linux/mutex.h>

/*
 * Tracks how many cpusets are currently defined in system.
 * When there is only one cpuset (the root cpuset) we can
 * short circuit some hooks.
 */
int number_of_cpusets __read_mostly;

/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;
struct cpuset;

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */
	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */

	struct cpuset *parent;		/* my parent */

	/*
	 * Copy of global cpuset_mems_generation as of the most
	 * recent time this cpuset changed its mems_allowed.
	 */
	int mems_generation;

	struct fmeter fmeter;		/* memory_pressure filter */
};

/* Retrieve the cpuset for a cgroup */
static inline struct cpuset *cgroup_cs(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
			    struct cpuset, css);
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return container_of(task_subsys_state(task, cpuset_subsys_id),
			    struct cpuset, css);
}

/* bits in struct cpuset flags field */
typedef enum {
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEMORY_MIGRATE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

/*
 * Increment this integer every time any cpuset changes its
 * mems_allowed value.  Users of cpusets can track this generation
 * number, and avoid having to lock and reload mems_allowed unless
 * the cpuset they're using changes generation.
 *
 * A single, global generation is needed because attach_task() could
 * reattach a task to a different cpuset, which must not have its
 * generation numbers aliased with those of that task's previous cpuset.
 *
 * Generations are needed for mems_allowed because one task cannot
 * modify another's memory placement.  So we must enable every task,
 * on every visit to __alloc_pages(), to efficiently check whether
 * its current->cpuset->mems_allowed has changed, requiring an update
 * of its current->mems_allowed.
 *
 * Since cpuset_mems_generation is guarded by manage_mutex,
 * there is no need to mark it atomic.
 */
static int cpuset_mems_generation;

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
	.cpus_allowed = CPU_MASK_ALL,
	.mems_allowed = NODE_MASK_ALL,
};

/*
 * We have two global cpuset mutexes below.  They can nest.
 * It is ok to first take manage_mutex, then nest callback_mutex.  We also
 * require taking task_lock() when dereferencing a task's cpuset pointer.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold both mutexes to modify cpusets.  If a task
 * holds manage_mutex, then it blocks others wanting that mutex,
 * ensuring that it is the only task able to also acquire callback_mutex
 * and be able to modify cpusets.  It can perform various checks on
 * the cpuset structure first, knowing nothing will change.  It can
 * also allocate memory while just holding manage_mutex.  While it is
 * performing these checks, various callback routines can briefly
 * acquire callback_mutex to query cpusets.  Once it is ready to make
 * the changes, it takes callback_mutex, blocking everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_mutex, as that would risk double tripping on callback_mutex
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_mutex, then it has read-only
 * access to cpusets.
 *
 * The task_struct fields mems_allowed and mems_generation may only
 * be accessed in the context of that task, so require no locks.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding manage_mutex or callback_mutex can't rely
 * on the count field not changing.  However, if the count goes to
 * zero, then only attach_task(), which holds both mutexes, can
 * increment it again.  Because a count of zero means that no tasks
 * are currently attached, therefore there is no way a task attached
 * to that cpuset can fork (the other way to increment the count).
 * So code holding manage_mutex or callback_mutex can safely assume that
 * if the count is zero, it will stay zero.  Similarly, if a task
 * holds manage_mutex or callback_mutex on a cpuset with zero count, it
 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
 * both of those mutexes.
 *
 * The cpuset_common_file_write handler for operations that modify
 * the cpuset hierarchy holds manage_mutex across the entire operation,
 * single threading all such cpuset modifications across the system.
 *
 * The cpuset_common_file_read() handlers only hold callback_mutex across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
 * (usually) take either mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cpuset_exit(),
 * when a task in a notify_on_release cpuset exits.  Then manage_mutex
 * is taken, and if the cpuset count is zero, a usermode call is made
 * to /sbin/cpuset_release_agent with the name of the cpuset (path
 * relative to the root of the cpuset file system) as the argument.
 *
 * A cpuset can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cpusets is empty.  Since all
 * tasks in the system use _some_ cpuset, and since there is always at
 * least one task in the system (init), therefore, top_cpuset
 * always has either children cpusets and/or using tasks.  So we don't
 * need a special hack to ensure that top_cpuset cannot be deleted.
 *
 * The above "Tale of Two Mutexes" would be complete, but for:
 *
 *	The task_lock() exception
 *
 * The need for this exception arises from the action of attach_task(),
 * which overwrites one task's cpuset pointer with another.  It does
 * so using both mutexes, however there are several performance
 * critical places that need to reference task->cpuset without the
 * expense of grabbing a system global mutex.  Therefore except as
 * noted below, when dereferencing or, as in attach_task(), modifying
 * a task's cpuset pointer we use task_lock(), which acts on a spinlock
 * (task->alloc_lock) already in the task_struct routinely used for
 * such matters.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cpuset pointer by attach_task() and the
 * access of task->cpuset->mems_generation via that pointer in
 * the routine cpuset_update_task_memory_state().
 */

static DEFINE_MUTEX(callback_mutex);

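/*
 * A minimal sketch of the nesting discipline described above
 * (illustrative only; 'newmask' is a hypothetical local).  A typical
 * modifier holds manage_mutex (taken here via cgroup_lock()) across
 * the whole operation and nests callback_mutex only around the final
 * store, as update_cpumask() below does:
 *
 *	cgroup_lock();			-- manage_mutex: one writer at a time
 *	...validate and allocate while readers continue...
 *	mutex_lock(&callback_mutex);	-- briefly blocks readers
 *	cs->cpus_allowed = newmask;
 *	mutex_unlock(&callback_mutex);
 *	cgroup_unlock();
 */
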
/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users.  If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead.
 */
static int cpuset_get_sb(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data, struct vfsmount *mnt)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	int ret = -ENODEV;
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->get_sb(cgroup_fs, flags,
					unused_dev_name, mountopts, mnt);
		put_filesystem(cgroup_fs);
	}
	return ret;
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.get_sb = cpuset_get_sb,
};

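/*
 * In practice (illustrative only; /dev/cpuset is just the conventional
 * mount point), given the mountopts above, a legacy mount such as:
 *
 *	mount -t cpuset cpuset /dev/cpuset
 *
 * is handled as if the user had requested:
 *
 *	mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent cgroup /dev/cpuset
 */
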
/*
 * Return in *pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  If we get
 * all the way to the top and still haven't found any online cpus,
 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 * task, return cpu_online_map.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_map.
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
{
	while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
		cs = cs->parent;
	if (cs)
		cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
	else
		*pmask = cpu_online_map;
	BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  If we get all the way to the top and still haven't
 * found any online mems, return node_states[N_HIGH_MEMORY].
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_HIGH_MEMORY].
 *
 * Call with callback_mutex held.
 */

static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
{
	while (cs && !nodes_intersects(cs->mems_allowed,
					node_states[N_HIGH_MEMORY]))
		cs = cs->parent;
	if (cs)
		nodes_and(*pmask, cs->mems_allowed,
			  node_states[N_HIGH_MEMORY]);
	else
		*pmask = node_states[N_HIGH_MEMORY];
	BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
}

/**
 * cpuset_update_task_memory_state - update task memory placement
 *
 * If the current task's cpuset's mems_allowed changed behind our
 * backs, update current->mems_allowed, mems_generation and task NUMA
 * mempolicy to the new value.
 *
 * Task mempolicy is updated by rebinding it relative to the
 * current->cpuset if a task has its memory placement changed.
 * Do not call this routine if in_interrupt().
 *
 * Call without callback_mutex or task_lock() held.  May be
 * called with or without manage_mutex held.  Thanks in part to
 * 'the_top_cpuset_hack', the task's cpuset pointer will never
 * be NULL.  This routine also might acquire callback_mutex and
 * current->mm->mmap_sem during call.
 *
 * Reading current->cpuset->mems_generation doesn't need task_lock
 * to guard the current->cpuset dereference, because it is guarded
 * from concurrent freeing of current->cpuset by attach_task(),
 * using RCU.
 *
 * The rcu_dereference() is technically probably not needed,
 * as I don't actually mind if I see a new cpuset pointer but
 * an old value of mems_generation.  However this really only
 * matters on alpha systems using cpusets heavily.  If I dropped
 * that rcu_dereference(), it would save them a memory barrier.
 * For all other arches, rcu_dereference is a no-op anyway, and for
 * alpha systems not using cpusets, another planned optimization,
 * avoiding the rcu critical section for tasks in the root cpuset
 * which is statically allocated, so can't vanish, will make this
 * irrelevant.  Better to use RCU as intended, than to engage in
 * some cute trick to save a memory barrier that is impossible to
 * test, for alpha systems using cpusets heavily, which might not
 * even exist.
 *
 * This routine is needed to update the per-task mems_allowed data,
 * within the task's context, when it is trying to allocate memory
 * (in various mm/mempolicy.c routines) and notices that some other
 * task has been modifying its cpuset.
 */

void cpuset_update_task_memory_state(void)
{
	int my_cpusets_mem_gen;
	struct task_struct *tsk = current;
	struct cpuset *cs;

	if (task_cs(tsk) == &top_cpuset) {
		/* Don't need rcu for top_cpuset.  It's never freed. */
		my_cpusets_mem_gen = top_cpuset.mems_generation;
	} else {
		rcu_read_lock();
		my_cpusets_mem_gen = task_cs(current)->mems_generation;
		rcu_read_unlock();
	}

	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
		mutex_lock(&callback_mutex);
		task_lock(tsk);
		cs = task_cs(tsk);	/* Maybe changed when task not locked */
		guarantee_online_mems(cs, &tsk->mems_allowed);
		tsk->cpuset_mems_generation = cs->mems_generation;
		if (is_spread_page(cs))
			tsk->flags |= PF_SPREAD_PAGE;
		else
			tsk->flags &= ~PF_SPREAD_PAGE;
		if (is_spread_slab(cs))
			tsk->flags |= PF_SPREAD_SLAB;
		else
			tsk->flags &= ~PF_SPREAD_SLAB;
		task_unlock(tsk);
		mutex_unlock(&callback_mutex);
		mpol_rebind_task(tsk, &tsk->mems_allowed);
	}
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding manage_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * manage_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
{
	struct cgroup *cont;
	struct cpuset *c, *par;

	/* Each of our child cpusets must be a subset of us */
	list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
		if (!is_cpuset_subset(cgroup_cs(cont), trial))
			return -EBUSY;
	}

	/* Remaining checks don't apply to root cpuset */
	if (cur == &top_cpuset)
		return 0;

	par = cur->parent;

	/* We must be a subset of our parent cpuset */
	if (!is_cpuset_subset(trial, par))
		return -EACCES;

	/* If either I or some sibling (!= me) is exclusive, we can't overlap */
	list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
		c = cgroup_cs(cont);
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
			return -EINVAL;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			return -EINVAL;
	}

	return 0;
}

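/*
 * A worked example of the exclusivity rule (illustrative only, with
 * hypothetical cpuset names): if sibling cpuset 'a' is cpu_exclusive
 * and holds cpus 0-3, then a trial change giving sibling 'b' any of
 * cpus 0-3 intersects a's cpus_allowed, and validate_change()
 * returns -EINVAL.
 */
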
/*
 * Call with manage_mutex held.  May take callback_mutex during call.
 */

static int update_cpumask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	int retval;

	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	trialcs = *cs;

	/*
	 * We allow a cpuset's cpus_allowed to be empty; if it has attached
	 * tasks, we'll catch it later when we validate the change and return
	 * -ENOSPC.
	 */
	if (!buf[0] || (buf[0] == '\n' && !buf[1])) {
		cpus_clear(trialcs.cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs.cpus_allowed);
		if (retval < 0)
			return retval;
	}
	cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
	/* cpus_allowed cannot be empty for a cpuset with attached tasks. */
	if (cgroup_task_count(cs->css.cgroup) &&
	    cpus_empty(trialcs.cpus_allowed))
		return -ENOSPC;
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		return retval;
	mutex_lock(&callback_mutex);
	cs->cpus_allowed = trialcs.cpus_allowed;
	mutex_unlock(&callback_mutex);
	return 0;
}

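/*
 * Usage sketch (illustrative only; assumes the cpuset filesystem is
 * mounted at /dev/cpuset and a child cpuset 'my_set' exists):
 *
 *	echo 0-3 > /dev/cpuset/my_set/cpus
 *
 * reaches this handler via cpuset_common_file_write().  An empty
 * write clears cpus_allowed, which only succeeds while no tasks are
 * attached; otherwise the emptiness check above returns -ENOSPC.
 */
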
/*
 * cpuset_migrate_mm
 *
 * Migrate memory region from one set of nodes to another.
 *
 * Temporarily set the task's mems_allowed to target nodes of migration,
 * so that the migration code can allocate pages on these nodes.
 *
 * Call holding manage_mutex, so our current->cpuset won't change
 * during this call, as manage_mutex holds off any attach_task()
 * calls.  Therefore we don't need to take task_lock around the
 * call to guarantee_online_mems(), as we know no one is changing
 * our task's cpuset.
 *
 * Hold callback_mutex around the two modifications of our task's
 * mems_allowed to synchronize with cpuset_mems_allowed().
 *
 * While the mm_struct we are migrating is typically from some
 * other task, the task_struct mems_allowed that we are hacking
 * is for our current task, which must allocate new pages for that
 * migrating memory region.
 *
 * We call cpuset_update_task_memory_state() before hacking
 * our task's mems_allowed, so that we are assured of being in
 * sync with our task's cpuset, and in particular, callbacks to
 * cpuset_update_task_memory_state() from nested page allocations
 * won't see any mismatch of our cpuset and task mems_generation
 * values, so won't overwrite our hacked task's mems_allowed
 * nodemask.
 */

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
			      const nodemask_t *to)
{
	struct task_struct *tsk = current;

	cpuset_update_task_memory_state();

	mutex_lock(&callback_mutex);
	tsk->mems_allowed = *to;
	mutex_unlock(&callback_mutex);

	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);

	mutex_lock(&callback_mutex);
	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
	mutex_unlock(&callback_mutex);
}

/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
 * cpuset's mems_allowed and mems_generation, and for each
 * task in the cpuset, rebind any vma mempolicies and if
 * the cpuset is marked 'memory_migrate', migrate the task's
 * pages to the new memory.
 *
 * Call with manage_mutex held.  May take callback_mutex during call.
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such task's mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpuset's new mems_allowed.
 */

static void *cpuset_being_rebound;

static int update_nodemask(struct cpuset *cs, char *buf)
{
	struct cpuset trialcs;
	nodemask_t oldmem;
	struct task_struct *p;
	struct mm_struct **mmarray;
	int i, n, ntasks;
	int migrate;
	int fudge;
	int retval;
	struct cgroup_iter it;

	/*
	 * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
	 * it's read-only
	 */
	if (cs == &top_cpuset)
		return -EACCES;

	trialcs = *cs;

	/*
	 * We allow a cpuset's mems_allowed to be empty; if it has attached
	 * tasks, we'll catch it later when we validate the change and return
	 * -ENOSPC.
	 */
	if (!buf[0] || (buf[0] == '\n' && !buf[1])) {
		nodes_clear(trialcs.mems_allowed);
	} else {
		retval = nodelist_parse(buf, trialcs.mems_allowed);
		if (retval < 0)
			goto done;
		if (!nodes_intersects(trialcs.mems_allowed,
				      node_states[N_HIGH_MEMORY])) {
			/*
			 * error if only memoryless nodes specified.
			 */
			retval = -ENOSPC;
			goto done;
		}
	}
	/*
	 * Exclude memoryless nodes.  We know that trialcs.mems_allowed
	 * contains at least one node with memory.
	 */
	nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
		  node_states[N_HIGH_MEMORY]);
	oldmem = cs->mems_allowed;
	if (nodes_equal(oldmem, trialcs.mems_allowed)) {
		retval = 0;		/* Too easy - nothing to do */
		goto done;
	}
	/* mems_allowed cannot be empty for a cpuset with attached tasks. */
	if (cgroup_task_count(cs->css.cgroup) &&
	    nodes_empty(trialcs.mems_allowed)) {
		retval = -ENOSPC;
		goto done;
	}
	retval = validate_change(cs, &trialcs);
	if (retval < 0)
		goto done;

	mutex_lock(&callback_mutex);
	cs->mems_allowed = trialcs.mems_allowed;
	cs->mems_generation = cpuset_mems_generation++;
	mutex_unlock(&callback_mutex);

	cpuset_being_rebound = cs;		/* causes mpol_copy() rebind */

	fudge = 10;				/* spare mmarray[] slots */
	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
	retval = -ENOMEM;

	/*
	 * Allocate mmarray[] to hold mm reference for each task
	 * in cpuset cs.  Can't kmalloc GFP_KERNEL while holding
	 * tasklist_lock.  We could use GFP_ATOMIC, but with a
	 * few more lines of code, we can retry until we get a big
	 * enough mmarray[] w/o using GFP_ATOMIC.
	 */
	while (1) {
		ntasks = cgroup_task_count(cs->css.cgroup);	/* guess */
		ntasks += fudge;
		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
		if (!mmarray)
			goto done;
		read_lock(&tasklist_lock);		/* block fork */
		if (cgroup_task_count(cs->css.cgroup) <= ntasks)
			break;				/* got enough */
		read_unlock(&tasklist_lock);		/* try again */
		kfree(mmarray);
	}

	n = 0;

	/* Load up mmarray[] with mm reference for each task in cpuset. */
	cgroup_iter_start(cs->css.cgroup, &it);
	while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
		struct mm_struct *mm;

		if (n >= ntasks) {
			printk(KERN_WARNING
				"Cpuset mempolicy rebind incomplete.\n");
			break;
		}
		mm = get_task_mm(p);
		if (!mm)
			continue;
		mmarray[n++] = mm;
	}
	cgroup_iter_end(cs->css.cgroup, &it);
	read_unlock(&tasklist_lock);

	/*
	 * Now that we've dropped the tasklist spinlock, we can
	 * rebind the vma mempolicies of each mm in mmarray[] to their
	 * new cpuset, and release that mm.  The mpol_rebind_mm()
	 * call takes mmap_sem, which we couldn't take while holding
	 * tasklist_lock.  Forks can happen again now - the mpol_copy()
	 * cpuset_being_rebound check will catch such forks, and rebind
	 * their vma mempolicies too.  Because we still hold the global
	 * cpuset manage_mutex, we know that no other rebind effort will
	 * be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	migrate = is_memory_migrate(cs);
	for (i = 0; i < n; i++) {
		struct mm_struct *mm = mmarray[i];

		mpol_rebind_mm(mm, &cs->mems_allowed);
		if (migrate)
			cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed);
		mmput(mm);
	}

	/* We're done rebinding vma's to this cpuset's new mems_allowed. */
	kfree(mmarray);
	cpuset_being_rebound = NULL;
	retval = 0;
done:
	return retval;
}

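/*
 * Usage sketch (illustrative only; same hypothetical mount point and
 * cpuset as above):
 *
 *	echo 1 > /dev/cpuset/my_set/memory_migrate
 *	echo 0-1 > /dev/cpuset/my_set/mems
 *
 * The second write lands here; because memory_migrate was enabled,
 * each attached task's pages are migrated from the old nodes to
 * nodes 0-1 via cpuset_migrate_mm().
 */
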
int current_cpuset_is_being_rebound(void)
{
	return task_cs(current) == cpuset_being_rebound;
}

/*
 * Call with manage_mutex held.
 */

static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
{
	if (simple_strtoul(buf, NULL, 10) != 0)
		cpuset_memory_pressure_enabled = 1;
	else
		cpuset_memory_pressure_enabled = 0;
	return 0;
}

/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
 * bit:	the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
 *				CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE,
 *				CS_SPREAD_PAGE, CS_SPREAD_SLAB)
 * cs:	the cpuset to update
 * buf:	the buffer where we read the 0 or 1
 *
 * Call with manage_mutex held.
 */

static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
{
	int turning_on;
	struct cpuset trialcs;
	int err;

	turning_on = (simple_strtoul(buf, NULL, 10) != 0);

	trialcs = *cs;
	if (turning_on)
		set_bit(bit, &trialcs.flags);
	else
		clear_bit(bit, &trialcs.flags);

	err = validate_change(cs, &trialcs);
	if (err < 0)
		return err;
	mutex_lock(&callback_mutex);
	cs->flags = trialcs.flags;
	mutex_unlock(&callback_mutex);

	return 0;
}

/*
 * Frequency meter - How fast is some event occurring?
 *
 * These routines manage a digitally filtered, constant time based,
 * event frequency meter.  There are four routines:
 *   fmeter_init() - initialize a frequency meter.
 *   fmeter_markevent() - called each time the event happens.
 *   fmeter_getrate() - returns the recent rate of such events.
 *   fmeter_update() - internal routine used to update fmeter.
 *
 * A common data structure is passed to each of these routines,
 * which is used to keep track of the state required to manage the
 * frequency meter and its digital filter.
 *
 * The filter works on the number of events marked per unit time.
 * The filter is single-pole low-pass recursive (IIR).  The time unit
 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 * simulate 3 decimal digits of precision (multiplied by 1000).
 *
 * With an FM_COEF of 933, and a time base of 1 second, the filter
 * has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from the fmeter_getrate()
 * will be cut in half each 10 seconds, until it converges to zero.
 *
 * It is not worth doing a real infinitely recursive filter.  If more
 * than FM_MAXTICKS ticks have elapsed since the last filter event,
 * just compute FM_MAXTICKS ticks worth, by which point the level
 * will be stable.
 *
 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 * arithmetic overflow in the fmeter_update() routine.
 *
 * Given the simple 32 bit integer arithmetic used, this meter works
 * best for reporting rates between one per millisecond (msec) and
 * one per 32 (approx) seconds.  At constant rates faster than one
 * per msec it maxes out at values just under 1,000,000.  At constant
 * rates between one per msec, and one per second it will stabilize
 * to a value N*1000, where N is the rate of events per second.
 * At constant rates between one per second and one per 32 seconds,
 * it will be choppy, moving up on the seconds that have an event,
 * and then decaying until the next event.  At rates slower than
 * about one in 32 seconds, it decays all the way back to zero between
 * each event.
 */

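/*
 * A quick arithmetic check of the half-life claim above (illustrative
 * only): each one second tick scales val by FM_COEF/FM_SCALE =
 * 933/1000 = 0.933, and 0.933^10 ~= 0.50, so ten quiet seconds cut
 * the reported rate roughly in half, as stated.
 */
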
#define FM_COEF 933		/* coefficient for half-life of 10 secs */
#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
#define FM_SCALE 1000		/* faux fixed point scale */

/* Initialize a frequency meter */
static void fmeter_init(struct fmeter *fmp)
{
	fmp->cnt = 0;
	fmp->val = 0;
	fmp->time = 0;
	spin_lock_init(&fmp->lock);
}

/* Internal meter update - process cnt events and update value */
static void fmeter_update(struct fmeter *fmp)
{
	time_t now = get_seconds();
	time_t ticks = now - fmp->time;

	if (ticks == 0)
		return;

	ticks = min(FM_MAXTICKS, ticks);
	while (ticks-- > 0)
		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
	fmp->time = now;

	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
	fmp->cnt = 0;
}

/* Process any previous ticks, then bump cnt by one (times scale). */
static void fmeter_markevent(struct fmeter *fmp)
{
	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
	spin_unlock(&fmp->lock);
}

/* Process any previous ticks, then return current value. */
static int fmeter_getrate(struct fmeter *fmp)
{
	int val;

	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	val = fmp->val;
	spin_unlock(&fmp->lock);
	return val;
}

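/*
 * Minimal usage sketch (illustrative only; 'cs' and 'rate' are
 * hypothetical locals): a producer marks one event per occurrence,
 * and a reader later fetches the filtered rate, as the
 * memory_pressure file read path below does:
 *
 *	fmeter_markevent(&task_cs(current)->fmeter);
 *	...
 *	rate = fmeter_getrate(&cs->fmeter);
 */
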
static int cpuset_can_attach(struct cgroup_subsys *ss,
			     struct cgroup *cont, struct task_struct *tsk)
{
	struct cpuset *cs = cgroup_cs(cont);

	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
		return -ENOSPC;

	return security_task_setscheduler(tsk, 0, NULL);
}

static void cpuset_attach(struct cgroup_subsys *ss,
			  struct cgroup *cont, struct cgroup *oldcont,
			  struct task_struct *tsk)
{
	cpumask_t cpus;
	nodemask_t from, to;
	struct mm_struct *mm;
	struct cpuset *cs = cgroup_cs(cont);
	struct cpuset *oldcs = cgroup_cs(oldcont);

	mutex_lock(&callback_mutex);
	guarantee_online_cpus(cs, &cpus);
	set_cpus_allowed(tsk, cpus);
	mutex_unlock(&callback_mutex);

	from = oldcs->mems_allowed;
	to = cs->mems_allowed;
	mm = get_task_mm(tsk);
	if (mm) {
		mpol_rebind_mm(mm, &to);
		if (is_memory_migrate(cs))
			cpuset_migrate_mm(mm, &from, &to);
		mmput(mm);
	}
}

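/*
 * Usage sketch (illustrative only; 'tasks' is the per-cgroup file
 * provided by the cgroup core, shown with the same hypothetical
 * mount point as above):
 *
 *	echo $$ > /dev/cpuset/my_set/tasks
 *
 * The cgroup core calls cpuset_can_attach() to veto moves into an
 * empty cpuset, then cpuset_attach() to update the task's allowed
 * CPUs and rebind/migrate its memory.
 */
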
/* The various types of files and directories in a cpuset file system */

typedef enum {
	FILE_MEMORY_MIGRATE,
	FILE_CPULIST,
	FILE_MEMLIST,
	FILE_CPU_EXCLUSIVE,
	FILE_MEM_EXCLUSIVE,
	FILE_MEMORY_PRESSURE_ENABLED,
	FILE_MEMORY_PRESSURE,
	FILE_SPREAD_PAGE,
	FILE_SPREAD_SLAB,
} cpuset_filetype_t;

static ssize_t cpuset_common_file_write(struct cgroup *cont,
					struct cftype *cft,
					struct file *file,
					const char __user *userbuf,
					size_t nbytes, loff_t *unused_ppos)
{
	struct cpuset *cs = cgroup_cs(cont);
	cpuset_filetype_t type = cft->private;
	char *buffer;
	int retval = 0;

	/* Crude upper limit on largest legitimate cpulist user might write. */
	if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES))
		return -E2BIG;

	/* +1 for nul-terminator */
	buffer = kmalloc(nbytes + 1, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	if (copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out1;
	}
	buffer[nbytes] = 0;	/* nul-terminate */

	cgroup_lock();

	if (cgroup_is_removed(cont)) {
		retval = -ENODEV;
		goto out2;
	}

	switch (type) {
	case FILE_CPULIST:
		retval = update_cpumask(cs, buffer);
		break;
	case FILE_MEMLIST:
		retval = update_nodemask(cs, buffer);
		break;
	case FILE_CPU_EXCLUSIVE:
		retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
		break;
	case FILE_MEM_EXCLUSIVE:
		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
		break;
	case FILE_MEMORY_MIGRATE:
		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
		break;
	case FILE_MEMORY_PRESSURE_ENABLED:
		retval = update_memory_pressure_enabled(cs, buffer);
		break;
	case FILE_MEMORY_PRESSURE:
		retval = -EACCES;
		break;
	case FILE_SPREAD_PAGE:
		retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
		cs->mems_generation = cpuset_mems_generation++;
		break;
	case FILE_SPREAD_SLAB:
		retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
		cs->mems_generation = cpuset_mems_generation++;
		break;
	default:
		retval = -EINVAL;
		goto out2;
	}

	if (retval == 0)
		retval = nbytes;
out2:
	cgroup_unlock();
out1:
	kfree(buffer);
	return retval;
}

/*
 * These ascii lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map.  If read in smaller
 * chunks, there is no guarantee of atomicity.  Since the display format
 * used, list of ranges of sequential numbers, is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
 * A single large read to a buffer that crosses a page boundary is
 * ok, because the result being copied to user land is not recomputed
 * across a page fault.
 */

static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
{
	cpumask_t mask;

	mutex_lock(&callback_mutex);
	mask = cs->cpus_allowed;
	mutex_unlock(&callback_mutex);

	return cpulist_scnprintf(page, PAGE_SIZE, mask);
}

static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
{
	nodemask_t mask;

	mutex_lock(&callback_mutex);
	mask = cs->mems_allowed;
	mutex_unlock(&callback_mutex);

	return nodelist_scnprintf(page, PAGE_SIZE, mask);
}

static ssize_t cpuset_common_file_read(struct cgroup *cont,
				       struct cftype *cft,
				       struct file *file,
				       char __user *buf,
				       size_t nbytes, loff_t *ppos)
{
	struct cpuset *cs = cgroup_cs(cont);
	cpuset_filetype_t type = cft->private;
	char *page;
	ssize_t retval = 0;
	char *s;

	page = (char *)__get_free_page(GFP_TEMPORARY);
	if (!page)
		return -ENOMEM;

	s = page;

	switch (type) {
	case FILE_CPULIST:
		s += cpuset_sprintf_cpulist(s, cs);
		break;
	case FILE_MEMLIST:
		s += cpuset_sprintf_memlist(s, cs);
		break;
	case FILE_CPU_EXCLUSIVE:
		*s++ = is_cpu_exclusive(cs) ? '1' : '0';
		break;
	case FILE_MEM_EXCLUSIVE:
		*s++ = is_mem_exclusive(cs) ? '1' : '0';
		break;
	case FILE_MEMORY_MIGRATE:
		*s++ = is_memory_migrate(cs) ? '1' : '0';
		break;
	case FILE_MEMORY_PRESSURE_ENABLED:
		*s++ = cpuset_memory_pressure_enabled ? '1' : '0';
		break;
	case FILE_MEMORY_PRESSURE:
		s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
		break;
	case FILE_SPREAD_PAGE:
		*s++ = is_spread_page(cs) ? '1' : '0';
		break;
	case FILE_SPREAD_SLAB:
		*s++ = is_spread_slab(cs) ? '1' : '0';
		break;
	default:
		retval = -EINVAL;
		goto out;
	}
	*s++ = '\n';

	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
out:
	free_page((unsigned long)page);
	return retval;
}

/*
 * for the common functions, 'private' gives the type of file
 */

static struct cftype cft_cpus = {
	.name = "cpus",
	.read = cpuset_common_file_read,
	.write = cpuset_common_file_write,
	.private = FILE_CPULIST,
};

static struct cftype cft_mems = {
	.name = "mems",
	.read = cpuset_common_file_read,
	.write = cpuset_common_file_write,
	.private = FILE_MEMLIST,
};

static struct cftype cft_cpu_exclusive = {
	.name = "cpu_exclusive",
	.read = cpuset_common_file_read,
	.write = cpuset_common_file_write,
	.private = FILE_CPU_EXCLUSIVE,
};

static struct cftype cft_mem_exclusive = {
	.name = "mem_exclusive",
	.read = cpuset_common_file_read,
	.write = cpuset_common_file_write,
	.private = FILE_MEM_EXCLUSIVE,
};

static struct cftype cft_memory_migrate = {
	.name = "memory_migrate",
	.read = cpuset_common_file_read,
	.write = cpuset_common_file_write,
	.private = FILE_MEMORY_MIGRATE,
};

static struct cftype cft_memory_pressure_enabled = {
	.name = "memory_pressure_enabled",
	.read = cpuset_common_file_read,
	.write = cpuset_common_file_write,
	.private = FILE_MEMORY_PRESSURE_ENABLED,
};

static struct cftype cft_memory_pressure = {
	.name = "memory_pressure",
	.read = cpuset_common_file_read,
	.write = cpuset_common_file_write,
	.private = FILE_MEMORY_PRESSURE,
};

static struct cftype cft_spread_page = {
	.name = "memory_spread_page",
	.read = cpuset_common_file_read,
	.write = cpuset_common_file_write,
	.private = FILE_SPREAD_PAGE,
};

static struct cftype cft_spread_slab = {
	.name = "memory_spread_slab",
	.read = cpuset_common_file_read,
	.write = cpuset_common_file_write,
	.private = FILE_SPREAD_SLAB,
};

static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	int err;

	if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0)
		return err;
	if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0)
		return err;
	if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0)
		return err;
	if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0)
		return err;
	if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
		return err;
	if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
		return err;
	if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
		return err;
	if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0)
		return err;
	/* memory_pressure_enabled is in root cpuset only */
	if (err == 0 && !cont->parent)
		err = cgroup_add_file(cont, ss,
				      &cft_memory_pressure_enabled);
	return 0;
}

/*
 * post_clone() is called at the end of cgroup_clone().
 * 'cgroup' was just created automatically as a result of
 * a cgroup_clone(), and the current task is about to
 * be moved into 'cgroup'.
 *
 * Currently we refuse to set up the cgroup - thereby
 * refusing the task to be entered, and as a result refusing
 * the sys_unshare() or clone() which initiated it - if any
 * sibling cpusets have exclusive cpus or mem.
 *
 * If this becomes a problem for some users who wish to
 * allow that scenario, then cpuset_post_clone() could be
 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
 * (and likewise for mems) to the new cgroup.
 */
static void cpuset_post_clone(struct cgroup_subsys *ss,
			      struct cgroup *cgroup)
{
	struct cgroup *parent, *child;
	struct cpuset *cs, *parent_cs;

	parent = cgroup->parent;
	list_for_each_entry(child, &parent->children, sibling) {
		cs = cgroup_cs(child);
		if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
			return;
	}
	cs = cgroup_cs(cgroup);
	parent_cs = cgroup_cs(parent);

	cs->mems_allowed = parent_cs->mems_allowed;
	cs->cpus_allowed = parent_cs->cpus_allowed;
	return;
}

/*
 * cpuset_create - create a cpuset
 * parent:	cpuset that will be parent of the new cpuset.
 * name:	name of the new cpuset.  Will be strcpy'ed.
 * mode:	mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held
 */

static struct cgroup_subsys_state *cpuset_create(
	struct cgroup_subsys *ss,
	struct cgroup *cont)
{
	struct cpuset *cs;
	struct cpuset *parent;

	if (!cont->parent) {
		/* This is early initialization for the top cgroup */
		top_cpuset.mems_generation = cpuset_mems_generation++;
		return &top_cpuset.css;
	}
	parent = cgroup_cs(cont->parent);
	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	cpuset_update_task_memory_state();
	cs->flags = 0;
	if (is_spread_page(parent))
		set_bit(CS_SPREAD_PAGE, &cs->flags);
	if (is_spread_slab(parent))
		set_bit(CS_SPREAD_SLAB, &cs->flags);
	cs->cpus_allowed = CPU_MASK_NONE;
	cs->mems_allowed = NODE_MASK_NONE;
	cs->mems_generation = cpuset_mems_generation++;
	fmeter_init(&cs->fmeter);

	cs->parent = parent;
	number_of_cpusets++;
	return &cs->css;
}

static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct cpuset *cs = cgroup_cs(cont);

	cpuset_update_task_memory_state();
	number_of_cpusets--;
	kfree(cs);
}

struct cgroup_subsys cpuset_subsys = {
	.name = "cpuset",
	.create = cpuset_create,
	.destroy = cpuset_destroy,
	.can_attach = cpuset_can_attach,
	.attach = cpuset_attach,
	.populate = cpuset_populate,
	.post_clone = cpuset_post_clone,
	.subsys_id = cpuset_subsys_id,
	.early_init = 1,
};

/*
 * cpuset_init_early - just enough so that the calls to
 * cpuset_update_task_memory_state() in early init code
 * are harmless.
 */

int __init cpuset_init_early(void)
{
	top_cpuset.mems_generation = cpuset_mems_generation++;
	return 0;
}

/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system,
 **/

int __init cpuset_init(void)
{
	int err = 0;

	top_cpuset.cpus_allowed = CPU_MASK_ALL;
	top_cpuset.mems_allowed = NODE_MASK_ALL;

	fmeter_init(&top_cpuset.fmeter);
	top_cpuset.mems_generation = cpuset_mems_generation++;

	err = register_filesystem(&cpuset_fs_type);
	if (err < 0)
		return err;

	number_of_cpusets = 1;
	return 0;
}

b1aac8bb PJ |
1338 | /* |
1339 | * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs | |
1340 | * or memory nodes, we need to walk over the cpuset hierarchy, | |
1341 | * removing that CPU or node from all cpusets. If this removes the | |
1342 | * last CPU or node from a cpuset, then the guarantee_online_cpus() | |
1343 | * or guarantee_online_mems() code will use that emptied cpuset's | |
1344 | * parent's online CPUs or nodes. Cpusets that were already empty of | |
1345 | * CPUs or nodes are left empty. | |
1346 | * | |
1347 | * This routine is intentionally inefficient in a couple of regards. | |
1348 | * It will check all cpusets in a subtree even if the top cpuset of | |
1349 | * the subtree has no offline CPUs or nodes. It checks both CPUs and | |
1350 | * nodes, even though the caller could have been coded to know that | |
1351 | * only one of CPUs or nodes needed to be checked on a given call. | |
1352 | * This was done to minimize text size rather than cpu cycles. | |
1353 | * | |
1354 | * Call with both manage_mutex and callback_mutex held. | |
1355 | * | |
1356 | * Recursive, on depth of cpuset subtree. | |
1357 | */ | |
1358 | ||
1359 | static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur) | |
1360 | { | |
8793d854 | 1361 | struct cgroup *cont; |
b1aac8bb PJ |
1362 | struct cpuset *c; |
1363 | ||
1364 | /* Each of our child cpusets' CPUs and mems must be online */ | |
8793d854 PM |
1365 | list_for_each_entry(cont, &cur->css.cgroup->children, sibling) { |
1366 | c = cgroup_cs(cont); | |
b1aac8bb PJ |
1367 | guarantee_online_cpus_mems_in_subtree(c); |
1368 | if (!cpus_empty(c->cpus_allowed)) | |
1369 | guarantee_online_cpus(c, &c->cpus_allowed); | |
1370 | if (!nodes_empty(c->mems_allowed)) | |
1371 | guarantee_online_mems(c, &c->mems_allowed); | |
1372 | } | |
1373 | } | |
1374 | ||
1375 | /* | |
1376 | * The cpus_allowed and mems_allowed nodemasks in the top_cpuset track | |
0e1e7c7a CL |
1377 | * cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to |
1378 | * track what's online after any CPU or memory node hotplug or unplug | |
1379 | * event. | |
b1aac8bb PJ |
1380 | * |
1381 | * To ensure that we don't remove a CPU or node from the top cpuset | |
1382 | * that is currently in use by a child cpuset (which would violate | |
1383 | * the rule that cpusets must be subsets of their parent), we first | |
1384 | * call the recursive routine guarantee_online_cpus_mems_in_subtree(). | |
1385 | * | |
1386 | * Since there are two callers of this routine, one for CPU hotplug | |
1387 | * events and one for memory node hotplug events, we could have coded | |
1388 | * two separate routines here. We code it as a single common routine | |
1389 | * in order to minimize text size. | |
1390 | */ | |
1391 | ||
1392 | static void common_cpu_mem_hotplug_unplug(void) | |
1393 | { | |
8793d854 | 1394 | cgroup_lock(); |
b1aac8bb PJ |
1395 | mutex_lock(&callback_mutex); |
1396 | ||
1397 | guarantee_online_cpus_mems_in_subtree(&top_cpuset); | |
1398 | top_cpuset.cpus_allowed = cpu_online_map; | |
0e1e7c7a | 1399 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; |
b1aac8bb PJ |
1400 | |
1401 | mutex_unlock(&callback_mutex); | |
8793d854 | 1402 | cgroup_unlock(); |
b1aac8bb | 1403 | } |
b1aac8bb | 1404 | |
4c4d50f7 PJ |
1405 | /* |
1406 | * The top_cpuset tracks what CPUs and Memory Nodes are online, | |
1407 | * period. This is necessary in order to make cpusets transparent | |
1408 | * (of no effect) on systems that are actively using CPU hotplug | |
1409 | * but making no active use of cpusets. | |
1410 | * | |
38837fc7 PJ |
1411 | * This routine ensures that top_cpuset.cpus_allowed tracks |
1412 | * cpu_online_map on each CPU hotplug (cpuhp) event. | |
4c4d50f7 PJ |
1413 | */ |
1414 | ||
4c4d50f7 PJ |
1415 | static int cpuset_handle_cpuhp(struct notifier_block *nb, |
1416 | unsigned long phase, void *cpu) | |
1417 | { | |
ac076758 AK |
1418 | if (phase == CPU_DYING || phase == CPU_DYING_FROZEN) |
1419 | return NOTIFY_DONE; | |
1420 | ||
b1aac8bb | 1421 | common_cpu_mem_hotplug_unplug(); |
4c4d50f7 PJ |
1422 | return 0; |
1423 | } | |
4c4d50f7 | 1424 | |
b1aac8bb | 1425 | #ifdef CONFIG_MEMORY_HOTPLUG |
38837fc7 | 1426 | /* |
0e1e7c7a CL |
1427 | * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY]. |
1428 | * Call this routine anytime after you change | |
1429 | * node_states[N_HIGH_MEMORY]. | |
38837fc7 PJ |
1430 | * See also the previous routine cpuset_handle_cpuhp(). |
1431 | */ | |
1432 | ||
1af98928 | 1433 | void cpuset_track_online_nodes(void) |
38837fc7 | 1434 | { |
b1aac8bb | 1435 | common_cpu_mem_hotplug_unplug(); |
38837fc7 PJ |
1436 | } |
1437 | #endif | |
1438 | ||
1da177e4 LT |
1439 | /** |
1440 | * cpuset_init_smp - initialize cpus_allowed | |
1441 | * | |
1442 | * Description: Finish top cpuset after cpu, node maps are initialized | |
1443 | **/ | |
1444 | ||
1445 | void __init cpuset_init_smp(void) | |
1446 | { | |
1447 | top_cpuset.cpus_allowed = cpu_online_map; | |
0e1e7c7a | 1448 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; |
4c4d50f7 PJ |
1449 | |
1450 | hotcpu_notifier(cpuset_handle_cpuhp, 0); | |
1da177e4 LT |
1451 | } |
1452 | ||
1453 | /** |
1da177e4 LT |
1455 | * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset. |
1456 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. | |
1457 | * | |
1458 | * Description: Returns the cpumask_t cpus_allowed of the cpuset | |
1459 | * attached to the specified @tsk. Guaranteed to return some non-empty | |
1460 | * subset of cpu_online_map, even if this means going outside the | |
1461 | * task's cpuset. | |
1462 | **/ | |
1463 | ||
909d75a3 | 1464 | cpumask_t cpuset_cpus_allowed(struct task_struct *tsk) |
1da177e4 LT |
1465 | { |
1466 | cpumask_t mask; | |
1467 | ||
3d3f26a7 | 1468 | mutex_lock(&callback_mutex); |
909d75a3 | 1469 | task_lock(tsk); |
8793d854 | 1470 | guarantee_online_cpus(task_cs(tsk), &mask); |
909d75a3 | 1471 | task_unlock(tsk); |
3d3f26a7 | 1472 | mutex_unlock(&callback_mutex); |
1da177e4 LT |
1473 | |
1474 | return mask; | |
1475 | } | |
1476 | ||
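/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a hotplug recovery path can fall back to the cpuset-wide mask when a
 * task's own cpus_allowed has been emptied, relying on the guarantee
 * above that the returned mask is a non-empty subset of cpu_online_map.
 */
static void rebind_task_example(struct task_struct *tsk)
{
	cpumask_t mask = cpuset_cpus_allowed(tsk);	/* never empty */

	set_cpus_allowed(tsk, mask);
}
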
1477 | void cpuset_init_current_mems_allowed(void) | |
1478 | { | |
1479 | current->mems_allowed = NODE_MASK_ALL; | |
1480 | } | |
1481 | ||
909d75a3 PJ |
1482 | /** |
1483 | * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset. | |
1484 | * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. | |
1485 | * | |
1486 | * Description: Returns the nodemask_t mems_allowed of the cpuset | |
1487 | * attached to the specified @tsk. Guaranteed to return some non-empty | |
0e1e7c7a | 1488 | * subset of node_states[N_HIGH_MEMORY], even if this means going outside the |
909d75a3 PJ |
1489 | * task's cpuset. |
1490 | **/ | |
1491 | ||
1492 | nodemask_t cpuset_mems_allowed(struct task_struct *tsk) | |
1493 | { | |
1494 | nodemask_t mask; | |
1495 | ||
3d3f26a7 | 1496 | mutex_lock(&callback_mutex); |
909d75a3 | 1497 | task_lock(tsk); |
8793d854 | 1498 | guarantee_online_mems(task_cs(tsk), &mask); |
909d75a3 | 1499 | task_unlock(tsk); |
3d3f26a7 | 1500 | mutex_unlock(&callback_mutex); |
909d75a3 PJ |
1501 | |
1502 | return mask; | |
1503 | } | |
1504 | ||
d9fd8a6d RD |
1505 | /** |
1506 | * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed | |
1507 | * @zl: the zonelist to be checked | |
1508 | * | |
1da177e4 LT |
1509 | * Are any of the nodes on zonelist zl allowed in current->mems_allowed? |
1510 | */ | |
1511 | int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) | |
1512 | { | |
1513 | int i; | |
1514 | ||
1515 | for (i = 0; zl->zones[i]; i++) { | |
89fa3024 | 1516 | int nid = zone_to_nid(zl->zones[i]); |
1da177e4 LT |
1517 | |
1518 | if (node_isset(nid, current->mems_allowed)) | |
1519 | return 1; | |
1520 | } | |
1521 | return 0; | |
1522 | } | |
1523 | ||
9bf2229f PJ |
1524 | /* |
1525 | * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive | |
3d3f26a7 | 1526 | * ancestor to the specified cpuset. Call holding callback_mutex. |
9bf2229f PJ |
1527 | * If no ancestor is mem_exclusive (an unusual configuration), then |
1528 | * returns the root cpuset. | |
1529 | */ | |
1530 | static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) | |
1531 | { | |
1532 | while (!is_mem_exclusive(cs) && cs->parent) | |
1533 | cs = cs->parent; | |
1534 | return cs; | |
1535 | } | |
1536 | ||
d9fd8a6d | 1537 | /** |
02a0e53d | 1538 | * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node? |
9bf2229f | 1539 | * @z: is this zone on an allowed node? |
02a0e53d | 1540 | * @gfp_mask: memory allocation flags |
d9fd8a6d | 1541 | * |
02a0e53d PJ |
1542 | * If we're in interrupt, yes, we can always allocate. If |
1543 | * __GFP_THISNODE is set, yes, we can always allocate. If zone | |
9bf2229f PJ |
1544 | * z's node is in our task's mems_allowed, yes. If it's not a | |
1545 | * __GFP_HARDWALL request and this zone's node is in the nearest | |
1546 | * mem_exclusive cpuset ancestor to this task's cpuset, yes. | |
c596d9f3 DR |
1547 | * If the task has been OOM killed and has access to memory reserves |
1548 | * as specified by the TIF_MEMDIE flag, yes. | |
9bf2229f PJ |
1549 | * Otherwise, no. |
1550 | * | |
02a0e53d PJ |
1551 | * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall() |
1552 | * reduces to cpuset_zone_allowed_hardwall(). Otherwise, | |
1553 | * cpuset_zone_allowed_softwall() might sleep, and might allow a zone | |
1554 | * from an enclosing cpuset. | |
1555 | * | |
1556 | * cpuset_zone_allowed_hardwall() only handles the simpler case of | |
1557 | * hardwall cpusets, and never sleeps. | |
1558 | * | |
1559 | * The __GFP_THISNODE placement logic is really handled elsewhere, | |
1560 | * by forcibly using a zonelist starting at a specified node, and by | |
1561 | * (in get_page_from_freelist()) refusing to consider the zones for | |
1562 | * any node on the zonelist except the first. By the time any such | |
1563 | * calls get to this routine, we should just shut up and say 'yes'. | |
1564 | * | |
9bf2229f | 1565 | * GFP_USER allocations are marked with the __GFP_HARDWALL bit, |
c596d9f3 DR |
1566 | * and do not allow allocations outside the current task's cpuset | |
1567 | * unless the task has been OOM killed and is marked TIF_MEMDIE. | |
9bf2229f | 1568 | * GFP_KERNEL allocations are not so marked, so can escape to the |
02a0e53d | 1569 | * nearest enclosing mem_exclusive ancestor cpuset. |
9bf2229f | 1570 | * |
02a0e53d PJ |
1571 | * Scanning up parent cpusets requires callback_mutex. The |
1572 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit | |
1573 | * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the | |
1574 | * current task's mems_allowed came up empty on the first pass over | |
1575 | * the zonelist. So only GFP_KERNEL allocations, if all nodes in the | |
1576 | * cpuset are short of memory, might require taking the | |
1577 | * callback_mutex. | |
9bf2229f | 1578 | * |
36be57ff | 1579 | * The first call here from mm/page_alloc:get_page_from_freelist() |
02a0e53d PJ |
1580 | * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, |
1581 | * so no allocation on a node outside the cpuset is allowed (unless | |
1582 | * in interrupt, of course). | |
36be57ff PJ |
1583 | * |
1584 | * The second pass through get_page_from_freelist() doesn't even call | |
1585 | * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() | |
1586 | * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set | |
1587 | * in alloc_flags. That logic and the checks below have the combined | |
1588 | * effect that: | |
9bf2229f PJ |
1589 | * in_interrupt - any node ok (current task context irrelevant) |
1590 | * GFP_ATOMIC - any node ok | |
c596d9f3 | 1591 | * TIF_MEMDIE - any node ok |
9bf2229f PJ |
1592 | * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok |
1593 | * GFP_USER - only nodes in the current task's mems_allowed ok. | |
36be57ff PJ |
1594 | * |
1595 | * Rule: | |
02a0e53d | 1596 | * Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you |
36be57ff PJ |
1597 | * pass in the __GFP_HARDWALL flag set in gfp_mask, which disables |
1598 | * the code that might scan up ancestor cpusets and sleep. | |
02a0e53d | 1599 | */ |
9bf2229f | 1600 | |
02a0e53d | 1601 | int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) |
1da177e4 | 1602 | { |
9bf2229f PJ |
1603 | int node; /* node that zone z is on */ |
1604 | const struct cpuset *cs; /* current cpuset ancestors */ | |
29afd49b | 1605 | int allowed; /* is allocation in zone z allowed? */ |
9bf2229f | 1606 | |
9b819d20 | 1607 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) |
9bf2229f | 1608 | return 1; |
89fa3024 | 1609 | node = zone_to_nid(z); |
92d1dbd2 | 1610 | might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); |
9bf2229f PJ |
1611 | if (node_isset(node, current->mems_allowed)) |
1612 | return 1; | |
c596d9f3 DR |
1613 | /* |
1614 | * Allow tasks that have access to memory reserves because they have | |
1615 | * been OOM killed to get memory anywhere. | |
1616 | */ | |
1617 | if (unlikely(test_thread_flag(TIF_MEMDIE))) | |
1618 | return 1; | |
9bf2229f PJ |
1619 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ |
1620 | return 0; | |
1621 | ||
5563e770 BP |
1622 | if (current->flags & PF_EXITING) /* Let dying task have memory */ |
1623 | return 1; | |
1624 | ||
9bf2229f | 1625 | /* Not hardwall and node outside mems_allowed: scan up cpusets */ |
3d3f26a7 | 1626 | mutex_lock(&callback_mutex); |
053199ed | 1627 | |
053199ed | 1628 | task_lock(current); |
8793d854 | 1629 | cs = nearest_exclusive_ancestor(task_cs(current)); |
053199ed PJ |
1630 | task_unlock(current); |
1631 | ||
9bf2229f | 1632 | allowed = node_isset(node, cs->mems_allowed); |
3d3f26a7 | 1633 | mutex_unlock(&callback_mutex); |
9bf2229f | 1634 | return allowed; |
1da177e4 LT |
1635 | } |
1636 | ||
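/*
 * A minimal sketch (hypothetical helper, not part of this file) of the
 * zone scan pattern the comment above describes: a GFP_KERNEL-style
 * allocation path skips zones whose node the softwall check rejects.
 * Per the rule above, a caller that cannot sleep must have
 * __GFP_HARDWALL set in gfp_mask.
 */
static struct zone *first_allowed_zone_example(struct zonelist *zl,
					       gfp_t gfp_mask)
{
	int i;

	for (i = 0; zl->zones[i]; i++)
		if (cpuset_zone_allowed_softwall(zl->zones[i], gfp_mask))
			return zl->zones[i];
	return NULL;		/* no zone on an allowed node */
}
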
02a0e53d PJ |
1637 | /* |
1638 | * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node? | |
1639 | * @z: is this zone on an allowed node? | |
1640 | * @gfp_mask: memory allocation flags | |
1641 | * | |
1642 | * If we're in interrupt, yes, we can always allocate. | |
1643 | * If __GFP_THISNODE is set, yes, we can always allocate. If zone | |
c596d9f3 DR |
1644 | * z's node is in our task's mems_allowed, yes. If the task has been |
1645 | * OOM killed and has access to memory reserves as specified by the | |
1646 | * TIF_MEMDIE flag, yes. Otherwise, no. | |
02a0e53d PJ |
1647 | * |
1648 | * The __GFP_THISNODE placement logic is really handled elsewhere, | |
1649 | * by forcibly using a zonelist starting at a specified node, and by | |
1650 | * (in get_page_from_freelist()) refusing to consider the zones for | |
1651 | * any node on the zonelist except the first. By the time any such | |
1652 | * calls get to this routine, we should just shut up and say 'yes'. | |
1653 | * | |
1654 | * Unlike the cpuset_zone_allowed_softwall() variant, above, | |
1655 | * this variant requires that the zone be in the current task's | |
1656 | * mems_allowed or that we're in interrupt. It does not scan up the | |
1657 | * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset. | |
1658 | * It never sleeps. | |
1659 | */ | |
1660 | ||
1661 | int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | |
1662 | { | |
1663 | int node; /* node that zone z is on */ | |
1664 | ||
1665 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) | |
1666 | return 1; | |
1667 | node = zone_to_nid(z); | |
1668 | if (node_isset(node, current->mems_allowed)) | |
1669 | return 1; | |
dedf8b79 DW |
1670 | /* |
1671 | * Allow tasks that have access to memory reserves because they have | |
1672 | * been OOM killed to get memory anywhere. | |
1673 | */ | |
1674 | if (unlikely(test_thread_flag(TIF_MEMDIE))) | |
1675 | return 1; | |
02a0e53d PJ |
1676 | return 0; |
1677 | } | |
1678 | ||
505970b9 PJ |
1679 | /** |
1680 | * cpuset_lock - lock out any changes to cpuset structures | |
1681 | * | |
3d3f26a7 | 1682 | * The out of memory (oom) code needs to lock cpusets |
505970b9 | 1683 | * against change while it scans the tasklist looking for a |
3d3f26a7 | 1684 | * task in an overlapping cpuset. Expose callback_mutex via this |
505970b9 PJ |
1685 | * cpuset_lock() routine, so the oom code can lock it, before |
1686 | * locking the task list. The tasklist_lock is a spinlock, so | |
3d3f26a7 | 1687 | * must be taken inside callback_mutex. |
505970b9 PJ |
1688 | */ |
1689 | ||
1690 | void cpuset_lock(void) | |
1691 | { | |
3d3f26a7 | 1692 | mutex_lock(&callback_mutex); |
505970b9 PJ |
1693 | } |
1694 | ||
1695 | /** | |
1696 | * cpuset_unlock - release lock on cpuset changes | |
1697 | * | |
1698 | * Undo the lock taken in a previous cpuset_lock() call. | |
1699 | */ | |
1700 | ||
1701 | void cpuset_unlock(void) | |
1702 | { | |
3d3f26a7 | 1703 | mutex_unlock(&callback_mutex); |
505970b9 PJ |
1704 | } |
1705 | ||
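/*
 * A minimal sketch of the locking order documented above (hypothetical
 * caller, patterned on the oom code): cpuset_lock() takes the sleeping
 * callback_mutex, so it must be acquired before the tasklist_lock
 * spinlock, never inside it.
 */
static void scan_overlapping_tasks_example(void)
{
	struct task_struct *p;

	cpuset_lock();
	read_lock(&tasklist_lock);
	for_each_process(p) {
		/* examine p's mems_allowed overlap here */
	}
	read_unlock(&tasklist_lock);
	cpuset_unlock();
}
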
825a46af PJ |
1706 | /** |
1707 | * cpuset_mem_spread_node() - On which node to begin search for a page | |
1708 | * | |
1709 | * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for | |
1710 | * tasks in a cpuset with is_spread_page or is_spread_slab set), | |
1711 | * and if the memory allocation used cpuset_mem_spread_node() | |
1712 | * to determine on which node to start looking, as it will for | |
1713 | * certain page cache or slab cache pages such as file | |
1714 | * system buffers and inode caches, then instead of starting the | |
1715 | * search for a free page on the local node, the starting | |
1716 | * node is rotated around the task's mems_allowed nodes. | |
1717 | * | |
1718 | * We don't have to worry about the returned node being offline | |
1719 | * because "it can't happen", and even if it did, it would be ok. | |
1720 | * | |
1721 | * The routines calling guarantee_online_mems() are careful to | |
1722 | * only set nodes in task->mems_allowed that are online. So it | |
1723 | * should not be possible for the following code to return an | |
1724 | * offline node. But if it did, that would be ok, as this routine | |
1725 | * is not returning the node where the allocation must be, only | |
1726 | * the node where the search should start. The zonelist passed to | |
1727 | * __alloc_pages() will include all nodes. If the slab allocator | |
1728 | * is passed an offline node, it will fall back to the local node. | |
1729 | * See kmem_cache_alloc_node(). | |
1730 | */ | |
1731 | ||
1732 | int cpuset_mem_spread_node(void) | |
1733 | { | |
1734 | int node; | |
1735 | ||
1736 | node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed); | |
1737 | if (node == MAX_NUMNODES) | |
1738 | node = first_node(current->mems_allowed); | |
1739 | current->cpuset_mem_spread_rotor = node; | |
1740 | return node; | |
1741 | } | |
1742 | EXPORT_SYMBOL_GPL(cpuset_mem_spread_node); | |
1743 | ||
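/*
 * A minimal usage sketch (hypothetical helper, not part of this file):
 * roughly how a page cache allocation consults the spread rotor when
 * the task is marked PF_SPREAD_PAGE, instead of always starting the
 * search on the local node.
 */
static struct page *spread_page_alloc_example(gfp_t gfp_mask)
{
	if (cpuset_do_page_mem_spread()) {
		int nid = cpuset_mem_spread_node();

		return alloc_pages_node(nid, gfp_mask, 0);
	}
	return alloc_pages(gfp_mask, 0);
}
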
ef08e3b4 | 1744 | /** |
bbe373f2 DR |
1745 | * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? |
1746 | * @tsk1: pointer to task_struct of some task. | |
1747 | * @tsk2: pointer to task_struct of some other task. | |
1748 | * | |
1749 | * Description: Return true if @tsk1's mems_allowed intersects the | |
1750 | * mems_allowed of @tsk2. Used by the OOM killer to determine if | |
1751 | * one of the task's memory usage might impact the memory available | |
1752 | * to the other. | |
ef08e3b4 PJ |
1753 | **/ |
1754 | ||
bbe373f2 DR |
1755 | int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
1756 | const struct task_struct *tsk2) | |
ef08e3b4 | 1757 | { |
bbe373f2 | 1758 | return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); |
ef08e3b4 PJ |
1759 | } |
1760 | ||
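/*
 * Sketch of the check the oom killer makes with the helper above
 * (hypothetical caller): killing a task whose allowed nodes do not
 * intersect ours cannot free memory that we are able to use, so such
 * a task is a poor victim and can be skipped.
 */
static int worth_killing_example(struct task_struct *p)
{
	return cpuset_mems_allowed_intersects(current, p);
}
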
3e0d98b9 PJ |
1761 | /* |
1762 | * Collection of memory_pressure is suppressed unless | |
1763 | * this flag is enabled by writing "1" to the special | |
1764 | * cpuset file 'memory_pressure_enabled' in the root cpuset. | |
1765 | */ | |
1766 | ||
c5b2aff8 | 1767 | int cpuset_memory_pressure_enabled __read_mostly; |
3e0d98b9 PJ |
1768 | |
1769 | /** | |
1770 | * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims. | |
1771 | * | |
1772 | * Keep a running average of the rate of synchronous (direct) | |
1773 | * page reclaim efforts initiated by tasks in each cpuset. | |
1774 | * | |
1775 | * This represents the rate at which some task in the cpuset | |
1776 | * ran low on memory on all nodes it was allowed to use, and | |
1777 | * had to enter the kernel's page reclaim code in an effort to | |
1778 | * create more free memory by tossing clean pages or swapping | |
1779 | * or writing dirty pages. | |
1780 | * | |
1781 | * Display to user space in the per-cpuset read-only file | |
1782 | * "memory_pressure". Value displayed is an integer | |
1783 | * representing the recent rate of entry into the synchronous | |
1784 | * (direct) page reclaim by any task attached to the cpuset. | |
1785 | **/ | |
1786 | ||
1787 | void __cpuset_memory_pressure_bump(void) | |
1788 | { | |
3e0d98b9 | 1789 | task_lock(current); |
8793d854 | 1790 | fmeter_markevent(&task_cs(current)->fmeter); |
3e0d98b9 PJ |
1791 | task_unlock(current); |
1792 | } | |
1793 | ||
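/*
 * Sketch of the intended call pattern: a fast-path wrapper (a static
 * inline in the cpuset header) bumps the meter only when collection
 * has been enabled via the root cpuset's 'memory_pressure_enabled'
 * file, so the common case costs a single global flag test.
 */
static inline void memory_pressure_bump_example(void)
{
	if (cpuset_memory_pressure_enabled)
		__cpuset_memory_pressure_bump();
}
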
8793d854 | 1794 | #ifdef CONFIG_PROC_PID_CPUSET |
1da177e4 LT |
1795 | /* |
1796 | * proc_cpuset_show() | |
1797 | * - Print task's cpuset path into seq_file. | |
1798 | * - Used for /proc/<pid>/cpuset. | |
053199ed PJ |
1799 | * - No need to task_lock(tsk) on this tsk->cpuset reference, as it |
1800 | * doesn't really matter if tsk->cpuset changes after we read it, | |
3d3f26a7 | 1801 | * and we take manage_mutex, keeping attach_task() from changing it |
8488bc35 PJ |
1802 | * anyway. No need to check that tsk->cpuset != NULL, thanks to |
1803 | * the_top_cpuset_hack in cpuset_exit(), which sets an exiting task's | |
1804 | * cpuset to top_cpuset. | |
1da177e4 | 1805 | */ |
1da177e4 LT |
1806 | static int proc_cpuset_show(struct seq_file *m, void *v) |
1807 | { | |
13b41b09 | 1808 | struct pid *pid; |
1da177e4 LT |
1809 | struct task_struct *tsk; |
1810 | char *buf; | |
8793d854 | 1811 | struct cgroup_subsys_state *css; |
99f89551 | 1812 | int retval; |
1da177e4 | 1813 | |
99f89551 | 1814 | retval = -ENOMEM; |
1da177e4 LT |
1815 | buf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
1816 | if (!buf) | |
99f89551 EB |
1817 | goto out; |
1818 | ||
1819 | retval = -ESRCH; | |
13b41b09 EB |
1820 | pid = m->private; |
1821 | tsk = get_pid_task(pid, PIDTYPE_PID); | |
99f89551 EB |
1822 | if (!tsk) |
1823 | goto out_free; | |
1da177e4 | 1824 | |
99f89551 | 1825 | retval = -EINVAL; |
8793d854 PM |
1826 | cgroup_lock(); |
1827 | css = task_subsys_state(tsk, cpuset_subsys_id); | |
1828 | retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); | |
1da177e4 | 1829 | if (retval < 0) |
99f89551 | 1830 | goto out_unlock; |
1da177e4 LT |
1831 | seq_puts(m, buf); |
1832 | seq_putc(m, '\n'); | |
99f89551 | 1833 | out_unlock: |
8793d854 | 1834 | cgroup_unlock(); |
99f89551 EB |
1835 | put_task_struct(tsk); |
1836 | out_free: | |
1da177e4 | 1837 | kfree(buf); |
99f89551 | 1838 | out: |
1da177e4 LT |
1839 | return retval; |
1840 | } | |
1841 | ||
1842 | static int cpuset_open(struct inode *inode, struct file *file) | |
1843 | { | |
13b41b09 EB |
1844 | struct pid *pid = PROC_I(inode)->pid; |
1845 | return single_open(file, proc_cpuset_show, pid); | |
1da177e4 LT |
1846 | } |
1847 | ||
9a32144e | 1848 | const struct file_operations proc_cpuset_operations = { |
1da177e4 LT |
1849 | .open = cpuset_open, |
1850 | .read = seq_read, | |
1851 | .llseek = seq_lseek, | |
1852 | .release = single_release, | |
1853 | }; | |
8793d854 | 1854 | #endif /* CONFIG_PROC_PID_CPUSET */ |
1da177e4 LT |
1855 | |
1856 | /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */ | |
1857 | char *cpuset_task_status_allowed(struct task_struct *task, char *buffer) | |
1858 | { | |
1859 | buffer += sprintf(buffer, "Cpus_allowed:\t"); | |
1860 | buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed); | |
1861 | buffer += sprintf(buffer, "\n"); | |
1862 | buffer += sprintf(buffer, "Mems_allowed:\t"); | |
1863 | buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed); | |
1864 | buffer += sprintf(buffer, "\n"); | |
1865 | return buffer; | |
1866 | } |