/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset.  They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * CPUs allocated to child sub-partitions (default hierarchy only)
	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
	 * - effective_cpus and subparts_cpus are mutually exclusive.
	 *
	 * effective_cpus contains only onlined CPUs, but subparts_cpus
	 * may have offlined ones.
	 */
	cpumask_var_t subparts_cpus;

	/*
	 * This is the old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemasks updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of CPUs in subparts_cpus */
	int nr_subparts_cpus;

	/* partition root state */
	int partition_root_state;

	/*
	 * Default hierarchy only:
	 * use_parent_ecpus - set if using parent's effective_cpus
	 * child_ecpus_count - # of children with use_parent_ecpus set
	 */
	int use_parent_ecpus;
	int child_ecpus_count;
};
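
/*
 * Worked example of the mask rules above (hypothetical CPU numbers, for
 * illustration only): on the default hierarchy, if a parent's
 * effective_cpus is 0-3 and a child writes 2-5 to cpuset.cpus, the
 * child's effective_cpus becomes 2-3 (configured mask & parent's
 * effective mask).  If that intersection were empty, the child would
 * instead inherit the parent's effective mask.
 */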

/*
 * Partition root states:
 *
 *   0 - not a partition root
 *
 *   1 - partition root
 *
 *  -1 - invalid partition root
 *       None of the cpus in cpus_allowed can be put into the parent's
 *       subparts_cpus.  In this case, the cpuset is not a real partition
 *       root anymore.  However, the CPU_EXCLUSIVE bit will still be set
 *       and the cpuset can be restored back to a partition root if the
 *       parent cpuset can give more CPUs back to this child cpuset.
 */
#define PRS_DISABLED		0
#define PRS_ENABLED		1
#define PRS_ERROR		-1

/*
 * Temporary cpumasks for working with partitions that are passed among
 * functions to avoid memory allocation in inner functions.
 */
struct tmpmasks {
	cpumask_var_t addmask, delmask;	/* For partition root */
	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static inline int is_partition_root(const struct cpuset *cs)
{
	return cs->partition_root_state > 0;
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
	.partition_root_state = PRS_ENABLED,
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
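
/*
 * Example usage (a sketch; see validate_change() below for a real caller):
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(c, css, parent_cs)
 *		do_something(c);	// hypothetical per-child work
 *	rcu_read_unlock();
 */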

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset whose descendants to walk
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree.  @root_cs is included in
 * the iteration and is the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))

/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock.  We also require taking task_lock() when dereferencing a
 * task's cpuset pointer.  See "The task_lock() exception", at the end of this
 * comment.
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by other tasks, so we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

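/*
 * Typical write-side sequence (a sketch of the pattern described above,
 * mirroring what update_cpumask() below actually does):
 *
 *	percpu_down_write(&cpuset_rwsem);	// the "cpuset_mutex" role
 *	... validate and allocate; readers may still query ...
 *	spin_lock_irq(&callback_lock);
 *	... publish the new masks ...
 *	spin_unlock_irq(&callback_lock);
 *	percpu_up_write(&cpuset_rwsem);
 */
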
DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);

void cpuset_read_lock(void)
{
	percpu_down_read(&cpuset_rwsem);
}

void cpuset_read_unlock(void)
{
	percpu_up_read(&cpuset_rwsem);
}

static DEFINE_SPINLOCK(callback_lock);

static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * Cgroup v2 behavior is used on the default hierarchy or when the
 * cgroup_v2_mode flag is set.
 */
static inline bool is_in_v2_mode(void)
{
	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}

/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and the cpu hotplug notifier.  But we know the top
			 * cpuset's effective_cpus is on its way to be
			 * identical to cpu_online_mask.
			 */
			cpumask_copy(pmask, cpu_online_mask);
			return;
		}
	}
	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * Update a task's spread flags if the cpuset's page/slab spread flag
 * is set.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_cpumasks - allocate three cpumasks for cpuset
 * @cs: the cpuset that has cpumasks to be allocated.
 * @tmp: the tmpmasks structure pointer
 * Return: 0 if successful, -ENOMEM otherwise.
 *
 * Only one of the two input arguments should be non-NULL.
 */
static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	cpumask_var_t *pmask1, *pmask2, *pmask3;

	if (cs) {
		pmask1 = &cs->cpus_allowed;
		pmask2 = &cs->effective_cpus;
		pmask3 = &cs->subparts_cpus;
	} else {
		pmask1 = &tmp->new_cpus;
		pmask2 = &tmp->addmask;
		pmask3 = &tmp->delmask;
	}

	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
		goto free_one;

	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
		goto free_two;

	return 0;

free_two:
	free_cpumask_var(*pmask2);
free_one:
	free_cpumask_var(*pmask1);
	return -ENOMEM;
}

/**
 * free_cpumasks - free cpumasks in a cpuset or a tmpmasks structure
 * @cs: the cpuset whose cpumasks are to be freed.
 * @tmp: the tmpmasks structure pointer
 */
static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	if (cs) {
		free_cpumask_var(cs->cpus_allowed);
		free_cpumask_var(cs->effective_cpus);
		free_cpumask_var(cs->subparts_cpus);
	}
	if (tmp) {
		free_cpumask_var(tmp->new_cpus);
		free_cpumask_var(tmp->addmask);
		free_cpumask_var(tmp->delmask);
	}
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (alloc_cpumasks(trial, NULL)) {
		kfree(trial);
		return NULL;
	}

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;
}

/**
 * free_cpuset - free the cpuset
 * @cs: the cpuset to be freed
 */
static inline void free_cpuset(struct cpuset *cs)
{
	free_cpumasks(cs, NULL);
	kfree(cs);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of a bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to the
 * kernel/sched/core.c partition_sched_domains() routine, which will
 * rebuild the scheduler's load balancing domains (sched domains) as
 * specified by that partial partition.
 *
 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *    cp - cpuset pointer, used (together with pos_css) to perform a
 *	   top-down scan of all cpusets.  For our purposes, rebuilding
 *	   the scheduler's sched domains, we can ignore !is_sched_load_
 *	   balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed but don't have the same 'pn' partition
 *	number, and places them in the same partition.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* top-down scan of cpusets */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup_subsys_state *pos_css;
	bool root_load_balance = is_sched_load_balance(&top_cpuset);

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_and(doms[0], top_cpuset.effective_cpus,
			    housekeeping_cpumask(HK_FLAG_DOMAIN));

		goto done;
	}

	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	if (root_load_balance)
		csa[csn++] = &top_cpuset;
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset)
			continue;
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 *
		 * If root is load-balancing, we can skip @cp if it
		 * is a subset of the root's effective_cpus.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !(is_sched_load_balance(cp) &&
		      cpumask_intersects(cp->cpus_allowed,
					 housekeeping_cpumask(HK_FLAG_DOMAIN))))
			continue;

		if (root_load_balance &&
		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
			continue;

		if (is_sched_load_balance(cp) &&
		    !cpumask_empty(cp->effective_cpus))
			csa[csn++] = cp;

		/* skip @cp's subtree if not a partition root */
		if (!is_partition_root(cp))
			pos_css = css_rightmost_descendant(pos_css);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * the dattr==NULL case.  No need to abort if the alloc fails.
	 */
	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
			      GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
					nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->effective_cpus);
				cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN));
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}
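
/*
 * Worked example of the partition-finding loop above (hypothetical
 * cpusets, for illustration only): given three load-balanced cpusets
 * with effective CPUs A=0-1, B=1-2 and C=4-5, A and B overlap, so they
 * are merged into one partition and ndoms drops from 3 to 2.  The
 * resulting sched domains passed to partition_sched_domains() are
 * {0-2} (the union of A and B) and {4-5} (C alone).
 */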

static void update_tasks_root_domain(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);

	while ((task = css_task_iter_next(&it)))
		dl_add_task_root_domain(task);

	css_task_iter_end(&it);
}

static void rebuild_root_domains(void)
{
	struct cpuset *cs = NULL;
	struct cgroup_subsys_state *pos_css;

	percpu_rwsem_assert_held(&cpuset_rwsem);
	lockdep_assert_cpus_held();
	lockdep_assert_held(&sched_domains_mutex);

	rcu_read_lock();

	/*
	 * Clear default root domain DL accounting, it will be computed again
	 * if a task belongs to it.
	 */
	dl_clear_root_domain(&def_root_domain);

	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {

		if (cpumask_empty(cs->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		css_get(&cs->css);

		rcu_read_unlock();

		update_tasks_root_domain(cs);

		rcu_read_lock();
		css_put(&cs->css);
	}
	rcu_read_unlock();
}

static void
partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new)
{
	mutex_lock(&sched_domains_mutex);
	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
	rebuild_root_domains();
	mutex_unlock(&sched_domains_mutex);
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held.  Takes get_online_cpus().
 */
static void rebuild_sched_domains_locked(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	lockdep_assert_cpus_held();
	percpu_rwsem_assert_held(&cpuset_rwsem);

	/*
	 * We have raced with CPU hotplug.  Don't do anything to avoid
	 * passing doms with an offlined cpu to partition_sched_domains().
	 * Anyways, the hotplug work item will rebuild sched domains.
	 */
	if (!top_cpuset.nr_subparts_cpus &&
	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
		return;

	if (top_cpuset.nr_subparts_cpus &&
	    !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask))
		return;

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	get_online_cpus();
	percpu_down_write(&cpuset_rwsem);
	rebuild_sched_domains_locked();
	percpu_up_write(&cpuset_rwsem);
	put_online_cpus();
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its cpus_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
static void update_tasks_cpumask(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		set_cpus_allowed_ptr(task, cs->effective_cpus);
	css_task_iter_end(&it);
}
1041
ee8dde0c
WL
1042/**
1043 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1044 * @new_cpus: the temp variable for the new effective_cpus mask
1045 * @cs: the cpuset the need to recompute the new effective_cpus mask
1046 * @parent: the parent cpuset
1047 *
1048 * If the parent has subpartition CPUs, include them in the list of
4b842da2
WL
1049 * allowable CPUs in computing the new effective_cpus mask. Since offlined
1050 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
1051 * to mask those out.
ee8dde0c
WL
1052 */
1053static void compute_effective_cpumask(struct cpumask *new_cpus,
1054 struct cpuset *cs, struct cpuset *parent)
1055{
1056 if (parent->nr_subparts_cpus) {
1057 cpumask_or(new_cpus, parent->effective_cpus,
1058 parent->subparts_cpus);
1059 cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
4b842da2 1060 cpumask_and(new_cpus, new_cpus, cpu_active_mask);
ee8dde0c
WL
1061 } else {
1062 cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1063 }
1064}
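
/*
 * For illustration (hypothetical masks): if the parent has
 * effective_cpus = 0-1 and subparts_cpus = 2-3, a child with
 * cpus_allowed = 1-2 gets new_cpus = 1-2 & (0-3) & cpu_active_mask;
 * without subpartitions it would simply be cpus_allowed &
 * parent->effective_cpus, i.e. CPU 1 only.
 */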

/*
 * Commands for update_parent_subparts_cpumask
 */
enum subparts_cmd {
	partcmd_enable,		/* Enable partition root */
	partcmd_disable,	/* Disable partition root */
	partcmd_update,		/* Update parent's subparts_cpus */
};

/**
 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
 * @cpuset:  The cpuset that requests change in partition root state
 * @cmd:     Partition root state change command
 * @newmask: Optional new cpumask for partcmd_update
 * @tmp:     Temporary addmask and delmask
 * Return:   0, 1 or an error code
 *
 * For partcmd_enable, the cpuset is being transformed from a non-partition
 * root to a partition root.  The cpus_allowed mask of the given cpuset will
 * be put into parent's subparts_cpus and taken away from parent's
 * effective_cpus.  The function will return 0 if all the CPUs listed in
 * cpus_allowed can be granted, or an error code will be returned.
 *
 * For partcmd_disable, the cpuset is being transformed from a partition
 * root back to a non-partition root.  Any CPUs in cpus_allowed that are in
 * parent's subparts_cpus will be taken away from that cpumask and put back
 * into parent's effective_cpus.  0 should always be returned.
 *
 * For partcmd_update, if the optional newmask is specified, the cpu
 * list is to be changed from cpus_allowed to newmask.  Otherwise,
 * cpus_allowed is assumed to remain the same.  The cpuset should either
 * be a partition root or an invalid partition root.  The partition root
 * state may change if newmask is NULL and none of the requested CPUs can
 * be granted by the parent.  The function will return 1 if changes to
 * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
 * An error code should only be returned when newmask is non-NULL.
 *
 * The partcmd_enable and partcmd_disable commands are used by
 * update_prstate().  The partcmd_update command is used by
 * update_cpumasks_hier() with newmask NULL and update_cpumask() with
 * newmask set.
 *
 * The checking is more strict when enabling partition root than the
 * other two commands.
 *
 * Because of the implicit cpu exclusive nature of a partition root,
 * cpumask changes that violate the cpu exclusivity rule will not be
 * permitted when checked by validate_change().  The validate_change()
 * function will also prevent any changes to the cpu list if it is not
 * a superset of children's cpu lists.
 */
static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
					  struct cpumask *newmask,
					  struct tmpmasks *tmp)
{
	struct cpuset *parent = parent_cs(cpuset);
	int adding;	/* Moving cpus from effective_cpus to subparts_cpus */
	int deleting;	/* Moving cpus from subparts_cpus to effective_cpus */
	bool part_error = false;	/* Partition error? */

	percpu_rwsem_assert_held(&cpuset_rwsem);

	/*
	 * The parent must be a partition root.
	 * The new cpumask, if present, or the current cpus_allowed must
	 * not be empty.
	 */
	if (!is_partition_root(parent) ||
	   (newmask && cpumask_empty(newmask)) ||
	   (!newmask && cpumask_empty(cpuset->cpus_allowed)))
		return -EINVAL;

	/*
	 * Enabling/disabling partition root is not allowed if there are
	 * online children.
	 */
	if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css))
		return -EBUSY;

	/*
	 * Enabling partition root is not allowed if not all the CPUs
	 * can be granted from parent's effective_cpus or at least one
	 * CPU will be left after that.
	 */
	if ((cmd == partcmd_enable) &&
	   (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) ||
	     cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus)))
		return -EINVAL;

	/*
	 * A cpumask update cannot make parent's effective_cpus become empty.
	 */
	adding = deleting = false;
	if (cmd == partcmd_enable) {
		cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
		adding = true;
	} else if (cmd == partcmd_disable) {
		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
				       parent->subparts_cpus);
	} else if (newmask) {
		/*
		 * partcmd_update with newmask:
		 *
		 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
		 * addmask = newmask & parent->effective_cpus
		 *		     & ~parent->subparts_cpus
		 */
		cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask);
		deleting = cpumask_and(tmp->delmask, tmp->delmask,
				       parent->subparts_cpus);

		cpumask_and(tmp->addmask, newmask, parent->effective_cpus);
		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
					parent->subparts_cpus);
		/*
		 * Return error if the new effective_cpus could become empty.
		 */
		if (adding &&
		    cpumask_equal(parent->effective_cpus, tmp->addmask)) {
			if (!deleting)
				return -EINVAL;
			/*
			 * As some of the CPUs in subparts_cpus might have
			 * been offlined, we need to compute the real delmask
			 * to confirm that.
			 */
			if (!cpumask_and(tmp->addmask, tmp->delmask,
					 cpu_active_mask))
				return -EINVAL;
			cpumask_copy(tmp->addmask, parent->effective_cpus);
		}
	} else {
		/*
		 * partcmd_update w/o newmask:
		 *
		 * addmask = cpus_allowed & parent->effective_cpus
		 *
		 * Note that parent's subparts_cpus may have been
		 * pre-shrunk in case there is a change in the cpu list.
		 * So no deletion is needed.
		 */
		adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed,
				     parent->effective_cpus);
		part_error = cpumask_equal(tmp->addmask,
					   parent->effective_cpus);
	}

	if (cmd == partcmd_update) {
		int prev_prs = cpuset->partition_root_state;

		/*
		 * Check for possible transition between PRS_ENABLED
		 * and PRS_ERROR.
		 */
		switch (cpuset->partition_root_state) {
		case PRS_ENABLED:
			if (part_error)
				cpuset->partition_root_state = PRS_ERROR;
			break;
		case PRS_ERROR:
			if (!part_error)
				cpuset->partition_root_state = PRS_ENABLED;
			break;
		}
		/*
		 * Set part_error if previously in an invalid state.
		 */
		part_error = (prev_prs == PRS_ERROR);
	}

	if (!part_error && (cpuset->partition_root_state == PRS_ERROR))
		return 0;	/* Nothing needs to be done */

	if (cpuset->partition_root_state == PRS_ERROR) {
		/*
		 * Remove all its cpus from parent's subparts_cpus.
		 */
		adding = false;
		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
				       parent->subparts_cpus);
	}

	if (!adding && !deleting)
		return 0;

	/*
	 * Change the parent's subparts_cpus.
	 * Newly added CPUs will be removed from effective_cpus and
	 * newly deleted ones will be added back to effective_cpus.
	 */
	spin_lock_irq(&callback_lock);
	if (adding) {
		cpumask_or(parent->subparts_cpus,
			   parent->subparts_cpus, tmp->addmask);
		cpumask_andnot(parent->effective_cpus,
			       parent->effective_cpus, tmp->addmask);
	}
	if (deleting) {
		cpumask_andnot(parent->subparts_cpus,
			       parent->subparts_cpus, tmp->delmask);
		/*
		 * Some of the CPUs in subparts_cpus might have been offlined.
		 */
		cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
		cpumask_or(parent->effective_cpus,
			   parent->effective_cpus, tmp->delmask);
	}

	parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
	spin_unlock_irq(&callback_lock);

	return cmd == partcmd_update;
}
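
/*
 * A worked partcmd_update example using the formulas above (hypothetical
 * masks, for illustration only): with cpus_allowed = 2-3 already in the
 * parent's subparts_cpus, parent->effective_cpus = 0-1 and a newmask of
 * 1-2, delmask = (2-3) & ~(1-2) & subparts_cpus = CPU 3, which is
 * returned to the parent's effective_cpus, while addmask = (1-2) & (0-1)
 * & ~subparts_cpus = CPU 1, which moves into the parent's subparts_cpus.
 */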

/*
 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
 * @cs:  the cpuset to consider
 * @tmp: temp variables for calculating effective_cpus & partition setup
 *
 * When the configured cpumask is changed, the effective cpumasks of this
 * cpuset and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
 *
 * Called with cpuset_mutex held
 */
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;
	bool need_rebuild_sched_domains = false;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);

		compute_effective_cpumask(tmp->new_cpus, cp, parent);

		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some CPUs.
		 */
		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
			if (!cp->use_parent_ecpus) {
				cp->use_parent_ecpus = true;
				parent->child_ecpus_count++;
			}
		} else if (cp->use_parent_ecpus) {
			cp->use_parent_ecpus = false;
			WARN_ON_ONCE(!parent->child_ecpus_count);
			parent->child_ecpus_count--;
		}

		/*
		 * Skip the whole subtree if the cpumask remains the same
		 * and has no partition root state.
		 */
		if (!cp->partition_root_state &&
		    cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		/*
		 * update_parent_subparts_cpumask() should have been called
		 * for cs already in update_cpumask().  We should also call
		 * update_tasks_cpumask() again for tasks in the parent
		 * cpuset if the parent's subparts_cpus changes.
		 */
		if ((cp != cs) && cp->partition_root_state) {
			switch (parent->partition_root_state) {
			case PRS_DISABLED:
				/*
				 * If parent is not a partition root or an
				 * invalid partition root, clear the state
				 * and the CS_CPU_EXCLUSIVE flag.
				 */
				WARN_ON_ONCE(cp->partition_root_state
					    != PRS_ERROR);
				cp->partition_root_state = 0;

				/*
				 * clear_bit() is an atomic operation and
				 * readers aren't interested in the state
				 * of CS_CPU_EXCLUSIVE anyway.  So we can
				 * just update the flag without holding
				 * the callback_lock.
				 */
				clear_bit(CS_CPU_EXCLUSIVE, &cp->flags);
				break;

			case PRS_ENABLED:
				if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp))
					update_tasks_cpumask(parent);
				break;

			case PRS_ERROR:
				/*
				 * When the parent is invalid, the child
				 * has to be invalid too.
				 */
				cp->partition_root_state = PRS_ERROR;
				if (cp->nr_subparts_cpus) {
					cp->nr_subparts_cpus = 0;
					cpumask_clear(cp->subparts_cpus);
				}
				break;
			}
		}

		if (!css_tryget_online(&cp->css))
			continue;
		rcu_read_unlock();

		spin_lock_irq(&callback_lock);

		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
		if (cp->nr_subparts_cpus &&
		   (cp->partition_root_state != PRS_ENABLED)) {
			cp->nr_subparts_cpus = 0;
			cpumask_clear(cp->subparts_cpus);
		} else if (cp->nr_subparts_cpus) {
			/*
			 * Make sure that effective_cpus & subparts_cpus
			 * are mutually exclusive.
			 *
			 * In the unlikely event that effective_cpus
			 * becomes empty, we clear cp->nr_subparts_cpus and
			 * let its child partition roots compete for
			 * CPUs again.
			 */
			cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
				       cp->subparts_cpus);
			if (cpumask_empty(cp->effective_cpus)) {
				cpumask_copy(cp->effective_cpus, tmp->new_cpus);
				cpumask_clear(cp->subparts_cpus);
				cp->nr_subparts_cpus = 0;
			} else if (!cpumask_subset(cp->subparts_cpus,
						   tmp->new_cpus)) {
				cpumask_andnot(cp->subparts_cpus,
					       cp->subparts_cpus, tmp->new_cpus);
				cp->nr_subparts_cpus
					= cpumask_weight(cp->subparts_cpus);
			}
		}
		spin_unlock_irq(&callback_lock);

		WARN_ON(!is_in_v2_mode() &&
			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));

		update_tasks_cpumask(cp);

		/*
		 * On legacy hierarchy, if the effective cpumask of any non-
		 * empty cpuset is changed, we need to rebuild sched domains.
		 * On default hierarchy, the cpuset needs to be a partition
		 * root as well.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    is_sched_load_balance(cp) &&
		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
		    is_partition_root(cp)))
			need_rebuild_sched_domains = true;

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();

	if (need_rebuild_sched_domains)
		rebuild_sched_domains_locked();
}

/**
 * update_sibling_cpumasks - Update siblings cpumasks
 * @parent: Parent cpuset
 * @cs:     Current cpuset
 * @tmp:    Temp variables
 */
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
				    struct tmpmasks *tmp)
{
	struct cpuset *sibling;
	struct cgroup_subsys_state *pos_css;

	/*
	 * Check all its siblings and call update_cpumasks_hier()
	 * if their use_parent_ecpus flag is set in order for them
	 * to use the right effective_cpus value.
	 */
	rcu_read_lock();
	cpuset_for_each_child(sibling, pos_css, parent) {
		if (sibling == cs)
			continue;
		if (!sibling->use_parent_ecpus)
			continue;

		update_cpumasks_hier(sibling, tmp);
	}
	rcu_read_unlock();
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @trialcs: trial cpuset
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	int retval;
	struct tmpmasks tmp;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed,
				    top_cpuset.cpus_allowed))
			return -EINVAL;
	}

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

#ifdef CONFIG_CPUMASK_OFFSTACK
	/*
	 * Use the cpumasks in trialcs for tmpmasks when they are pointers
	 * to allocated cpumasks.
	 */
	tmp.addmask  = trialcs->subparts_cpus;
	tmp.delmask  = trialcs->effective_cpus;
	tmp.new_cpus = trialcs->cpus_allowed;
#endif

	if (cs->partition_root_state) {
		/* Cpumask of a partition root cannot be empty */
		if (cpumask_empty(trialcs->cpus_allowed))
			return -EINVAL;
		if (update_parent_subparts_cpumask(cs, partcmd_update,
					trialcs->cpus_allowed, &tmp) < 0)
			return -EINVAL;
	}

	spin_lock_irq(&callback_lock);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);

	/*
	 * Make sure that subparts_cpus is a subset of cpus_allowed.
	 */
	if (cs->nr_subparts_cpus) {
		cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus,
			       cs->cpus_allowed);
		cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
	}
	spin_unlock_irq(&callback_lock);

	update_cpumasks_hier(cs, &tmp);

	if (cs->partition_root_state) {
		struct cpuset *parent = parent_cs(cs);

		/*
		 * For partition root, update the cpumasks of sibling
		 * cpusets if they use parent's effective_cpus.
		 */
		if (parent->child_ecpus_count)
			update_sibling_cpumasks(parent, cs, &tmp);
	}
	return 0;
}

/*
 * Migrate memory region from one set of nodes to another.  This is
 * performed asynchronously as it can be called from the process migration
 * path holding locks involved in process management.  All mm migrations
 * are performed in the queued order and can be waited for by flushing
 * cpuset_migrate_mm_wq.
 */

struct cpuset_migrate_mm_work {
	struct work_struct	work;
	struct mm_struct	*mm;
	nodemask_t		from;
	nodemask_t		to;
};

static void cpuset_migrate_mm_workfn(struct work_struct *work)
{
	struct cpuset_migrate_mm_work *mwork =
		container_of(work, struct cpuset_migrate_mm_work, work);

	/* on a wq worker, no need to worry about %current's mems_allowed */
	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
	mmput(mwork->mm);
	kfree(mwork);
}

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
			      const nodemask_t *to)
{
	struct cpuset_migrate_mm_work *mwork;

	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
	if (mwork) {
		mwork->mm = mm;
		mwork->from = *from;
		mwork->to = *to;
		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
		queue_work(cpuset_migrate_mm_wq, &mwork->work);
	} else {
		mmput(mm);
	}
}

static void cpuset_post_attach(void)
{
	flush_workqueue(cpuset_migrate_mm_wq);
}
1604
3b6766fe 1605/*
58568d2a
MX
1606 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1607 * @tsk: the task to change
1608 * @newmems: new nodes that the task will be set
1609 *
5f155f27
VB
1610 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1611 * and rebind an eventual tasks' mempolicy. If the task is allocating in
1612 * parallel, it might temporarily see an empty intersection, which results in
1613 * a seqlock check and retry before OOM or allocation failure.
58568d2a
MX
1614 */
1615static void cpuset_change_task_nodemask(struct task_struct *tsk,
1616 nodemask_t *newmems)
1617{
c0ff7453 1618 task_lock(tsk);
c0ff7453 1619
5f155f27
VB
1620 local_irq_disable();
1621 write_seqcount_begin(&tsk->mems_allowed_seq);
c0ff7453 1622
cc9a6c87 1623 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
213980c0 1624 mpol_rebind_task(tsk, newmems);
58568d2a 1625 tsk->mems_allowed = *newmems;
cc9a6c87 1626
5f155f27
VB
1627 write_seqcount_end(&tsk->mems_allowed_seq);
1628 local_irq_enable();
cc9a6c87 1629
c0ff7453 1630 task_unlock(tsk);
58568d2a
MX
1631}
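/*
 * Illustrative sketch, not part of this file: the allocator-side reader
 * that pairs with the seqcount writer above. The kernel's real helpers
 * are read_mems_allowed_begin()/read_mems_allowed_retry(); the
 * open-coded form below is only meant to show how the seqcount read
 * side retries around the write above.
 */
static nodemask_t example_read_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mems;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&tsk->mems_allowed_seq);
		mems = tsk->mems_allowed;	/* may race with the writer */
	} while (read_seqcount_retry(&tsk->mems_allowed_seq, seq));

	return mems;	/* a consistent snapshot */
}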
1632
8793d854
PM
1633static void *cpuset_being_rebound;
1634
0b2f630a
MX
1635/**
1636 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1637 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
0b2f630a 1638 *
d66393e5
TH
1639 * Iterate through each task of @cs updating its mems_allowed to the
1640 * effective cpuset's. As this function is called with cpuset_mutex held,
1641 * cpuset membership stays stable.
0b2f630a 1642 */
d66393e5 1643static void update_tasks_nodemask(struct cpuset *cs)
1da177e4 1644{
33ad801d 1645 static nodemask_t newmems; /* protected by cpuset_mutex */
d66393e5
TH
1646 struct css_task_iter it;
1647 struct task_struct *task;
59dac16f 1648
846a16bf 1649 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
4225399a 1650
ae1c8023 1651 guarantee_online_mems(cs, &newmems);
33ad801d 1652
4225399a 1653 /*
3b6766fe
LZ
1654 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1655 * take while holding tasklist_lock. Forks can happen - the
1656 * mpol_dup() cpuset_being_rebound check will catch such forks,
1657 * and rebind their vma mempolicies too. Because we still hold
5d21cc2d 1658 * the global cpuset_mutex, we know that no other rebind effort
3b6766fe 1659 * will be contending for the global variable cpuset_being_rebound.
4225399a 1660 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
04c19fa6 1661 * is idempotent. Also migrate pages in each mm to new nodes.
4225399a 1662 */
bc2fb7ed 1663 css_task_iter_start(&cs->css, 0, &it);
d66393e5
TH
1664 while ((task = css_task_iter_next(&it))) {
1665 struct mm_struct *mm;
1666 bool migrate;
1667
1668 cpuset_change_task_nodemask(task, &newmems);
1669
1670 mm = get_task_mm(task);
1671 if (!mm)
1672 continue;
1673
1674 migrate = is_memory_migrate(cs);
1675
1676 mpol_rebind_mm(mm, &cs->mems_allowed);
1677 if (migrate)
1678 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
e93ad19d
TH
1679 else
1680 mmput(mm);
d66393e5
TH
1681 }
1682 css_task_iter_end(&it);
4225399a 1683
33ad801d
LZ
1684 /*
1685 * All the tasks' nodemasks have been updated, update
1686 * cs->old_mems_allowed.
1687 */
1688 cs->old_mems_allowed = newmems;
1689
2df167a3 1690 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
8793d854 1691 cpuset_being_rebound = NULL;
1da177e4
LT
1692}
1693
5c5cc623 1694/*
734d4513
LZ
1695 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
1696 * @cs: the cpuset to consider
1697 * @new_mems: a temp variable for calculating new effective_mems
5c5cc623 1698 *
734d4513
LZ
1699 * When the configured nodemask is changed, the effective nodemasks of this cpuset
1700 * and all its descendants need to be updated.
5c5cc623 1701 *
734d4513 1702 * On the legacy hierarchy, effective_mems will be the same as mems_allowed.
5c5cc623
LZ
1703 *
1704 * Called with cpuset_mutex held
1705 */
734d4513 1706static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
5c5cc623
LZ
1707{
1708 struct cpuset *cp;
492eb21b 1709 struct cgroup_subsys_state *pos_css;
5c5cc623
LZ
1710
1711 rcu_read_lock();
734d4513
LZ
1712 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1713 struct cpuset *parent = parent_cs(cp);
1714
1715 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
1716
554b0d1c
LZ
1717 /*
1718 * If it becomes empty, inherit the effective mask of the
1719 * parent, which is guaranteed to have some MEMs.
1720 */
b8d1b8ee 1721 if (is_in_v2_mode() && nodes_empty(*new_mems))
554b0d1c
LZ
1722 *new_mems = parent->effective_mems;
1723
734d4513
LZ
1724 /* Skip the whole subtree if the nodemask remains the same. */
1725 if (nodes_equal(*new_mems, cp->effective_mems)) {
1726 pos_css = css_rightmost_descendant(pos_css);
1727 continue;
5c5cc623 1728 }
734d4513 1729
ec903c0c 1730 if (!css_tryget_online(&cp->css))
5c5cc623
LZ
1731 continue;
1732 rcu_read_unlock();
1733
8447a0fe 1734 spin_lock_irq(&callback_lock);
734d4513 1735 cp->effective_mems = *new_mems;
8447a0fe 1736 spin_unlock_irq(&callback_lock);
734d4513 1737
b8d1b8ee 1738 WARN_ON(!is_in_v2_mode() &&
a1381268 1739 !nodes_equal(cp->mems_allowed, cp->effective_mems));
734d4513 1740
d66393e5 1741 update_tasks_nodemask(cp);
5c5cc623
LZ
1742
1743 rcu_read_lock();
1744 css_put(&cp->css);
1745 }
1746 rcu_read_unlock();
1747}
1748
0b2f630a
MX
1749/*
1750 * Handle user request to change the 'mems' memory placement
1751 * of a cpuset. Needs to validate the request, update the
58568d2a
MX
1752 * cpusets mems_allowed, and for each task in the cpuset,
1753 * update mems_allowed and rebind task's mempolicy and any vma
1754 * mempolicies and if the cpuset is marked 'memory_migrate',
1755 * migrate the tasks pages to the new memory.
0b2f630a 1756 *
8447a0fe 1757 * Call with cpuset_mutex held. May take callback_lock during call.
0b2f630a
MX
1758 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1759 * lock each such task's mm->mmap_sem, scan its vmas and rebind
1760 * their mempolicies to the cpuset's new mems_allowed.
1761 */
645fcc9d
LZ
1762static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1763 const char *buf)
0b2f630a 1764{
0b2f630a
MX
1765 int retval;
1766
1767 /*
38d7bee9 1768 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
0b2f630a
MX
1769 * it's read-only
1770 */
53feb297
MX
1771 if (cs == &top_cpuset) {
1772 retval = -EACCES;
1773 goto done;
1774 }
0b2f630a 1775
0b2f630a
MX
1776 /*
1777 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1778 * Since nodelist_parse() fails on an empty mask, we special case
1779 * that parsing. The validate_change() call ensures that cpusets
1780 * with tasks have memory.
1781 */
1782 if (!*buf) {
645fcc9d 1783 nodes_clear(trialcs->mems_allowed);
0b2f630a 1784 } else {
645fcc9d 1785 retval = nodelist_parse(buf, trialcs->mems_allowed);
0b2f630a
MX
1786 if (retval < 0)
1787 goto done;
1788
645fcc9d 1789 if (!nodes_subset(trialcs->mems_allowed,
5d8ba82c
LZ
1790 top_cpuset.mems_allowed)) {
1791 retval = -EINVAL;
53feb297
MX
1792 goto done;
1793 }
0b2f630a 1794 }
33ad801d
LZ
1795
1796 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
0b2f630a
MX
1797 retval = 0; /* Too easy - nothing to do */
1798 goto done;
1799 }
645fcc9d 1800 retval = validate_change(cs, trialcs);
0b2f630a
MX
1801 if (retval < 0)
1802 goto done;
1803
8447a0fe 1804 spin_lock_irq(&callback_lock);
645fcc9d 1805 cs->mems_allowed = trialcs->mems_allowed;
8447a0fe 1806 spin_unlock_irq(&callback_lock);
0b2f630a 1807
734d4513 1808 /* use trialcs->mems_allowed as a temp variable */
24ee3cf8 1809 update_nodemasks_hier(cs, &trialcs->mems_allowed);
0b2f630a
MX
1810done:
1811 return retval;
1812}
1813
77ef80c6 1814bool current_cpuset_is_being_rebound(void)
8793d854 1815{
77ef80c6 1816 bool ret;
391acf97
GZ
1817
1818 rcu_read_lock();
1819 ret = task_cs(current) == cpuset_being_rebound;
1820 rcu_read_unlock();
1821
1822 return ret;
8793d854
PM
1823}
1824
5be7a479 1825static int update_relax_domain_level(struct cpuset *cs, s64 val)
1d3504fc 1826{
db7f47cf 1827#ifdef CONFIG_SMP
60495e77 1828 if (val < -1 || val >= sched_domain_level_max)
30e0e178 1829 return -EINVAL;
db7f47cf 1830#endif
1d3504fc
HS
1831
1832 if (val != cs->relax_domain_level) {
1833 cs->relax_domain_level = val;
300ed6cb
LZ
1834 if (!cpumask_empty(cs->cpus_allowed) &&
1835 is_sched_load_balance(cs))
699140ba 1836 rebuild_sched_domains_locked();
1d3504fc
HS
1837 }
1838
1839 return 0;
1840}
1841
72ec7029 1842/**
950592f7
MX
1843 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1844 * @cs: the cpuset in which each task's spread flags needs to be changed
950592f7 1845 *
d66393e5
TH
1846 * Iterate through each task of @cs updating its spread flags. As this
1847 * function is called with cpuset_mutex held, cpuset membership stays
1848 * stable.
950592f7 1849 */
d66393e5 1850static void update_tasks_flags(struct cpuset *cs)
950592f7 1851{
d66393e5
TH
1852 struct css_task_iter it;
1853 struct task_struct *task;
1854
bc2fb7ed 1855 css_task_iter_start(&cs->css, 0, &it);
d66393e5
TH
1856 while ((task = css_task_iter_next(&it)))
1857 cpuset_update_task_spread_flag(cs, task);
1858 css_task_iter_end(&it);
950592f7
MX
1859}
1860
1da177e4
LT
1861/*
1862 * update_flag - read a 0 or a 1 in a file and update associated flag
78608366
PM
1863 * bit: the bit to update (see cpuset_flagbits_t)
1864 * cs: the cpuset to update
1865 * turning_on: whether the flag is being set or cleared
053199ed 1866 *
5d21cc2d 1867 * Call with cpuset_mutex held.
1da177e4
LT
1868 */
1869
700fe1ab
PM
1870static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1871 int turning_on)
1da177e4 1872{
645fcc9d 1873 struct cpuset *trialcs;
40b6a762 1874 int balance_flag_changed;
950592f7 1875 int spread_flag_changed;
950592f7 1876 int err;
1da177e4 1877
645fcc9d
LZ
1878 trialcs = alloc_trial_cpuset(cs);
1879 if (!trialcs)
1880 return -ENOMEM;
1881
1da177e4 1882 if (turning_on)
645fcc9d 1883 set_bit(bit, &trialcs->flags);
1da177e4 1884 else
645fcc9d 1885 clear_bit(bit, &trialcs->flags);
1da177e4 1886
645fcc9d 1887 err = validate_change(cs, trialcs);
85d7b949 1888 if (err < 0)
645fcc9d 1889 goto out;
029190c5 1890
029190c5 1891 balance_flag_changed = (is_sched_load_balance(cs) !=
645fcc9d 1892 is_sched_load_balance(trialcs));
029190c5 1893
950592f7
MX
1894 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1895 || (is_spread_page(cs) != is_spread_page(trialcs)));
1896
8447a0fe 1897 spin_lock_irq(&callback_lock);
645fcc9d 1898 cs->flags = trialcs->flags;
8447a0fe 1899 spin_unlock_irq(&callback_lock);
85d7b949 1900
300ed6cb 1901 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
699140ba 1902 rebuild_sched_domains_locked();
029190c5 1903
950592f7 1904 if (spread_flag_changed)
d66393e5 1905 update_tasks_flags(cs);
645fcc9d 1906out:
bf92370c 1907 free_cpuset(trialcs);
645fcc9d 1908 return err;
1da177e4
LT
1909}
1910
ee8dde0c
WL
1911/*
1912 * update_prstate - update partition_root_state
1913 * cs: the cpuset to update
1914 * val: 0 - disabled, 1 - enabled
1915 *
1916 * Call with cpuset_mutex held.
1917 */
1918static int update_prstate(struct cpuset *cs, int val)
1919{
1920 int err;
1921 struct cpuset *parent = parent_cs(cs);
1922 struct tmpmasks tmp;
1923
1924 if ((val != 0) && (val != 1))
1925 return -EINVAL;
1926 if (val == cs->partition_root_state)
1927 return 0;
1928
1929 /*
3881b861 1930 * Cannot force a partial or invalid partition root to a full
ee8dde0c
WL
1931 * partition root.
1932 */
1933 if (val && cs->partition_root_state)
1934 return -EINVAL;
1935
1936 if (alloc_cpumasks(NULL, &tmp))
1937 return -ENOMEM;
1938
1939 err = -EINVAL;
1940 if (!cs->partition_root_state) {
1941 /*
1942 * Turning on partition root requires setting the
1943 * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed
1944 * cannot be NULL.
1945 */
1946 if (cpumask_empty(cs->cpus_allowed))
1947 goto out;
1948
1949 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1);
1950 if (err)
1951 goto out;
1952
1953 err = update_parent_subparts_cpumask(cs, partcmd_enable,
1954 NULL, &tmp);
1955 if (err) {
1956 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1957 goto out;
1958 }
1959 cs->partition_root_state = PRS_ENABLED;
1960 } else {
3881b861
WL
1961 /*
1962 * Turning off partition root will clear the
1963 * CS_CPU_EXCLUSIVE bit.
1964 */
1965 if (cs->partition_root_state == PRS_ERROR) {
1966 cs->partition_root_state = 0;
1967 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1968 err = 0;
1969 goto out;
1970 }
1971
ee8dde0c
WL
1972 err = update_parent_subparts_cpumask(cs, partcmd_disable,
1973 NULL, &tmp);
1974 if (err)
1975 goto out;
1976
1977 cs->partition_root_state = 0;
1978
1979 /* Turning off CS_CPU_EXCLUSIVE will not return error */
1980 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1981 }
1982
1983 /*
1984 * Update cpumask of parent's tasks except when it is the top
1985 * cpuset as some system daemons cannot be mapped to other CPUs.
1986 */
1987 if (parent != &top_cpuset)
1988 update_tasks_cpumask(parent);
1989
4716909c
WL
1990 if (parent->child_ecpus_count)
1991 update_sibling_cpumasks(parent, cs, &tmp);
1992
ee8dde0c
WL
1993 rebuild_sched_domains_locked();
1994out:
1995 free_cpumasks(NULL, &tmp);
645fcc9d 1996 return err;
1da177e4
LT
1997}
1998
3e0d98b9 1999/*
80f7228b 2000 * Frequency meter - How fast is some event occurring?
3e0d98b9
PJ
2001 *
2002 * These routines manage a digitally filtered, constant time based,
2003 * event frequency meter. There are four routines:
2004 * fmeter_init() - initialize a frequency meter.
2005 * fmeter_markevent() - called each time the event happens.
2006 * fmeter_getrate() - returns the recent rate of such events.
2007 * fmeter_update() - internal routine used to update fmeter.
2008 *
2009 * A common data structure is passed to each of these routines,
2010 * which is used to keep track of the state required to manage the
2011 * frequency meter and its digital filter.
2012 *
2013 * The filter works on the number of events marked per unit time.
2014 * The filter is single-pole low-pass recursive (IIR). The time unit
2015 * is 1 second. Arithmetic is done using 32-bit integers scaled to
2016 * simulate 3 decimal digits of precision (multiplied by 1000).
2017 *
2018 * With an FM_COEF of 933, and a time base of 1 second, the filter
2019 * has a half-life of 10 seconds, meaning that if the events quit
2020 * happening, then the rate returned from the fmeter_getrate()
2021 * will be cut in half every 10 seconds, until it converges to zero.
2022 *
2023 * It is not worth doing a real infinitely recursive filter. If more
2024 * than FM_MAXTICKS ticks have elapsed since the last filter event,
2025 * just compute FM_MAXTICKS ticks worth, by which point the level
2026 * will be stable.
2027 *
2028 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2029 * arithmetic overflow in the fmeter_update() routine.
2030 *
2031 * Given the simple 32 bit integer arithmetic used, this meter works
2032 * best for reporting rates between one per millisecond (msec) and
2033 * one per 32 (approx) seconds. At constant rates faster than one
2034 * per msec it maxes out at values just under 1,000,000. At constant
2035 * rates between one per msec, and one per second it will stabilize
2036 * to a value N*1000, where N is the rate of events per second.
2037 * At constant rates between one per second and one per 32 seconds,
2038 * it will be choppy, moving up on the seconds that have an event,
2039 * and then decaying until the next event. At rates slower than
2040 * about one in 32 seconds, it decays all the way back to zero between
2041 * each event.
2042 */
2043
2044#define FM_COEF 933 /* coefficient for half-life of 10 secs */
d2b43658 2045#define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
3e0d98b9
PJ
2046#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
2047#define FM_SCALE 1000 /* faux fixed point scale */
2048
2049/* Initialize a frequency meter */
2050static void fmeter_init(struct fmeter *fmp)
2051{
2052 fmp->cnt = 0;
2053 fmp->val = 0;
2054 fmp->time = 0;
2055 spin_lock_init(&fmp->lock);
2056}
2057
2058/* Internal meter update - process cnt events and update value */
2059static void fmeter_update(struct fmeter *fmp)
2060{
d2b43658
AB
2061 time64_t now;
2062 u32 ticks;
2063
2064 now = ktime_get_seconds();
2065 ticks = now - fmp->time;
3e0d98b9
PJ
2066
2067 if (ticks == 0)
2068 return;
2069
2070 ticks = min(FM_MAXTICKS, ticks);
2071 while (ticks-- > 0)
2072 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2073 fmp->time = now;
2074
2075 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2076 fmp->cnt = 0;
2077}
2078
2079/* Process any previous ticks, then bump cnt by one (times scale). */
2080static void fmeter_markevent(struct fmeter *fmp)
2081{
2082 spin_lock(&fmp->lock);
2083 fmeter_update(fmp);
2084 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2085 spin_unlock(&fmp->lock);
2086}
2087
2088/* Process any previous ticks, then return current value. */
2089static int fmeter_getrate(struct fmeter *fmp)
2090{
2091 int val;
2092
2093 spin_lock(&fmp->lock);
2094 fmeter_update(fmp);
2095 val = fmp->val;
2096 spin_unlock(&fmp->lock);
2097 return val;
2098}
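/*
 * Illustrative sketch (standalone userspace program, not kernel code):
 * the decay arithmetic of the filter above. With FM_COEF = 933 and
 * FM_SCALE = 1000, each idle second multiplies the level by 0.933, and
 * 0.933^10 ~= 0.50, giving the ten second half-life described in the
 * comment block.
 */
#include <stdio.h>

int main(void)
{
	int val = 1000000;	/* steady-state level at one event per msec */
	int t;

	for (t = 1; t <= 20; t++) {
		val = (933 * val) / 1000;	/* one idle second of decay */
		if (t % 10 == 0)
			printf("after %2d idle seconds: %d\n", t, val);
	}
	return 0;	/* prints roughly 500000 at t=10 and 250000 at t=20 */
}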
2099
57fce0a6
TH
2100static struct cpuset *cpuset_attach_old_cs;
2101
5d21cc2d 2102/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1f7dd3e5 2103static int cpuset_can_attach(struct cgroup_taskset *tset)
f780bdb7 2104{
1f7dd3e5
TH
2105 struct cgroup_subsys_state *css;
2106 struct cpuset *cs;
bb9d97b6
TH
2107 struct task_struct *task;
2108 int ret;
1da177e4 2109
57fce0a6 2110 /* used later by cpuset_attach() */
1f7dd3e5
TH
2111 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2112 cs = css_cs(css);
57fce0a6 2113
1243dc51 2114 percpu_down_write(&cpuset_rwsem);
5d21cc2d 2115
aa6ec29b 2116 /* allow moving tasks into an empty cpuset if on default hierarchy */
5d21cc2d 2117 ret = -ENOSPC;
b8d1b8ee 2118 if (!is_in_v2_mode() &&
88fa523b 2119 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
5d21cc2d 2120 goto out_unlock;
9985b0ba 2121
1f7dd3e5 2122 cgroup_taskset_for_each(task, css, tset) {
7f51412a
JL
2123 ret = task_can_attach(task, cs->cpus_allowed);
2124 if (ret)
5d21cc2d
TH
2125 goto out_unlock;
2126 ret = security_task_setscheduler(task);
2127 if (ret)
2128 goto out_unlock;
bb9d97b6 2129 }
f780bdb7 2130
452477fa
TH
2131 /*
2132 * Mark that an attach is in progress. This makes validate_change() fail
2133 * changes which zero cpus/mems_allowed.
2134 */
2135 cs->attach_in_progress++;
5d21cc2d
TH
2136 ret = 0;
2137out_unlock:
1243dc51 2138 percpu_up_write(&cpuset_rwsem);
5d21cc2d 2139 return ret;
8793d854 2140}
f780bdb7 2141
1f7dd3e5 2142static void cpuset_cancel_attach(struct cgroup_taskset *tset)
452477fa 2143{
1f7dd3e5 2144 struct cgroup_subsys_state *css;
1f7dd3e5
TH
2145
2146 cgroup_taskset_first(tset, &css);
1f7dd3e5 2147
1243dc51 2148 percpu_down_write(&cpuset_rwsem);
eb95419b 2149 css_cs(css)->attach_in_progress--;
1243dc51 2150 percpu_up_write(&cpuset_rwsem);
8793d854 2151}
1da177e4 2152
4e4c9a14 2153/*
5d21cc2d 2154 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
4e4c9a14
TH
2155 * but we can't allocate it dynamically there. Define it global and
2156 * allocate from cpuset_init().
2157 */
2158static cpumask_var_t cpus_attach;
2159
1f7dd3e5 2160static void cpuset_attach(struct cgroup_taskset *tset)
8793d854 2161{
67bd2c59 2162 /* static buf protected by cpuset_mutex */
4e4c9a14 2163 static nodemask_t cpuset_attach_nodemask_to;
bb9d97b6 2164 struct task_struct *task;
4530eddb 2165 struct task_struct *leader;
1f7dd3e5
TH
2166 struct cgroup_subsys_state *css;
2167 struct cpuset *cs;
57fce0a6 2168 struct cpuset *oldcs = cpuset_attach_old_cs;
22fb52dd 2169
1f7dd3e5
TH
2170 cgroup_taskset_first(tset, &css);
2171 cs = css_cs(css);
2172
1243dc51 2173 percpu_down_write(&cpuset_rwsem);
5d21cc2d 2174
4e4c9a14
TH
2175 /* prepare for attach */
2176 if (cs == &top_cpuset)
2177 cpumask_copy(cpus_attach, cpu_possible_mask);
2178 else
ae1c8023 2179 guarantee_online_cpus(cs, cpus_attach);
4e4c9a14 2180
ae1c8023 2181 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
4e4c9a14 2182
1f7dd3e5 2183 cgroup_taskset_for_each(task, css, tset) {
bb9d97b6
TH
2184 /*
2185 * can_attach beforehand should guarantee that this doesn't
2186 * fail. TODO: have a better way to handle failure here
2187 */
2188 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
2189
2190 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2191 cpuset_update_task_spread_flag(cs, task);
2192 }
22fb52dd 2193
f780bdb7 2194 /*
4530eddb
TH
2195 * Change mm for all threadgroup leaders. This is expensive and may
2196 * sleep and should be moved outside the migration path proper.
f780bdb7 2197 */
ae1c8023 2198 cpuset_attach_nodemask_to = cs->effective_mems;
1f7dd3e5 2199 cgroup_taskset_for_each_leader(leader, css, tset) {
3df9ca0a
TH
2200 struct mm_struct *mm = get_task_mm(leader);
2201
2202 if (mm) {
2203 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2204
2205 /*
2206 * old_mems_allowed is the same as mems_allowed
2207 * here, except if this task is being moved
2208 * automatically due to hotplug. In that case
2209 * @mems_allowed has been updated and is empty, so
2210 * @old_mems_allowed is the right nodeset that we
2211 * migrate mm from.
2212 */
e93ad19d 2213 if (is_memory_migrate(cs))
3df9ca0a
TH
2214 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
2215 &cpuset_attach_nodemask_to);
e93ad19d
TH
2216 else
2217 mmput(mm);
f047cecf 2218 }
4225399a 2219 }
452477fa 2220
33ad801d 2221 cs->old_mems_allowed = cpuset_attach_nodemask_to;
02bb5863 2222
452477fa 2223 cs->attach_in_progress--;
e44193d3
LZ
2224 if (!cs->attach_in_progress)
2225 wake_up(&cpuset_attach_wq);
5d21cc2d 2226
1243dc51 2227 percpu_up_write(&cpuset_rwsem);
1da177e4
LT
2228}
2229
2230/* The various types of files and directories in a cpuset file system */
2231
2232typedef enum {
45b07ef3 2233 FILE_MEMORY_MIGRATE,
1da177e4
LT
2234 FILE_CPULIST,
2235 FILE_MEMLIST,
afd1a8b3
LZ
2236 FILE_EFFECTIVE_CPULIST,
2237 FILE_EFFECTIVE_MEMLIST,
5cf8114d 2238 FILE_SUBPARTS_CPULIST,
1da177e4
LT
2239 FILE_CPU_EXCLUSIVE,
2240 FILE_MEM_EXCLUSIVE,
78608366 2241 FILE_MEM_HARDWALL,
029190c5 2242 FILE_SCHED_LOAD_BALANCE,
ee8dde0c 2243 FILE_PARTITION_ROOT,
1d3504fc 2244 FILE_SCHED_RELAX_DOMAIN_LEVEL,
3e0d98b9
PJ
2245 FILE_MEMORY_PRESSURE_ENABLED,
2246 FILE_MEMORY_PRESSURE,
825a46af
PJ
2247 FILE_SPREAD_PAGE,
2248 FILE_SPREAD_SLAB,
1da177e4
LT
2249} cpuset_filetype_t;
2250
182446d0
TH
2251static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
2252 u64 val)
700fe1ab 2253{
182446d0 2254 struct cpuset *cs = css_cs(css);
700fe1ab 2255 cpuset_filetype_t type = cft->private;
a903f086 2256 int retval = 0;
700fe1ab 2257
d74b27d6 2258 get_online_cpus();
1243dc51 2259 percpu_down_write(&cpuset_rwsem);
a903f086
LZ
2260 if (!is_cpuset_online(cs)) {
2261 retval = -ENODEV;
5d21cc2d 2262 goto out_unlock;
a903f086 2263 }
700fe1ab
PM
2264
2265 switch (type) {
1da177e4 2266 case FILE_CPU_EXCLUSIVE:
700fe1ab 2267 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1da177e4
LT
2268 break;
2269 case FILE_MEM_EXCLUSIVE:
700fe1ab 2270 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1da177e4 2271 break;
78608366
PM
2272 case FILE_MEM_HARDWALL:
2273 retval = update_flag(CS_MEM_HARDWALL, cs, val);
2274 break;
029190c5 2275 case FILE_SCHED_LOAD_BALANCE:
700fe1ab 2276 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1d3504fc 2277 break;
45b07ef3 2278 case FILE_MEMORY_MIGRATE:
700fe1ab 2279 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
45b07ef3 2280 break;
3e0d98b9 2281 case FILE_MEMORY_PRESSURE_ENABLED:
700fe1ab 2282 cpuset_memory_pressure_enabled = !!val;
3e0d98b9 2283 break;
825a46af 2284 case FILE_SPREAD_PAGE:
700fe1ab 2285 retval = update_flag(CS_SPREAD_PAGE, cs, val);
825a46af
PJ
2286 break;
2287 case FILE_SPREAD_SLAB:
700fe1ab 2288 retval = update_flag(CS_SPREAD_SLAB, cs, val);
825a46af 2289 break;
1da177e4
LT
2290 default:
2291 retval = -EINVAL;
700fe1ab 2292 break;
1da177e4 2293 }
5d21cc2d 2294out_unlock:
1243dc51 2295 percpu_up_write(&cpuset_rwsem);
d74b27d6 2296 put_online_cpus();
1da177e4
LT
2297 return retval;
2298}
2299
182446d0
TH
2300static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
2301 s64 val)
5be7a479 2302{
182446d0 2303 struct cpuset *cs = css_cs(css);
5be7a479 2304 cpuset_filetype_t type = cft->private;
5d21cc2d 2305 int retval = -ENODEV;
5be7a479 2306
d74b27d6 2307 get_online_cpus();
1243dc51 2308 percpu_down_write(&cpuset_rwsem);
5d21cc2d
TH
2309 if (!is_cpuset_online(cs))
2310 goto out_unlock;
e3712395 2311
5be7a479
PM
2312 switch (type) {
2313 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2314 retval = update_relax_domain_level(cs, val);
2315 break;
2316 default:
2317 retval = -EINVAL;
2318 break;
2319 }
5d21cc2d 2320out_unlock:
1243dc51 2321 percpu_up_write(&cpuset_rwsem);
d74b27d6 2322 put_online_cpus();
5be7a479
PM
2323 return retval;
2324}
2325
e3712395
PM
2326/*
2327 * Common handling for a write to a "cpus" or "mems" file.
2328 */
451af504
TH
2329static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
2330 char *buf, size_t nbytes, loff_t off)
e3712395 2331{
451af504 2332 struct cpuset *cs = css_cs(of_css(of));
645fcc9d 2333 struct cpuset *trialcs;
5d21cc2d 2334 int retval = -ENODEV;
e3712395 2335
451af504
TH
2336 buf = strstrip(buf);
2337
3a5a6d0c
TH
2338 /*
2339 * CPU or memory hotunplug may leave @cs w/o any execution
2340 * resources, in which case the hotplug code asynchronously updates
2341 * configuration and transfers all tasks to the nearest ancestor
2342 * which can execute.
2343 *
2344 * As writes to "cpus" or "mems" may restore @cs's execution
2345 * resources, wait for the previously scheduled operations before
2346 * proceeding, so that we don't end up repeatedly removing tasks added
2347 * after execution capability is restored.
76bb5ab8
TH
2348 *
2349 * cpuset_hotplug_work calls back into cgroup core via
2350 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
2351 * operation like this one can lead to a deadlock through kernfs
2352 * active_ref protection. Let's break the protection. Losing the
2353 * protection is okay as we check whether @cs is online after
2354 * grabbing cpuset_mutex anyway. This only happens on the legacy
2355 * hierarchies.
3a5a6d0c 2356 */
76bb5ab8
TH
2357 css_get(&cs->css);
2358 kernfs_break_active_protection(of->kn);
3a5a6d0c
TH
2359 flush_work(&cpuset_hotplug_work);
2360
d74b27d6 2361 get_online_cpus();
1243dc51 2362 percpu_down_write(&cpuset_rwsem);
5d21cc2d
TH
2363 if (!is_cpuset_online(cs))
2364 goto out_unlock;
e3712395 2365
645fcc9d 2366 trialcs = alloc_trial_cpuset(cs);
b75f38d6
LZ
2367 if (!trialcs) {
2368 retval = -ENOMEM;
5d21cc2d 2369 goto out_unlock;
b75f38d6 2370 }
645fcc9d 2371
451af504 2372 switch (of_cft(of)->private) {
e3712395 2373 case FILE_CPULIST:
645fcc9d 2374 retval = update_cpumask(cs, trialcs, buf);
e3712395
PM
2375 break;
2376 case FILE_MEMLIST:
645fcc9d 2377 retval = update_nodemask(cs, trialcs, buf);
e3712395
PM
2378 break;
2379 default:
2380 retval = -EINVAL;
2381 break;
2382 }
645fcc9d 2383
bf92370c 2384 free_cpuset(trialcs);
5d21cc2d 2385out_unlock:
1243dc51 2386 percpu_up_write(&cpuset_rwsem);
d74b27d6 2387 put_online_cpus();
76bb5ab8
TH
2388 kernfs_unbreak_active_protection(of->kn);
2389 css_put(&cs->css);
e93ad19d 2390 flush_workqueue(cpuset_migrate_mm_wq);
451af504 2391 return retval ?: nbytes;
e3712395
PM
2392}
2393
1da177e4
LT
2394/*
2395 * These ascii lists should be read in a single call, by using a user
2396 * buffer large enough to hold the entire map. If read in smaller
2397 * chunks, there is no guarantee of atomicity. Since the display format
2398 * used, list of ranges of sequential numbers, is variable length,
2399 * and since these maps can change value dynamically, one could read
2400 * gibberish by doing partial reads while a list was changing.
1da177e4 2401 */
2da8ca82 2402static int cpuset_common_seq_show(struct seq_file *sf, void *v)
1da177e4 2403{
2da8ca82
TH
2404 struct cpuset *cs = css_cs(seq_css(sf));
2405 cpuset_filetype_t type = seq_cft(sf)->private;
51ffe411 2406 int ret = 0;
1da177e4 2407
8447a0fe 2408 spin_lock_irq(&callback_lock);
1da177e4
LT
2409
2410 switch (type) {
2411 case FILE_CPULIST:
e8e6d97c 2412 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
1da177e4
LT
2413 break;
2414 case FILE_MEMLIST:
e8e6d97c 2415 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
1da177e4 2416 break;
afd1a8b3 2417 case FILE_EFFECTIVE_CPULIST:
e8e6d97c 2418 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
afd1a8b3
LZ
2419 break;
2420 case FILE_EFFECTIVE_MEMLIST:
e8e6d97c 2421 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
afd1a8b3 2422 break;
5cf8114d
WL
2423 case FILE_SUBPARTS_CPULIST:
2424 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2425 break;
1da177e4 2426 default:
51ffe411 2427 ret = -EINVAL;
1da177e4 2428 }
1da177e4 2429
8447a0fe 2430 spin_unlock_irq(&callback_lock);
51ffe411 2431 return ret;
1da177e4
LT
2432}
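/*
 * Illustrative sketch (standalone userspace program, not kernel code):
 * reading one of the cpulist/memlist files in a single call, as the
 * comment above advises. The cgroup mount point and group name are
 * assumptions for the example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];		/* large enough to hold the entire map */
	int fd = open("/sys/fs/cgroup/mygroup/cpuset.cpus.effective", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* one read(), no partial chunks */
	close(fd);
	if (n <= 0)
		return 1;
	buf[n] = '\0';
	printf("effective cpus: %s", buf);
	return 0;
}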
2433
182446d0 2434static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
700fe1ab 2435{
182446d0 2436 struct cpuset *cs = css_cs(css);
700fe1ab
PM
2437 cpuset_filetype_t type = cft->private;
2438 switch (type) {
2439 case FILE_CPU_EXCLUSIVE:
2440 return is_cpu_exclusive(cs);
2441 case FILE_MEM_EXCLUSIVE:
2442 return is_mem_exclusive(cs);
78608366
PM
2443 case FILE_MEM_HARDWALL:
2444 return is_mem_hardwall(cs);
700fe1ab
PM
2445 case FILE_SCHED_LOAD_BALANCE:
2446 return is_sched_load_balance(cs);
2447 case FILE_MEMORY_MIGRATE:
2448 return is_memory_migrate(cs);
2449 case FILE_MEMORY_PRESSURE_ENABLED:
2450 return cpuset_memory_pressure_enabled;
2451 case FILE_MEMORY_PRESSURE:
2452 return fmeter_getrate(&cs->fmeter);
2453 case FILE_SPREAD_PAGE:
2454 return is_spread_page(cs);
2455 case FILE_SPREAD_SLAB:
2456 return is_spread_slab(cs);
2457 default:
2458 BUG();
2459 }
cf417141
MK
2460
2461 /* Unreachable but makes gcc happy */
2462 return 0;
700fe1ab 2463}
1da177e4 2464
182446d0 2465static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
5be7a479 2466{
182446d0 2467 struct cpuset *cs = css_cs(css);
5be7a479
PM
2468 cpuset_filetype_t type = cft->private;
2469 switch (type) {
2470 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2471 return cs->relax_domain_level;
2472 default:
2473 BUG();
2474 }
cf417141
MK
2475
2476 /* Unreachable but makes gcc happy */
2477 return 0;
5be7a479
PM
2478}
2479
bb5b553c
WL
2480static int sched_partition_show(struct seq_file *seq, void *v)
2481{
2482 struct cpuset *cs = css_cs(seq_css(seq));
2483
2484 switch (cs->partition_root_state) {
2485 case PRS_ENABLED:
2486 seq_puts(seq, "root\n");
2487 break;
2488 case PRS_DISABLED:
2489 seq_puts(seq, "member\n");
2490 break;
2491 case PRS_ERROR:
2492 seq_puts(seq, "root invalid\n");
2493 break;
2494 }
2495 return 0;
2496}
2497
2498static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
2499 size_t nbytes, loff_t off)
2500{
2501 struct cpuset *cs = css_cs(of_css(of));
2502 int val;
2503 int retval = -ENODEV;
2504
2505 buf = strstrip(buf);
2506
2507 /*
b1e3aeb1 2508 * Convert "root" to ENABLED, and convert "member" to DISABLED.
bb5b553c 2509 */
b1e3aeb1 2510 if (!strcmp(buf, "root"))
bb5b553c 2511 val = PRS_ENABLED;
b1e3aeb1 2512 else if (!strcmp(buf, "member"))
bb5b553c
WL
2513 val = PRS_DISABLED;
2514 else
2515 return -EINVAL;
2516
2517 css_get(&cs->css);
d74b27d6 2518 get_online_cpus();
1243dc51 2519 percpu_down_write(&cpuset_rwsem);
bb5b553c
WL
2520 if (!is_cpuset_online(cs))
2521 goto out_unlock;
2522
2523 retval = update_prstate(cs, val);
2524out_unlock:
1243dc51 2525 percpu_up_write(&cpuset_rwsem);
d74b27d6 2526 put_online_cpus();
bb5b553c
WL
2527 css_put(&cs->css);
2528 return retval ?: nbytes;
2529}
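/*
 * Illustrative sketch (standalone userspace program, not kernel code):
 * driving the "cpuset.cpus.partition" handler above. Writing "root"
 * requests PRS_ENABLED and "member" requests PRS_DISABLED; the cgroup
 * path below is an assumption for the example.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/fs/cgroup/mygroup/cpuset.cpus.partition", O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return 1;
	/* a short or failed write (e.g. EINVAL) means the request was rejected */
	if (write(fd, "root", 4) != 4)
		ret = 1;
	close(fd);
	return ret;
}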
1da177e4
LT
2530
2531/*
2532 * for the common functions, 'private' gives the type of file
2533 */
2534
4ec22e9c 2535static struct cftype legacy_files[] = {
addf2c73
PM
2536 {
2537 .name = "cpus",
2da8ca82 2538 .seq_show = cpuset_common_seq_show,
451af504 2539 .write = cpuset_write_resmask,
e3712395 2540 .max_write_len = (100U + 6 * NR_CPUS),
addf2c73
PM
2541 .private = FILE_CPULIST,
2542 },
2543
2544 {
2545 .name = "mems",
2da8ca82 2546 .seq_show = cpuset_common_seq_show,
451af504 2547 .write = cpuset_write_resmask,
e3712395 2548 .max_write_len = (100U + 6 * MAX_NUMNODES),
addf2c73
PM
2549 .private = FILE_MEMLIST,
2550 },
2551
afd1a8b3
LZ
2552 {
2553 .name = "effective_cpus",
2554 .seq_show = cpuset_common_seq_show,
2555 .private = FILE_EFFECTIVE_CPULIST,
2556 },
2557
2558 {
2559 .name = "effective_mems",
2560 .seq_show = cpuset_common_seq_show,
2561 .private = FILE_EFFECTIVE_MEMLIST,
2562 },
2563
addf2c73
PM
2564 {
2565 .name = "cpu_exclusive",
2566 .read_u64 = cpuset_read_u64,
2567 .write_u64 = cpuset_write_u64,
2568 .private = FILE_CPU_EXCLUSIVE,
2569 },
2570
2571 {
2572 .name = "mem_exclusive",
2573 .read_u64 = cpuset_read_u64,
2574 .write_u64 = cpuset_write_u64,
2575 .private = FILE_MEM_EXCLUSIVE,
2576 },
2577
78608366
PM
2578 {
2579 .name = "mem_hardwall",
2580 .read_u64 = cpuset_read_u64,
2581 .write_u64 = cpuset_write_u64,
2582 .private = FILE_MEM_HARDWALL,
2583 },
2584
addf2c73
PM
2585 {
2586 .name = "sched_load_balance",
2587 .read_u64 = cpuset_read_u64,
2588 .write_u64 = cpuset_write_u64,
2589 .private = FILE_SCHED_LOAD_BALANCE,
2590 },
2591
2592 {
2593 .name = "sched_relax_domain_level",
5be7a479
PM
2594 .read_s64 = cpuset_read_s64,
2595 .write_s64 = cpuset_write_s64,
addf2c73
PM
2596 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
2597 },
2598
2599 {
2600 .name = "memory_migrate",
2601 .read_u64 = cpuset_read_u64,
2602 .write_u64 = cpuset_write_u64,
2603 .private = FILE_MEMORY_MIGRATE,
2604 },
2605
2606 {
2607 .name = "memory_pressure",
2608 .read_u64 = cpuset_read_u64,
1c08c22c 2609 .private = FILE_MEMORY_PRESSURE,
addf2c73
PM
2610 },
2611
2612 {
2613 .name = "memory_spread_page",
2614 .read_u64 = cpuset_read_u64,
2615 .write_u64 = cpuset_write_u64,
2616 .private = FILE_SPREAD_PAGE,
2617 },
2618
2619 {
2620 .name = "memory_spread_slab",
2621 .read_u64 = cpuset_read_u64,
2622 .write_u64 = cpuset_write_u64,
2623 .private = FILE_SPREAD_SLAB,
2624 },
3e0d98b9 2625
4baf6e33
TH
2626 {
2627 .name = "memory_pressure_enabled",
2628 .flags = CFTYPE_ONLY_ON_ROOT,
2629 .read_u64 = cpuset_read_u64,
2630 .write_u64 = cpuset_write_u64,
2631 .private = FILE_MEMORY_PRESSURE_ENABLED,
2632 },
1da177e4 2633
4baf6e33
TH
2634 { } /* terminate */
2635};
1da177e4 2636
4ec22e9c
WL
2637/*
2638 * This is currently a minimal set for the default hierarchy. It can be
2639 * expanded later on by migrating more features and control files from v1.
2640 */
2641static struct cftype dfl_files[] = {
2642 {
2643 .name = "cpus",
2644 .seq_show = cpuset_common_seq_show,
2645 .write = cpuset_write_resmask,
2646 .max_write_len = (100U + 6 * NR_CPUS),
2647 .private = FILE_CPULIST,
2648 .flags = CFTYPE_NOT_ON_ROOT,
2649 },
2650
2651 {
2652 .name = "mems",
2653 .seq_show = cpuset_common_seq_show,
2654 .write = cpuset_write_resmask,
2655 .max_write_len = (100U + 6 * MAX_NUMNODES),
2656 .private = FILE_MEMLIST,
2657 .flags = CFTYPE_NOT_ON_ROOT,
2658 },
2659
2660 {
2661 .name = "cpus.effective",
2662 .seq_show = cpuset_common_seq_show,
2663 .private = FILE_EFFECTIVE_CPULIST,
4ec22e9c
WL
2664 },
2665
2666 {
2667 .name = "mems.effective",
2668 .seq_show = cpuset_common_seq_show,
2669 .private = FILE_EFFECTIVE_MEMLIST,
4ec22e9c
WL
2670 },
2671
ee8dde0c 2672 {
b1e3aeb1 2673 .name = "cpus.partition",
bb5b553c
WL
2674 .seq_show = sched_partition_show,
2675 .write = sched_partition_write,
ee8dde0c
WL
2676 .private = FILE_PARTITION_ROOT,
2677 .flags = CFTYPE_NOT_ON_ROOT,
2678 },
2679
5cf8114d
WL
2680 {
2681 .name = "cpus.subpartitions",
2682 .seq_show = cpuset_common_seq_show,
2683 .private = FILE_SUBPARTS_CPULIST,
2684 .flags = CFTYPE_DEBUG,
2685 },
2686
4ec22e9c
WL
2687 { } /* terminate */
2688};
2689
2690
1da177e4 2691/*
92fb9748 2692 * cpuset_css_alloc - allocate a cpuset css
c9e5fe66 2693 * cgrp: control group that the new cpuset will be part of
1da177e4
LT
2694 */
2695
eb95419b
TH
2696static struct cgroup_subsys_state *
2697cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
1da177e4 2698{
c8f699bb 2699 struct cpuset *cs;
1da177e4 2700
eb95419b 2701 if (!parent_css)
8793d854 2702 return &top_cpuset.css;
033fa1c5 2703
c8f699bb 2704 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1da177e4 2705 if (!cs)
8793d854 2706 return ERR_PTR(-ENOMEM);
bf92370c
WL
2707
2708 if (alloc_cpumasks(cs, NULL)) {
2709 kfree(cs);
2710 return ERR_PTR(-ENOMEM);
2711 }
1da177e4 2712
029190c5 2713 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
f9a86fcb 2714 nodes_clear(cs->mems_allowed);
e2b9a3d7 2715 nodes_clear(cs->effective_mems);
3e0d98b9 2716 fmeter_init(&cs->fmeter);
1d3504fc 2717 cs->relax_domain_level = -1;
1da177e4 2718
c8f699bb
TH
2719 return &cs->css;
2720}
2721
eb95419b 2722static int cpuset_css_online(struct cgroup_subsys_state *css)
c8f699bb 2723{
eb95419b 2724 struct cpuset *cs = css_cs(css);
c431069f 2725 struct cpuset *parent = parent_cs(cs);
ae8086ce 2726 struct cpuset *tmp_cs;
492eb21b 2727 struct cgroup_subsys_state *pos_css;
c8f699bb
TH
2728
2729 if (!parent)
2730 return 0;
2731
d74b27d6 2732 get_online_cpus();
1243dc51 2733 percpu_down_write(&cpuset_rwsem);
5d21cc2d 2734
efeb77b2 2735 set_bit(CS_ONLINE, &cs->flags);
c8f699bb
TH
2736 if (is_spread_page(parent))
2737 set_bit(CS_SPREAD_PAGE, &cs->flags);
2738 if (is_spread_slab(parent))
2739 set_bit(CS_SPREAD_SLAB, &cs->flags);
1da177e4 2740
664eedde 2741 cpuset_inc();
033fa1c5 2742
8447a0fe 2743 spin_lock_irq(&callback_lock);
b8d1b8ee 2744 if (is_in_v2_mode()) {
e2b9a3d7
LZ
2745 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
2746 cs->effective_mems = parent->effective_mems;
4716909c
WL
2747 cs->use_parent_ecpus = true;
2748 parent->child_ecpus_count++;
e2b9a3d7 2749 }
8447a0fe 2750 spin_unlock_irq(&callback_lock);
e2b9a3d7 2751
eb95419b 2752 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
5d21cc2d 2753 goto out_unlock;
033fa1c5
TH
2754
2755 /*
2756 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
2757 * set. This flag handling is implemented in cgroup core for
2758 * historical reasons - the flag may be specified during mount.
2759 *
2760 * Currently, if any sibling cpusets have exclusive cpus or mem, we
2761 * refuse to clone the configuration - thereby refusing the task to
2762 * be entered, and as a result refusing the sys_unshare() or
2763 * clone() which initiated it. If this becomes a problem for some
2764 * users who wish to allow that scenario, then this could be
2765 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
2766 * (and likewise for mems) to the new cgroup.
2767 */
ae8086ce 2768 rcu_read_lock();
492eb21b 2769 cpuset_for_each_child(tmp_cs, pos_css, parent) {
ae8086ce
TH
2770 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
2771 rcu_read_unlock();
5d21cc2d 2772 goto out_unlock;
ae8086ce 2773 }
033fa1c5 2774 }
ae8086ce 2775 rcu_read_unlock();
033fa1c5 2776
8447a0fe 2777 spin_lock_irq(&callback_lock);
033fa1c5 2778 cs->mems_allowed = parent->mems_allowed;
790317e1 2779 cs->effective_mems = parent->mems_allowed;
033fa1c5 2780 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
790317e1 2781 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
cea74465 2782 spin_unlock_irq(&callback_lock);
5d21cc2d 2783out_unlock:
1243dc51 2784 percpu_up_write(&cpuset_rwsem);
d74b27d6 2785 put_online_cpus();
c8f699bb
TH
2786 return 0;
2787}
2788
0b9e6965
ZH
2789/*
2790 * If the cpuset being removed has its flag 'sched_load_balance'
2791 * enabled, then simulate turning sched_load_balance off, which
ee8dde0c
WL
2792 * will call rebuild_sched_domains_locked(). That is not needed
2793 * in the default hierarchy where only changes in partition
2794 * will cause repartitioning.
2795 *
2796 * If the cpuset has the 'sched.partition' flag enabled, simulate
2797 * turning 'sched.partition' off.
0b9e6965
ZH
2798 */
2799
eb95419b 2800static void cpuset_css_offline(struct cgroup_subsys_state *css)
c8f699bb 2801{
eb95419b 2802 struct cpuset *cs = css_cs(css);
c8f699bb 2803
d74b27d6 2804 get_online_cpus();
1243dc51 2805 percpu_down_write(&cpuset_rwsem);
c8f699bb 2806
ee8dde0c
WL
2807 if (is_partition_root(cs))
2808 update_prstate(cs, 0);
2809
2810 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2811 is_sched_load_balance(cs))
c8f699bb
TH
2812 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2813
4716909c
WL
2814 if (cs->use_parent_ecpus) {
2815 struct cpuset *parent = parent_cs(cs);
2816
2817 cs->use_parent_ecpus = false;
2818 parent->child_ecpus_count--;
2819 }
2820
664eedde 2821 cpuset_dec();
efeb77b2 2822 clear_bit(CS_ONLINE, &cs->flags);
c8f699bb 2823
1243dc51 2824 percpu_up_write(&cpuset_rwsem);
d74b27d6 2825 put_online_cpus();
1da177e4
LT
2826}
2827
eb95419b 2828static void cpuset_css_free(struct cgroup_subsys_state *css)
1da177e4 2829{
eb95419b 2830 struct cpuset *cs = css_cs(css);
1da177e4 2831
bf92370c 2832 free_cpuset(cs);
1da177e4
LT
2833}
2834
39bd0d15
LZ
2835static void cpuset_bind(struct cgroup_subsys_state *root_css)
2836{
1243dc51 2837 percpu_down_write(&cpuset_rwsem);
8447a0fe 2838 spin_lock_irq(&callback_lock);
39bd0d15 2839
b8d1b8ee 2840 if (is_in_v2_mode()) {
39bd0d15
LZ
2841 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
2842 top_cpuset.mems_allowed = node_possible_map;
2843 } else {
2844 cpumask_copy(top_cpuset.cpus_allowed,
2845 top_cpuset.effective_cpus);
2846 top_cpuset.mems_allowed = top_cpuset.effective_mems;
2847 }
2848
8447a0fe 2849 spin_unlock_irq(&callback_lock);
1243dc51 2850 percpu_up_write(&cpuset_rwsem);
39bd0d15
LZ
2851}
2852
06f4e948
ZL
2853/*
2854 * Make sure the new task conforms to the current state of its parent,
2855 * which could have been changed by cpuset just after it inherits the
2856 * state from the parent and before it sits on the cgroup's task list.
2857 */
8a15b817 2858static void cpuset_fork(struct task_struct *task)
06f4e948
ZL
2859{
2860 if (task_css_is_root(task, cpuset_cgrp_id))
2861 return;
2862
3bd37062 2863 set_cpus_allowed_ptr(task, current->cpus_ptr);
06f4e948
ZL
2864 task->mems_allowed = current->mems_allowed;
2865}
2866
073219e9 2867struct cgroup_subsys cpuset_cgrp_subsys = {
39bd0d15
LZ
2868 .css_alloc = cpuset_css_alloc,
2869 .css_online = cpuset_css_online,
2870 .css_offline = cpuset_css_offline,
2871 .css_free = cpuset_css_free,
2872 .can_attach = cpuset_can_attach,
2873 .cancel_attach = cpuset_cancel_attach,
2874 .attach = cpuset_attach,
5cf1cacb 2875 .post_attach = cpuset_post_attach,
39bd0d15 2876 .bind = cpuset_bind,
06f4e948 2877 .fork = cpuset_fork,
4ec22e9c
WL
2878 .legacy_cftypes = legacy_files,
2879 .dfl_cftypes = dfl_files,
b38e42e9 2880 .early_init = true,
4ec22e9c 2881 .threaded = true,
8793d854
PM
2882};
2883
1da177e4
LT
2884/**
2885 * cpuset_init - initialize cpusets at system boot
2886 *
d5f68d33 2887 * Description: Initialize top_cpuset
1da177e4
LT
2888 **/
2889
2890int __init cpuset_init(void)
2891{
1243dc51
JL
2892 BUG_ON(percpu_init_rwsem(&cpuset_rwsem));
2893
75fa8e5d
NMG
2894 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
2895 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
bf92370c 2896 BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
58568d2a 2897
300ed6cb 2898 cpumask_setall(top_cpuset.cpus_allowed);
f9a86fcb 2899 nodes_setall(top_cpuset.mems_allowed);
e2b9a3d7
LZ
2900 cpumask_setall(top_cpuset.effective_cpus);
2901 nodes_setall(top_cpuset.effective_mems);
1da177e4 2902
3e0d98b9 2903 fmeter_init(&top_cpuset.fmeter);
029190c5 2904 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
1d3504fc 2905 top_cpuset.relax_domain_level = -1;
1da177e4 2906
75fa8e5d 2907 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
2341d1b6 2908
8793d854 2909 return 0;
1da177e4
LT
2910}
2911
b1aac8bb 2912/*
cf417141 2913 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
b1aac8bb
PJ
2914 * or memory nodes, we need to walk over the cpuset hierarchy,
2915 * removing that CPU or node from all cpusets. If this removes the
956db3ca
CW
2916 * last CPU or node from a cpuset, then move the tasks in the empty
2917 * cpuset to its next-highest non-empty parent.
b1aac8bb 2918 */
956db3ca
CW
2919static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2920{
2921 struct cpuset *parent;
2922
956db3ca
CW
2923 /*
2924 * Find its next-highest non-empty parent (top cpuset
2925 * has online cpus, so can't be empty).
2926 */
c431069f 2927 parent = parent_cs(cs);
300ed6cb 2928 while (cpumask_empty(parent->cpus_allowed) ||
b4501295 2929 nodes_empty(parent->mems_allowed))
c431069f 2930 parent = parent_cs(parent);
956db3ca 2931
8cc99345 2932 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
12d3089c 2933 pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
e61734c5
TH
2934 pr_cont_cgroup_name(cs->css.cgroup);
2935 pr_cont("\n");
8cc99345 2936 }
956db3ca
CW
2937}
2938
be4c9dd7
LZ
2939static void
2940hotplug_update_tasks_legacy(struct cpuset *cs,
2941 struct cpumask *new_cpus, nodemask_t *new_mems,
2942 bool cpus_updated, bool mems_updated)
390a36aa
LZ
2943{
2944 bool is_empty;
2945
8447a0fe 2946 spin_lock_irq(&callback_lock);
be4c9dd7
LZ
2947 cpumask_copy(cs->cpus_allowed, new_cpus);
2948 cpumask_copy(cs->effective_cpus, new_cpus);
2949 cs->mems_allowed = *new_mems;
2950 cs->effective_mems = *new_mems;
8447a0fe 2951 spin_unlock_irq(&callback_lock);
390a36aa
LZ
2952
2953 /*
2954 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
2955 * as the tasks will be migrated to an ancestor.
2956 */
be4c9dd7 2957 if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
390a36aa 2958 update_tasks_cpumask(cs);
be4c9dd7 2959 if (mems_updated && !nodes_empty(cs->mems_allowed))
390a36aa
LZ
2960 update_tasks_nodemask(cs);
2961
2962 is_empty = cpumask_empty(cs->cpus_allowed) ||
2963 nodes_empty(cs->mems_allowed);
2964
1243dc51 2965 percpu_up_write(&cpuset_rwsem);
390a36aa
LZ
2966
2967 /*
2968 * Move tasks to the nearest ancestor with execution resources.
2969 * This is a full cgroup operation which will also call back into
2970 * cpuset. Should be done outside any lock.
2971 */
2972 if (is_empty)
2973 remove_tasks_in_empty_cpuset(cs);
2974
1243dc51 2975 percpu_down_write(&cpuset_rwsem);
390a36aa
LZ
2976}
2977
be4c9dd7
LZ
2978static void
2979hotplug_update_tasks(struct cpuset *cs,
2980 struct cpumask *new_cpus, nodemask_t *new_mems,
2981 bool cpus_updated, bool mems_updated)
390a36aa 2982{
be4c9dd7
LZ
2983 if (cpumask_empty(new_cpus))
2984 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
2985 if (nodes_empty(*new_mems))
2986 *new_mems = parent_cs(cs)->effective_mems;
2987
8447a0fe 2988 spin_lock_irq(&callback_lock);
be4c9dd7
LZ
2989 cpumask_copy(cs->effective_cpus, new_cpus);
2990 cs->effective_mems = *new_mems;
8447a0fe 2991 spin_unlock_irq(&callback_lock);
390a36aa 2992
be4c9dd7 2993 if (cpus_updated)
390a36aa 2994 update_tasks_cpumask(cs);
be4c9dd7 2995 if (mems_updated)
390a36aa
LZ
2996 update_tasks_nodemask(cs);
2997}
2998
4b842da2
WL
2999static bool force_rebuild;
3000
3001void cpuset_force_rebuild(void)
3002{
3003 force_rebuild = true;
3004}
3005
deb7aa30 3006/**
388afd85 3007 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
deb7aa30 3008 * @cs: cpuset in interest
4b842da2 3009 * @tmp: the tmpmasks structure pointer
956db3ca 3010 *
deb7aa30
TH
3011 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3012 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3013 * all its tasks are moved to the nearest ancestor with both resources.
80d1fa64 3014 */
4b842da2 3015static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
80d1fa64 3016{
be4c9dd7
LZ
3017 static cpumask_t new_cpus;
3018 static nodemask_t new_mems;
3019 bool cpus_updated;
3020 bool mems_updated;
4b842da2 3021 struct cpuset *parent;
e44193d3
LZ
3022retry:
3023 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
80d1fa64 3024
1243dc51 3025 percpu_down_write(&cpuset_rwsem);
7ddf96b0 3026
e44193d3
LZ
3027 /*
3028 * We have raced with task attaching. We wait until attaching
3029 * is finished, so we won't attach a task to an empty cpuset.
3030 */
3031 if (cs->attach_in_progress) {
1243dc51 3032 percpu_up_write(&cpuset_rwsem);
e44193d3
LZ
3033 goto retry;
3034 }
3035
4b842da2
WL
3036 parent = parent_cs(cs);
3037 compute_effective_cpumask(&new_cpus, cs, parent);
3038 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3039
3040 if (cs->nr_subparts_cpus)
3041 /*
3042 * Make sure that CPUs allocated to child partitions
3043 * do not show up in effective_cpus.
3044 */
3045 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3046
3047 if (!tmp || !cs->partition_root_state)
3048 goto update_tasks;
80d1fa64 3049
4b842da2
WL
3050 /*
3051 * In the unlikely event that a partition root has empty
3052 * effective_cpus or its parent becomes erroneous, we have to
3053 * transition it to the erroneous state.
3054 */
3055 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
3056 (parent->partition_root_state == PRS_ERROR))) {
3057 if (cs->nr_subparts_cpus) {
3058 cs->nr_subparts_cpus = 0;
3059 cpumask_clear(cs->subparts_cpus);
3060 compute_effective_cpumask(&new_cpus, cs, parent);
3061 }
80d1fa64 3062
4b842da2
WL
3063 /*
3064 * If the effective_cpus is empty because the child
3065 * partitions take away all the CPUs, we can keep
3066 * the current partition and let the child partitions
3067 * fight for available CPUs.
3068 */
3069 if ((parent->partition_root_state == PRS_ERROR) ||
3070 cpumask_empty(&new_cpus)) {
3071 update_parent_subparts_cpumask(cs, partcmd_disable,
3072 NULL, tmp);
3073 cs->partition_root_state = PRS_ERROR;
3074 }
3075 cpuset_force_rebuild();
3076 }
3077
3078 /*
3079 * On the other hand, an erroneous partition root may be transitioned
3080 * back to a regular one or a partition root with no CPU allocated
3081 * from the parent may change to erroneous.
3082 */
3083 if (is_partition_root(parent) &&
3084 ((cs->partition_root_state == PRS_ERROR) ||
3085 !cpumask_intersects(&new_cpus, parent->subparts_cpus)) &&
3086 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp))
3087 cpuset_force_rebuild();
3088
3089update_tasks:
be4c9dd7
LZ
3090 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3091 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
deb7aa30 3092
b8d1b8ee 3093 if (is_in_v2_mode())
be4c9dd7
LZ
3094 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3095 cpus_updated, mems_updated);
390a36aa 3096 else
be4c9dd7
LZ
3097 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3098 cpus_updated, mems_updated);
8d033948 3099
1243dc51 3100 percpu_up_write(&cpuset_rwsem);
b1aac8bb
PJ
3101}
3102
deb7aa30 3103/**
3a5a6d0c 3104 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
956db3ca 3105 *
deb7aa30
TH
3106 * This function is called after either CPU or memory configuration has
3107 * changed and updates cpuset accordingly. The top_cpuset is always
3108 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3109 * order to make cpusets transparent (of no effect) on systems that are
3110 * actively using CPU hotplug but making no active use of cpusets.
956db3ca 3111 *
deb7aa30 3112 * Non-root cpusets are only affected by offlining. If any CPUs or memory
388afd85
LZ
3113 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3114 * all descendants.
956db3ca 3115 *
deb7aa30
TH
3116 * Note that CPU offlining during suspend is ignored. We don't modify
3117 * cpusets across suspend/resume cycles at all.
956db3ca 3118 */
3a5a6d0c 3119static void cpuset_hotplug_workfn(struct work_struct *work)
b1aac8bb 3120{
5c5cc623
LZ
3121 static cpumask_t new_cpus;
3122 static nodemask_t new_mems;
deb7aa30 3123 bool cpus_updated, mems_updated;
b8d1b8ee 3124 bool on_dfl = is_in_v2_mode();
4b842da2
WL
3125 struct tmpmasks tmp, *ptmp = NULL;
3126
3127 if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3128 ptmp = &tmp;
b1aac8bb 3129
1243dc51 3130 percpu_down_write(&cpuset_rwsem);
956db3ca 3131
deb7aa30
TH
3132 /* fetch the available cpus/mems and find out which changed how */
3133 cpumask_copy(&new_cpus, cpu_active_mask);
3134 new_mems = node_states[N_MEMORY];
7ddf96b0 3135
4b842da2
WL
3136 /*
3137 * If subparts_cpus is populated, it is likely that the check below
3138 * will produce a false positive on cpus_updated when the cpu list
3139 * isn't changed. It is extra work, but it is better to be safe.
3140 */
7e88291b
LZ
3141 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3142 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
7ddf96b0 3143
deb7aa30
TH
3144 /* synchronize cpus_allowed to cpu_active_mask */
3145 if (cpus_updated) {
8447a0fe 3146 spin_lock_irq(&callback_lock);
7e88291b
LZ
3147 if (!on_dfl)
3148 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
4b842da2
WL
3149 /*
3150 * Make sure that CPUs allocated to child partitions
3151 * do not show up in effective_cpus. If no CPU is left,
3152 * we clear the subparts_cpus & let the child partitions
3153 * fight for the CPUs again.
3154 */
3155 if (top_cpuset.nr_subparts_cpus) {
3156 if (cpumask_subset(&new_cpus,
3157 top_cpuset.subparts_cpus)) {
3158 top_cpuset.nr_subparts_cpus = 0;
3159 cpumask_clear(top_cpuset.subparts_cpus);
3160 } else {
3161 cpumask_andnot(&new_cpus, &new_cpus,
3162 top_cpuset.subparts_cpus);
3163 }
3164 }
1344ab9c 3165 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
8447a0fe 3166 spin_unlock_irq(&callback_lock);
deb7aa30
TH
3167 /* we don't mess with cpumasks of tasks in top_cpuset */
3168 }
b4501295 3169
deb7aa30
TH
3170 /* synchronize mems_allowed to N_MEMORY */
3171 if (mems_updated) {
8447a0fe 3172 spin_lock_irq(&callback_lock);
7e88291b
LZ
3173 if (!on_dfl)
3174 top_cpuset.mems_allowed = new_mems;
1344ab9c 3175 top_cpuset.effective_mems = new_mems;
8447a0fe 3176 spin_unlock_irq(&callback_lock);
d66393e5 3177 update_tasks_nodemask(&top_cpuset);
deb7aa30 3178 }
b4501295 3179
1243dc51 3180 percpu_up_write(&cpuset_rwsem);
388afd85 3181
5c5cc623
LZ
3182 /* if cpus or mems changed, we need to propagate to descendants */
3183 if (cpus_updated || mems_updated) {
deb7aa30 3184 struct cpuset *cs;
492eb21b 3185 struct cgroup_subsys_state *pos_css;
f9b4fb8d 3186
fc560a26 3187 rcu_read_lock();
492eb21b 3188 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
ec903c0c 3189 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
388afd85
LZ
3190 continue;
3191 rcu_read_unlock();
7ddf96b0 3192
4b842da2 3193 cpuset_hotplug_update_tasks(cs, ptmp);
b4501295 3194
388afd85
LZ
3195 rcu_read_lock();
3196 css_put(&cs->css);
3197 }
3198 rcu_read_unlock();
3199 }
8d033948 3200
deb7aa30 3201 /* rebuild sched domains if cpus_allowed has changed */
50e76632
PZ
3202 if (cpus_updated || force_rebuild) {
3203 force_rebuild = false;
e0e80a02 3204 rebuild_sched_domains();
50e76632 3205 }
4b842da2
WL
3206
3207 free_cpumasks(NULL, ptmp);
b1aac8bb
PJ
3208}
3209
30e03acd 3210void cpuset_update_active_cpus(void)
4c4d50f7 3211{
3a5a6d0c
TH
3212 /*
3213 * We're inside cpu hotplug critical region which usually nests
3214 * inside cgroup synchronization. Bounce actual hotplug processing
3215 * to a work item to avoid reverse locking order.
3a5a6d0c 3216 */
3a5a6d0c 3217 schedule_work(&cpuset_hotplug_work);
4c4d50f7 3218}
4c4d50f7 3219
50e76632
PZ
3220void cpuset_wait_for_hotplug(void)
3221{
3222 flush_work(&cpuset_hotplug_work);
3223}
3224
38837fc7 3225/*
38d7bee9
LJ
3226 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3227 * Call this routine anytime after node_states[N_MEMORY] changes.
a1cd2b13 3228 * See cpuset_update_active_cpus() for CPU hotplug handling.
38837fc7 3229 */
f481891f
MX
3230static int cpuset_track_online_nodes(struct notifier_block *self,
3231 unsigned long action, void *arg)
38837fc7 3232{
3a5a6d0c 3233 schedule_work(&cpuset_hotplug_work);
f481891f 3234 return NOTIFY_OK;
38837fc7 3235}
d8f10cb3
AM
3236
3237static struct notifier_block cpuset_track_online_nodes_nb = {
3238 .notifier_call = cpuset_track_online_nodes,
3239 .priority = 10, /* ??! */
3240};
38837fc7 3241
1da177e4
LT
3242/**
3243 * cpuset_init_smp - initialize cpus_allowed
3244 *
3245 * Description: Finish top cpuset after cpu, node maps are initialized
d8f10cb3 3246 */
1da177e4
LT
3247void __init cpuset_init_smp(void)
3248{
6ad4c188 3249 cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
38d7bee9 3250 top_cpuset.mems_allowed = node_states[N_MEMORY];
33ad801d 3251 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
4c4d50f7 3252
e2b9a3d7
LZ
3253 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3254 top_cpuset.effective_mems = node_states[N_MEMORY];
3255
d8f10cb3 3256 register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
e93ad19d
TH
3257
3258 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3259 BUG_ON(!cpuset_migrate_mm_wq);
1da177e4
LT
3260}
3261
3262/**
1da177e4
LT
3263 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
3264 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
6af866af 3265 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
1da177e4 3266 *
300ed6cb 3267 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
1da177e4 3268 * attached to the specified @tsk. Guaranteed to return some non-empty
5f054e31 3269 * subset of cpu_online_mask, even if this means going outside the
1da177e4
LT
3270 * task's cpuset.
3271 **/
3272
6af866af 3273void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
1da177e4 3274{
8447a0fe
VD
3275 unsigned long flags;
3276
3277 spin_lock_irqsave(&callback_lock, flags);
b8dadcb5 3278 rcu_read_lock();
ae1c8023 3279 guarantee_online_cpus(task_cs(tsk), pmask);
b8dadcb5 3280 rcu_read_unlock();
8447a0fe 3281 spin_unlock_irqrestore(&callback_lock, flags);
1da177e4
LT
3282}
3283
d477f8c2
JS
3284/**
3285 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3286 * @tsk: pointer to task_struct with which the scheduler is struggling
3287 *
3288 * Description: In the case that the scheduler cannot find an allowed cpu in
3289 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3290 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3291 * which will not contain a sane cpumask during cases such as cpu hotplugging.
3292 * This is the absolute last resort for the scheduler and it is only used if
3293 * _every_ other avenue has been traveled.
3294 **/
3295
2baab4e9 3296void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
9084bb82 3297{
9084bb82 3298 rcu_read_lock();
d477f8c2
JS
3299 do_set_cpus_allowed(tsk, is_in_v2_mode() ?
3300 task_cs(tsk)->cpus_allowed : cpu_possible_mask);
9084bb82
ON
3301 rcu_read_unlock();
3302
3303 /*
3304 * We own tsk->cpus_allowed, nobody can change it under us.
3305 *
3306 * But we used cs && cs->cpus_allowed lockless and thus can
3307 * race with cgroup_attach_task() or update_cpumask() and get
3308 * the wrong tsk->cpus_allowed. However, both cases imply the
3309 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
3310 * which takes task_rq_lock().
3311 *
3312 * If we are called after it dropped the lock we must see all
3313 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
3314 * set any mask even if it is not right from task_cs() pov,
3315 * the pending set_cpus_allowed_ptr() will fix things.
2baab4e9
PZ
3316 *
3317 * select_fallback_rq() will fix things up and set cpu_possible_mask
3318 * if required.
9084bb82 3319 */
9084bb82
ON
3320}
3321
8f4ab07f 3322void __init cpuset_init_current_mems_allowed(void)
1da177e4 3323{
f9a86fcb 3324 nodes_setall(current->mems_allowed);
1da177e4
LT
3325}
3326
909d75a3
PJ
3327/**
3328 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
3329 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3330 *
3331 * Description: Returns the nodemask_t mems_allowed of the cpuset
3332 * attached to the specified @tsk. Guaranteed to return some non-empty
38d7bee9 3333 * subset of node_states[N_MEMORY], even if this means going outside the
909d75a3
PJ
3334 * task's cpuset.
3335 **/
3336
3337nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
3338{
3339 nodemask_t mask;
8447a0fe 3340 unsigned long flags;
909d75a3 3341
8447a0fe 3342 spin_lock_irqsave(&callback_lock, flags);
b8dadcb5 3343 rcu_read_lock();
ae1c8023 3344 guarantee_online_mems(task_cs(tsk), &mask);
b8dadcb5 3345 rcu_read_unlock();
8447a0fe 3346 spin_unlock_irqrestore(&callback_lock, flags);
909d75a3
PJ
3347
3348 return mask;
3349}
3350
d9fd8a6d 3351/**
19770b32
MG
3352 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
3353 * @nodemask: the nodemask to be checked
d9fd8a6d 3354 *
19770b32 3355 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
1da177e4 3356 */
19770b32 3357int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
1da177e4 3358{
19770b32 3359 return nodes_intersects(*nodemask, current->mems_allowed);
1da177e4
LT
3360}
3361
9bf2229f 3362/*
78608366
PM
3363 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
3364 * mem_hardwall ancestor to the specified cpuset. Call holding
8447a0fe 3365 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
78608366 3366 * (an unusual configuration), then returns the root cpuset.
9bf2229f 3367 */
c9710d80 3368static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
9bf2229f 3369{
c431069f
TH
3370 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
3371 cs = parent_cs(cs);
9bf2229f
PJ
3372 return cs;
3373}
3374
d9fd8a6d 3375/**
344736f2 3376 * cpuset_node_allowed - Can we allocate on a memory node?
a1bc5a4e 3377 * @node: is this an allowed node?
02a0e53d 3378 * @gfp_mask: memory allocation flags
d9fd8a6d 3379 *
6e276d2a
DR
3380 * If we're in interrupt, yes, we can always allocate. If @node is set in
3381 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
3382 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
da99ecf1 3383 * yes. If current has access to memory reserves as an oom victim, yes.
9bf2229f
PJ
3384 * Otherwise, no.
3385 *
3386 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
c596d9f3 3387 * and do not allow allocations outside the current task's cpuset
da99ecf1 3388 * unless the task has been OOM killed.
9bf2229f 3389 * GFP_KERNEL allocations are not so marked, so can escape to the
78608366 3390 * nearest enclosing hardwalled ancestor cpuset.
9bf2229f 3391 *
8447a0fe 3392 * Scanning up parent cpusets requires callback_lock. The
02a0e53d
PJ
3393 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
3394 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
3395 * current task's mems_allowed came up empty on the first pass over
3396 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
8447a0fe 3397 * cpuset are short of memory, might require taking the callback_lock.
9bf2229f 3398 *
36be57ff 3399 * The first call here from mm/page_alloc:get_page_from_freelist()
02a0e53d
PJ
3400 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
3401 * so no allocation on a node outside the cpuset is allowed (unless
3402 * in interrupt, of course).
36be57ff
PJ
3403 *
3404 * The second pass through get_page_from_freelist() doesn't even call
3405 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
3406 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
3407 * in alloc_flags. That logic and the checks below have the combined
3408 * effect that:
9bf2229f
PJ
3409 * in_interrupt - any node ok (current task context irrelevant)
3410 * GFP_ATOMIC - any node ok
da99ecf1 3411 * tsk_is_oom_victim - any node ok
78608366 3412 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
9bf2229f 3413 * GFP_USER - only nodes in current task's mems_allowed ok.
02a0e53d 3414 */
002f2906 3415bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
1da177e4 3416{
c9710d80 3417 struct cpuset *cs; /* current cpuset ancestors */
29afd49b 3418 int allowed; /* is allocation on @node allowed? */
8447a0fe 3419 unsigned long flags;
9bf2229f 3420
6e276d2a 3421 if (in_interrupt())
002f2906 3422 return true;
9bf2229f 3423 if (node_isset(node, current->mems_allowed))
002f2906 3424 return true;
c596d9f3
DR
3425 /*
3426 * Allow tasks that have access to memory reserves because they have
3427 * been OOM killed to get memory anywhere.
3428 */
da99ecf1 3429 if (unlikely(tsk_is_oom_victim(current)))
002f2906 3430 return true;
9bf2229f 3431 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
002f2906 3432 return false;
9bf2229f 3433
5563e770 3434 if (current->flags & PF_EXITING) /* Let dying task have memory */
002f2906 3435 return true;
5563e770 3436
9bf2229f 3437 /* Not hardwall and node outside mems_allowed: scan up cpusets */
8447a0fe 3438 spin_lock_irqsave(&callback_lock, flags);
053199ed 3439
b8dadcb5 3440 rcu_read_lock();
78608366 3441 cs = nearest_hardwall_ancestor(task_cs(current));
99afb0fd 3442 allowed = node_isset(node, cs->mems_allowed);
b8dadcb5 3443 rcu_read_unlock();
053199ed 3444
8447a0fe 3445 spin_unlock_irqrestore(&callback_lock, flags);
9bf2229f 3446 return allowed;
1da177e4
LT
3447}
3448
825a46af 3449/**
6adef3eb
JS
3450 * cpuset_mem_spread_node() - On which node to begin search for a file page
3451 * cpuset_slab_spread_node() - On which node to begin search for a slab page
825a46af
PJ
3452 *
3453 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
3454 * tasks in a cpuset with is_spread_page or is_spread_slab set),
3455 * and if the memory allocation used cpuset_mem_spread_node()
3456 * to determine on which node to start looking, as it will for
3457 * certain page cache or slab cache pages such as those used for file
3458 * system buffers and inode caches, then instead of starting the
3459 * search for a free page on the local node, spread the starting
3460 * node around the task's mems_allowed nodes.
3461 *
3462 * We don't have to worry about the returned node being offline
3463 * because "it can't happen", and even if it did, it would be ok.
3464 *
3465 * The routines calling guarantee_online_mems() are careful to
3466 * only set nodes in task->mems_allowed that are online. So it
3467 * should not be possible for the following code to return an
3468 * offline node. But if it did, that would be ok, as this routine
3469 * is not returning the node where the allocation must be, only
3470 * the node where the search should start. The zonelist passed to
3471 * __alloc_pages() will include all nodes. If the slab allocator
3472 * is passed an offline node, it will fall back to the local node.
3473 * See kmem_cache_alloc_node().
3474 */
3475
6adef3eb 3476static int cpuset_spread_node(int *rotor)
825a46af 3477{
0edaf86c 3478 return *rotor = next_node_in(*rotor, current->mems_allowed);
825a46af 3479}
6adef3eb
JS
3480
3481int cpuset_mem_spread_node(void)
3482{
778d3b0f
MH
3483 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
3484 current->cpuset_mem_spread_rotor =
3485 node_random(&current->mems_allowed);
3486
6adef3eb
JS
3487 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
3488}
3489
3490int cpuset_slab_spread_node(void)
3491{
778d3b0f
MH
3492 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
3493 current->cpuset_slab_spread_rotor =
3494 node_random(&current->mems_allowed);
3495
6adef3eb
JS
3496 return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
3497}
3498
825a46af
PJ
3499EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
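/*
 * Illustrative sketch (hypothetical caller): starting a page-cache-style
 * allocation on the rotor node rather than the local node, as described
 * above. Real callers gate this on PF_SPREAD_PAGE; the sketch hard-codes
 * the spread path.
 */
#include <linux/gfp.h>

static struct page *demo_spread_alloc(gfp_t gfp)
{
	int nid = cpuset_mem_spread_node();	/* rotates through mems_allowed */

	/* an offline nid is tolerated: the zonelist covers all nodes */
	return alloc_pages_node(nid, gfp, 0);
}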
3500
ef08e3b4 3501/**
bbe373f2
DR
3502 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
3503 * @tsk1: pointer to task_struct of some task.
3504 * @tsk2: pointer to task_struct of some other task.
3505 *
3506 * Description: Return true if @tsk1's mems_allowed intersects the
3507 * mems_allowed of @tsk2. Used by the OOM killer to determine if
3508 * one of the task's memory usage might impact the memory available
3509 * to the other.
ef08e3b4
PJ
3510 **/
3511
bbe373f2
DR
3512int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
3513 const struct task_struct *tsk2)
ef08e3b4 3514{
bbe373f2 3515 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
ef08e3b4
PJ
3516}
3517
75aa1994 3518/**
da39da3a 3519 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
75aa1994 3520 *
da39da3a 3521 * Description: Prints current's name, cpuset name, and cached copy of its
b8dadcb5 3522 * mems_allowed to the kernel log.
75aa1994 3523 */
da39da3a 3524void cpuset_print_current_mems_allowed(void)
75aa1994 3525{
b8dadcb5 3526 struct cgroup *cgrp;
75aa1994 3527
b8dadcb5 3528 rcu_read_lock();
63f43f55 3529
da39da3a 3530 cgrp = task_cs(current)->css.cgroup;
ef8444ea 3531 pr_cont(",cpuset=");
e61734c5 3532 pr_cont_cgroup_name(cgrp);
ef8444ea 3533 pr_cont(",mems_allowed=%*pbl",
da39da3a 3534 nodemask_pr_args(&current->mems_allowed));
f440d98f 3535
cfb5966b 3536 rcu_read_unlock();
75aa1994
DR
3537}
3538
3e0d98b9
PJ
3539/*
3540 * Collection of memory_pressure is suppressed unless
3541 * this flag is enabled by writing "1" to the special
3542 * cpuset file 'memory_pressure_enabled' in the root cpuset.
3543 */
3544
c5b2aff8 3545int cpuset_memory_pressure_enabled __read_mostly;
3e0d98b9
PJ
3546
3547/**
3548 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
3549 *
3550 * Keep a running average of the rate of synchronous (direct)
3551 * page reclaim efforts initiated by tasks in each cpuset.
3552 *
3553 * This represents the rate at which some task in the cpuset
3554 * ran low on memory on all nodes it was allowed to use, and
3555 * had to enter the kernel's page reclaim code in an effort to
3556 * create more free memory by tossing clean pages or swapping
3557 * or writing dirty pages.
3558 *
3559 * Display to user space in the per-cpuset read-only file
3560 * "memory_pressure". Value displayed is an integer
3561 * representing the recent rate of entry into the synchronous
3562 * (direct) page reclaim by any task attached to the cpuset.
3563 **/
3564
3565void __cpuset_memory_pressure_bump(void)
3566{
b8dadcb5 3567 rcu_read_lock();
8793d854 3568 fmeter_markevent(&task_cs(current)->fmeter);
b8dadcb5 3569 rcu_read_unlock();
3e0d98b9
PJ
3570}
3571
8793d854 3572#ifdef CONFIG_PROC_PID_CPUSET
1da177e4
LT
3573/*
3574 * proc_cpuset_show()
3575 * - Print task's cpuset path into seq_file.
3576 * - Used for /proc/<pid>/cpuset.
053199ed
PJ
3577 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
3578 * doesn't really matter if tsk->cpuset changes after we read it,
5d21cc2d 3579 * and we take cpuset_mutex, keeping cpuset_attach() from changing it
2df167a3 3580 * anyway.
1da177e4 3581 */
52de4779
ZL
3582int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
3583 struct pid *pid, struct task_struct *tsk)
1da177e4 3584{
4c737b41 3585 char *buf;
8793d854 3586 struct cgroup_subsys_state *css;
99f89551 3587 int retval;
1da177e4 3588
99f89551 3589 retval = -ENOMEM;
e61734c5 3590 buf = kmalloc(PATH_MAX, GFP_KERNEL);
1da177e4 3591 if (!buf)
99f89551
EB
3592 goto out;
3593
a79a908f 3594 css = task_get_css(tsk, cpuset_cgrp_id);
4c737b41
TH
3595 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
3596 current->nsproxy->cgroup_ns);
a79a908f 3597 css_put(css);
4c737b41 3598 if (retval >= PATH_MAX)
679a5e3f
TH
3599 retval = -ENAMETOOLONG;
3600 if (retval < 0)
52de4779 3601 goto out_free;
4c737b41 3602 seq_puts(m, buf);
1da177e4 3603 seq_putc(m, '\n');
e61734c5 3604 retval = 0;
99f89551 3605out_free:
1da177e4 3606 kfree(buf);
99f89551 3607out:
1da177e4
LT
3608 return retval;
3609}
8793d854 3610#endif /* CONFIG_PROC_PID_CPUSET */
1da177e4 3611
d01d4827 3612/* Display task mems_allowed in /proc/<pid>/status file. */
df5f8314
EB
3613void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
3614{
e8e6d97c
TH
3615 seq_printf(m, "Mems_allowed:\t%*pb\n",
3616 nodemask_pr_args(&task->mems_allowed));
3617 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
3618 nodemask_pr_args(&task->mems_allowed));
1da177e4 3619}