// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use the default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
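
/*
 * Illustrative sketch only (not part of this file): the modes documented
 * above are what userspace selects through the set_mempolicy()/mbind()
 * syscalls defined later in this file. Assuming two online nodes and with
 * all error handling omitted, a caller might do:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *	void *p;
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *	p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, 1 << 20, MPOL_BIND, &nodes, 8 * sizeof(nodes),
 *	      MPOL_MF_STRICT);
 */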

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);
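
/*
 * Illustrative use only (hypothetical caller, not from this file): a driver
 * whose device-local node may be offline can fall back to the closest online
 * node, e.g.
 *
 *	int nid = numa_map_to_online_node(dev_to_node(dev));
 */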
155
74d2c3a0 156struct mempolicy *get_task_policy(struct task_struct *p)
5606e387
MG
157{
158 struct mempolicy *pol = p->mempolicy;
f15ca78e 159 int node;
5606e387 160
f15ca78e
ON
161 if (pol)
162 return pol;
5606e387 163
f15ca78e
ON
164 node = numa_node_id();
165 if (node != NUMA_NO_NODE) {
166 pol = &preferred_node_policy[node];
167 /* preferred_node_policy is not initialised early in boot */
168 if (pol->mode)
169 return pol;
5606e387
MG
170 }
171
f15ca78e 172 return &default_policy;
5606e387
MG
173}
174
37012946
DR
175static const struct mempolicy_operations {
176 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
213980c0 177 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
37012946
DR
178} mpol_ops[MPOL_MAX];
179
f5b087b5
DR
180static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
181{
6d556294 182 return pol->flags & MPOL_MODE_FLAGS;
4c50bc01
DR
183}
184
185static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
186 const nodemask_t *rel)
187{
188 nodemask_t tmp;
189 nodes_fold(tmp, *orig, nodes_weight(*rel));
190 nodes_onto(*ret, tmp, *rel);
f5b087b5
DR
191}
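
/*
 * Illustrative note (assumed example, not derived from a specific caller):
 * with MPOL_F_RELATIVE_NODES the user mask is interpreted relative to the
 * allowed set. For example, a user mask of {0,2} with allowed nodes {4,5,6}
 * maps to {4,6}; bits at or above nodes_weight(*rel) are first folded back
 * modulo that weight by nodes_fold() before nodes_onto() does the mapping.
 */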
192
37012946
DR
193static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
194{
195 if (nodes_empty(*nodes))
196 return -EINVAL;
197 pol->v.nodes = *nodes;
198 return 0;
199}
200
201static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
202{
203 if (!nodes)
fc36b8d3 204 pol->flags |= MPOL_F_LOCAL; /* local allocation */
37012946
DR
205 else if (nodes_empty(*nodes))
206 return -EINVAL; /* no allowed nodes */
207 else
208 pol->v.preferred_node = first_node(*nodes);
209 return 0;
210}
211
212static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
213{
859f7ef1 214 if (nodes_empty(*nodes))
37012946
DR
215 return -EINVAL;
216 pol->v.nodes = *nodes;
217 return 0;
218}
219
58568d2a
MX
220/*
221 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
222 * any, for the new policy. mpol_new() has already validated the nodes
223 * parameter with respect to the policy mode and flags. But, we need to
224 * handle an empty nodemask with MPOL_PREFERRED here.
225 *
226 * Must be called holding task's alloc_lock to protect task's mems_allowed
c1e8d7c6 227 * and mempolicy. May also be called holding the mmap_lock for write.
58568d2a 228 */
4bfc4495
KH
229static int mpol_set_nodemask(struct mempolicy *pol,
230 const nodemask_t *nodes, struct nodemask_scratch *nsc)
58568d2a 231{
58568d2a
MX
232 int ret;
233
234 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
235 if (pol == NULL)
236 return 0;
01f13bd6 237 /* Check N_MEMORY */
4bfc4495 238 nodes_and(nsc->mask1,
01f13bd6 239 cpuset_current_mems_allowed, node_states[N_MEMORY]);
58568d2a
MX
240
241 VM_BUG_ON(!nodes);
242 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
243 nodes = NULL; /* explicit local allocation */
244 else {
245 if (pol->flags & MPOL_F_RELATIVE_NODES)
859f7ef1 246 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
58568d2a 247 else
4bfc4495
KH
248 nodes_and(nsc->mask2, *nodes, nsc->mask1);
249
58568d2a
MX
250 if (mpol_store_user_nodemask(pol))
251 pol->w.user_nodemask = *nodes;
252 else
253 pol->w.cpuset_mems_allowed =
254 cpuset_current_mems_allowed;
255 }
256
4bfc4495
KH
257 if (nodes)
258 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
259 else
260 ret = mpol_ops[pol->mode].create(pol, NULL);
58568d2a
MX
261 return ret;
262}
263
/*
 * This function just creates a new policy, does some basic checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
028fec41
DR
268static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
269 nodemask_t *nodes)
1da177e4
LT
270{
271 struct mempolicy *policy;
272
028fec41 273 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
00ef2d2f 274 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
140d5a49 275
3e1f0645
DR
276 if (mode == MPOL_DEFAULT) {
277 if (nodes && !nodes_empty(*nodes))
37012946 278 return ERR_PTR(-EINVAL);
d3a71033 279 return NULL;
37012946 280 }
3e1f0645
DR
281 VM_BUG_ON(!nodes);
282
283 /*
284 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
285 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
286 * All other modes require a valid pointer to a non-empty nodemask.
287 */
288 if (mode == MPOL_PREFERRED) {
289 if (nodes_empty(*nodes)) {
290 if (((flags & MPOL_F_STATIC_NODES) ||
291 (flags & MPOL_F_RELATIVE_NODES)))
292 return ERR_PTR(-EINVAL);
3e1f0645 293 }
479e2802 294 } else if (mode == MPOL_LOCAL) {
8d303e44
PK
295 if (!nodes_empty(*nodes) ||
296 (flags & MPOL_F_STATIC_NODES) ||
297 (flags & MPOL_F_RELATIVE_NODES))
479e2802
PZ
298 return ERR_PTR(-EINVAL);
299 mode = MPOL_PREFERRED;
3e1f0645
DR
300 } else if (nodes_empty(*nodes))
301 return ERR_PTR(-EINVAL);
1da177e4
LT
302 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
303 if (!policy)
304 return ERR_PTR(-ENOMEM);
305 atomic_set(&policy->refcnt, 1);
45c4745a 306 policy->mode = mode;
3e1f0645 307 policy->flags = flags;
37012946 308
1da177e4 309 return policy;
37012946
DR
310}
311
52cd3b07
LS
312/* Slow path of a mpol destructor. */
313void __mpol_put(struct mempolicy *p)
314{
315 if (!atomic_dec_and_test(&p->refcnt))
316 return;
52cd3b07
LS
317 kmem_cache_free(policy_cache, p);
318}
319
213980c0 320static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
37012946
DR
321{
322}
323
213980c0 324static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
37012946
DR
325{
326 nodemask_t tmp;
327
328 if (pol->flags & MPOL_F_STATIC_NODES)
329 nodes_and(tmp, pol->w.user_nodemask, *nodes);
330 else if (pol->flags & MPOL_F_RELATIVE_NODES)
331 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
332 else {
213980c0
VB
333 nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
334 *nodes);
29b190fa 335 pol->w.cpuset_mems_allowed = *nodes;
37012946 336 }
f5b087b5 337
708c1bbc
MX
338 if (nodes_empty(tmp))
339 tmp = *nodes;
340
213980c0 341 pol->v.nodes = tmp;
37012946
DR
342}
343
344static void mpol_rebind_preferred(struct mempolicy *pol,
213980c0 345 const nodemask_t *nodes)
37012946
DR
346{
347 nodemask_t tmp;
348
37012946
DR
349 if (pol->flags & MPOL_F_STATIC_NODES) {
350 int node = first_node(pol->w.user_nodemask);
351
fc36b8d3 352 if (node_isset(node, *nodes)) {
37012946 353 pol->v.preferred_node = node;
fc36b8d3
LS
354 pol->flags &= ~MPOL_F_LOCAL;
355 } else
356 pol->flags |= MPOL_F_LOCAL;
37012946
DR
357 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
358 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
359 pol->v.preferred_node = first_node(tmp);
fc36b8d3 360 } else if (!(pol->flags & MPOL_F_LOCAL)) {
37012946
DR
361 pol->v.preferred_node = node_remap(pol->v.preferred_node,
362 pol->w.cpuset_mems_allowed,
363 *nodes);
364 pol->w.cpuset_mems_allowed = *nodes;
365 }
1da177e4
LT
366}
367
708c1bbc
MX
368/*
369 * mpol_rebind_policy - Migrate a policy to a different set of nodes
370 *
c1e8d7c6 371 * Per-vma policies are protected by mmap_lock. Allocations using per-task
213980c0
VB
372 * policies are protected by task->mems_allowed_seq to prevent a premature
373 * OOM/allocation failure due to parallel nodemask modification.
708c1bbc 374 */
213980c0 375static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
1d0d2680 376{
1d0d2680
DR
377 if (!pol)
378 return;
2e25644e 379 if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
1d0d2680
DR
380 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
381 return;
708c1bbc 382
213980c0 383 mpol_ops[pol->mode].rebind(pol, newmask);
1d0d2680
DR
384}
385
386/*
387 * Wrapper for mpol_rebind_policy() that just requires task
388 * pointer, and updates task mempolicy.
58568d2a
MX
389 *
390 * Called with task's alloc_lock held.
1d0d2680
DR
391 */
392
213980c0 393void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1d0d2680 394{
213980c0 395 mpol_rebind_policy(tsk->mempolicy, new);
1d0d2680
DR
396}
397
398/*
399 * Rebind each vma in mm to new nodemask.
400 *
c1e8d7c6 401 * Call holding a reference to mm. Takes mm->mmap_lock during call.
1d0d2680
DR
402 */
403
404void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
405{
406 struct vm_area_struct *vma;
407
d8ed45c5 408 mmap_write_lock(mm);
1d0d2680 409 for (vma = mm->mmap; vma; vma = vma->vm_next)
213980c0 410 mpol_rebind_policy(vma->vm_policy, new);
d8ed45c5 411 mmap_write_unlock(mm);
1d0d2680
DR
412}
413
37012946
DR
414static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
415 [MPOL_DEFAULT] = {
416 .rebind = mpol_rebind_default,
417 },
418 [MPOL_INTERLEAVE] = {
419 .create = mpol_new_interleave,
420 .rebind = mpol_rebind_nodemask,
421 },
422 [MPOL_PREFERRED] = {
423 .create = mpol_new_preferred,
424 .rebind = mpol_rebind_preferred,
425 },
426 [MPOL_BIND] = {
427 .create = mpol_new_bind,
428 .rebind = mpol_rebind_nodemask,
429 },
430};
431
a53190a4 432static int migrate_page_add(struct page *page, struct list_head *pagelist,
fc301289 433 unsigned long flags);
1a75a6c8 434
6f4576e3
NH
435struct queue_pages {
436 struct list_head *pagelist;
437 unsigned long flags;
438 nodemask_t *nmask;
f18da660
LX
439 unsigned long start;
440 unsigned long end;
441 struct vm_area_struct *first;
6f4576e3
NH
442};
443
88aaa2a1
NH
444/*
445 * Check if the page's nid is in qp->nmask.
446 *
447 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
448 * in the invert of qp->nmask.
449 */
450static inline bool queue_pages_required(struct page *page,
451 struct queue_pages *qp)
452{
453 int nid = page_to_nid(page);
454 unsigned long flags = qp->flags;
455
456 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
457}
458
a7f40cfe 459/*
d8835445
YS
460 * queue_pages_pmd() has four possible return values:
461 * 0 - pages are placed on the right node or queued successfully.
462 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
463 * specified.
464 * 2 - THP was split.
465 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
466 * existing page was already on a node that does not follow the
467 * policy.
a7f40cfe 468 */
c8633798
NH
469static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
470 unsigned long end, struct mm_walk *walk)
959a7e13 471 __releases(ptl)
c8633798
NH
472{
473 int ret = 0;
474 struct page *page;
475 struct queue_pages *qp = walk->private;
476 unsigned long flags;
477
478 if (unlikely(is_pmd_migration_entry(*pmd))) {
a7f40cfe 479 ret = -EIO;
c8633798
NH
480 goto unlock;
481 }
482 page = pmd_page(*pmd);
483 if (is_huge_zero_page(page)) {
484 spin_unlock(ptl);
485 __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
d8835445 486 ret = 2;
c8633798
NH
487 goto out;
488 }
d8835445 489 if (!queue_pages_required(page, qp))
c8633798 490 goto unlock;
c8633798 491
c8633798
NH
492 flags = qp->flags;
493 /* go to thp migration */
a7f40cfe 494 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
a53190a4
YS
495 if (!vma_migratable(walk->vma) ||
496 migrate_page_add(page, qp->pagelist, flags)) {
d8835445 497 ret = 1;
a7f40cfe
YS
498 goto unlock;
499 }
a7f40cfe
YS
500 } else
501 ret = -EIO;
c8633798
NH
502unlock:
503 spin_unlock(ptl);
504out:
505 return ret;
506}
507
98094945
NH
508/*
509 * Scan through pages checking if pages follow certain conditions,
510 * and move them to the pagelist if they do.
d8835445
YS
511 *
512 * queue_pages_pte_range() has three possible return values:
513 * 0 - pages are placed on the right node or queued successfully.
514 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
515 * specified.
516 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
517 * on a node that does not follow the policy.
98094945 518 */
6f4576e3
NH
519static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
520 unsigned long end, struct mm_walk *walk)
1da177e4 521{
6f4576e3
NH
522 struct vm_area_struct *vma = walk->vma;
523 struct page *page;
524 struct queue_pages *qp = walk->private;
525 unsigned long flags = qp->flags;
c8633798 526 int ret;
d8835445 527 bool has_unmovable = false;
3f088420 528 pte_t *pte, *mapped_pte;
705e87c0 529 spinlock_t *ptl;
941150a3 530
c8633798
NH
531 ptl = pmd_trans_huge_lock(pmd, vma);
532 if (ptl) {
533 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
d8835445 534 if (ret != 2)
a7f40cfe 535 return ret;
248db92d 536 }
d8835445 537 /* THP was split, fall through to pte walk */
91612e0d 538
337d9abf
NH
539 if (pmd_trans_unstable(pmd))
540 return 0;
94723aaf 541
3f088420 542 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
6f4576e3 543 for (; addr != end; pte++, addr += PAGE_SIZE) {
91612e0d 544 if (!pte_present(*pte))
1da177e4 545 continue;
6aab341e
LT
546 page = vm_normal_page(vma, addr, *pte);
547 if (!page)
1da177e4 548 continue;
053837fc 549 /*
62b61f61
HD
550 * vm_normal_page() filters out zero pages, but there might
551 * still be PageReserved pages to skip, perhaps in a VDSO.
053837fc 552 */
b79bc0a0 553 if (PageReserved(page))
f4598c8b 554 continue;
88aaa2a1 555 if (!queue_pages_required(page, qp))
38e35860 556 continue;
a7f40cfe 557 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
d8835445
YS
558 /* MPOL_MF_STRICT must be specified if we get here */
559 if (!vma_migratable(vma)) {
560 has_unmovable = true;
a7f40cfe 561 break;
d8835445 562 }
a53190a4
YS
563
			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range. Still
			 * need to migrate other LRU pages.
			 */
569 if (migrate_page_add(page, qp->pagelist, flags))
570 has_unmovable = true;
a7f40cfe
YS
571 } else
572 break;
6f4576e3 573 }
3f088420 574 pte_unmap_unlock(mapped_pte, ptl);
6f4576e3 575 cond_resched();
d8835445
YS
576
577 if (has_unmovable)
578 return 1;
579
a7f40cfe 580 return addr != end ? -EIO : 0;
91612e0d
HD
581}
582
6f4576e3
NH
583static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
584 unsigned long addr, unsigned long end,
585 struct mm_walk *walk)
e2d8cf40 586{
dcf17635 587 int ret = 0;
e2d8cf40 588#ifdef CONFIG_HUGETLB_PAGE
6f4576e3 589 struct queue_pages *qp = walk->private;
dcf17635 590 unsigned long flags = (qp->flags & MPOL_MF_VALID);
e2d8cf40 591 struct page *page;
cb900f41 592 spinlock_t *ptl;
d4c54919 593 pte_t entry;
e2d8cf40 594
6f4576e3
NH
595 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
596 entry = huge_ptep_get(pte);
d4c54919
NH
597 if (!pte_present(entry))
598 goto unlock;
599 page = pte_page(entry);
88aaa2a1 600 if (!queue_pages_required(page, qp))
e2d8cf40 601 goto unlock;
dcf17635
LX
602
603 if (flags == MPOL_MF_STRICT) {
604 /*
605 * STRICT alone means only detecting misplaced page and no
606 * need to further check other vma.
607 */
608 ret = -EIO;
609 goto unlock;
610 }
611
612 if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detecting misplaced page but allow migrating pages which
		 * have been queued.
		 */
619 ret = 1;
620 goto unlock;
621 }
622
e2d8cf40
NH
623 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
624 if (flags & (MPOL_MF_MOVE_ALL) ||
dcf17635
LX
625 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
626 if (!isolate_huge_page(page, qp->pagelist) &&
627 (flags & MPOL_MF_STRICT))
628 /*
629 * Failed to isolate page but allow migrating pages
630 * which have been queued.
631 */
632 ret = 1;
633 }
e2d8cf40 634unlock:
cb900f41 635 spin_unlock(ptl);
e2d8cf40
NH
636#else
637 BUG();
638#endif
dcf17635 639 return ret;
1da177e4
LT
640}
641
5877231f 642#ifdef CONFIG_NUMA_BALANCING
b24f53a0 643/*
4b10e7d5
MG
644 * This is used to mark a range of virtual addresses to be inaccessible.
645 * These are later cleared by a NUMA hinting fault. Depending on these
646 * faults, pages may be migrated for better NUMA placement.
647 *
648 * This is assuming that NUMA faults are handled using PROT_NONE. If
649 * an architecture makes a different choice, it will need further
650 * changes to the core.
b24f53a0 651 */
4b10e7d5
MG
652unsigned long change_prot_numa(struct vm_area_struct *vma,
653 unsigned long addr, unsigned long end)
b24f53a0 654{
4b10e7d5 655 int nr_updated;
b24f53a0 656
58705444 657 nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
03c5a6e1
MG
658 if (nr_updated)
659 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
b24f53a0 660
4b10e7d5 661 return nr_updated;
b24f53a0
LS
662}
663#else
664static unsigned long change_prot_numa(struct vm_area_struct *vma,
665 unsigned long addr, unsigned long end)
666{
667 return 0;
668}
5877231f 669#endif /* CONFIG_NUMA_BALANCING */
b24f53a0 670
6f4576e3
NH
671static int queue_pages_test_walk(unsigned long start, unsigned long end,
672 struct mm_walk *walk)
673{
674 struct vm_area_struct *vma = walk->vma;
675 struct queue_pages *qp = walk->private;
676 unsigned long endvma = vma->vm_end;
677 unsigned long flags = qp->flags;
678
a18b3ac2 679 /* range check first */
d888fb2b 680 VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma);
f18da660
LX
681
682 if (!qp->first) {
683 qp->first = vma;
684 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
685 (qp->start < vma->vm_start))
686 /* hole at head side of range */
a18b3ac2
LX
687 return -EFAULT;
688 }
f18da660
LX
689 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
690 ((vma->vm_end < qp->end) &&
691 (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
692 /* hole at middle or tail of range */
693 return -EFAULT;
a18b3ac2 694
a7f40cfe
YS
	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
699 if (!vma_migratable(vma) &&
700 !(flags & MPOL_MF_STRICT))
48684a65
NH
701 return 1;
702
6f4576e3
NH
703 if (endvma > end)
704 endvma = end;
6f4576e3 705
6f4576e3
NH
706 if (flags & MPOL_MF_LAZY) {
707 /* Similar to task_numa_work, skip inaccessible VMAs */
3122e80e 708 if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
4355c018 709 !(vma->vm_flags & VM_MIXEDMAP))
6f4576e3
NH
710 change_prot_numa(vma, start, endvma);
711 return 1;
712 }
713
77bf45e7 714 /* queue pages from current vma */
a7f40cfe 715 if (flags & MPOL_MF_VALID)
6f4576e3
NH
716 return 0;
717 return 1;
718}
719
7b86ac33
CH
720static const struct mm_walk_ops queue_pages_walk_ops = {
721 .hugetlb_entry = queue_pages_hugetlb,
722 .pmd_entry = queue_pages_pte_range,
723 .test_walk = queue_pages_test_walk,
724};
725
/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which is
 * passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
d05f0cdc 741static int
98094945 742queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6f4576e3
NH
743 nodemask_t *nodes, unsigned long flags,
744 struct list_head *pagelist)
1da177e4 745{
f18da660 746 int err;
6f4576e3
NH
747 struct queue_pages qp = {
748 .pagelist = pagelist,
749 .flags = flags,
750 .nmask = nodes,
f18da660
LX
751 .start = start,
752 .end = end,
753 .first = NULL,
6f4576e3 754 };
6f4576e3 755
f18da660
LX
756 err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
757
758 if (!qp.first)
759 /* whole range in hole */
760 err = -EFAULT;
761
762 return err;
1da177e4
LT
763}
764
869833f2
KM
765/*
766 * Apply policy to a single VMA
c1e8d7c6 767 * This must be called with the mmap_lock held for writing.
869833f2
KM
768 */
769static int vma_replace_policy(struct vm_area_struct *vma,
770 struct mempolicy *pol)
8d34694c 771{
869833f2
KM
772 int err;
773 struct mempolicy *old;
774 struct mempolicy *new;
8d34694c
KM
775
776 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
777 vma->vm_start, vma->vm_end, vma->vm_pgoff,
778 vma->vm_ops, vma->vm_file,
779 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
780
869833f2
KM
781 new = mpol_dup(pol);
782 if (IS_ERR(new))
783 return PTR_ERR(new);
784
785 if (vma->vm_ops && vma->vm_ops->set_policy) {
8d34694c 786 err = vma->vm_ops->set_policy(vma, new);
869833f2
KM
787 if (err)
788 goto err_out;
8d34694c 789 }
869833f2
KM
790
791 old = vma->vm_policy;
c1e8d7c6 792 vma->vm_policy = new; /* protected by mmap_lock */
869833f2
KM
793 mpol_put(old);
794
795 return 0;
796 err_out:
797 mpol_put(new);
8d34694c
KM
798 return err;
799}
800
1da177e4 801/* Step 2: apply policy to a range and do splits. */
9d8cebd4
KM
802static int mbind_range(struct mm_struct *mm, unsigned long start,
803 unsigned long end, struct mempolicy *new_pol)
1da177e4
LT
804{
805 struct vm_area_struct *next;
9d8cebd4
KM
806 struct vm_area_struct *prev;
807 struct vm_area_struct *vma;
808 int err = 0;
e26a5114 809 pgoff_t pgoff;
9d8cebd4
KM
810 unsigned long vmstart;
811 unsigned long vmend;
1da177e4 812
097d5910 813 vma = find_vma(mm, start);
f18da660 814 VM_BUG_ON(!vma);
9d8cebd4 815
097d5910 816 prev = vma->vm_prev;
e26a5114
KM
817 if (start > vma->vm_start)
818 prev = vma;
819
9d8cebd4 820 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
1da177e4 821 next = vma->vm_next;
9d8cebd4
KM
822 vmstart = max(start, vma->vm_start);
823 vmend = min(end, vma->vm_end);
824
e26a5114
KM
825 if (mpol_equal(vma_policy(vma), new_pol))
826 continue;
827
828 pgoff = vma->vm_pgoff +
829 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
9d8cebd4 830 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
19a809af
AA
831 vma->anon_vma, vma->vm_file, pgoff,
832 new_pol, vma->vm_userfaultfd_ctx);
9d8cebd4
KM
833 if (prev) {
834 vma = prev;
835 next = vma->vm_next;
3964acd0
ON
836 if (mpol_equal(vma_policy(vma), new_pol))
837 continue;
838 /* vma_merge() joined vma && vma->next, case 8 */
839 goto replace;
9d8cebd4
KM
840 }
841 if (vma->vm_start != vmstart) {
842 err = split_vma(vma->vm_mm, vma, vmstart, 1);
843 if (err)
844 goto out;
845 }
846 if (vma->vm_end != vmend) {
847 err = split_vma(vma->vm_mm, vma, vmend, 0);
848 if (err)
849 goto out;
850 }
3964acd0 851 replace:
869833f2 852 err = vma_replace_policy(vma, new_pol);
8d34694c
KM
853 if (err)
854 goto out;
1da177e4 855 }
9d8cebd4
KM
856
857 out:
1da177e4
LT
858 return err;
859}
860
1da177e4 861/* Set the process memory policy */
028fec41
DR
862static long do_set_mempolicy(unsigned short mode, unsigned short flags,
863 nodemask_t *nodes)
1da177e4 864{
58568d2a 865 struct mempolicy *new, *old;
4bfc4495 866 NODEMASK_SCRATCH(scratch);
58568d2a 867 int ret;
1da177e4 868
4bfc4495
KH
869 if (!scratch)
870 return -ENOMEM;
f4e53d91 871
4bfc4495
KH
872 new = mpol_new(mode, flags, nodes);
873 if (IS_ERR(new)) {
874 ret = PTR_ERR(new);
875 goto out;
876 }
2c7c3a7d 877
4bfc4495 878 ret = mpol_set_nodemask(new, nodes, scratch);
58568d2a 879 if (ret) {
58568d2a 880 mpol_put(new);
4bfc4495 881 goto out;
58568d2a 882 }
78b132e9 883 task_lock(current);
58568d2a 884 old = current->mempolicy;
1da177e4 885 current->mempolicy = new;
45816682
VB
886 if (new && new->mode == MPOL_INTERLEAVE)
887 current->il_prev = MAX_NUMNODES-1;
58568d2a 888 task_unlock(current);
58568d2a 889 mpol_put(old);
4bfc4495
KH
890 ret = 0;
891out:
892 NODEMASK_SCRATCH_FREE(scratch);
893 return ret;
1da177e4
LT
894}
895
bea904d5
LS
896/*
897 * Return nodemask for policy for get_mempolicy() query
58568d2a
MX
898 *
899 * Called with task's alloc_lock held
bea904d5
LS
900 */
901static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 902{
dfcd3c0d 903 nodes_clear(*nodes);
bea904d5
LS
904 if (p == &default_policy)
905 return;
906
45c4745a 907 switch (p->mode) {
19770b32 908 case MPOL_BIND:
1da177e4 909 case MPOL_INTERLEAVE:
dfcd3c0d 910 *nodes = p->v.nodes;
1da177e4
LT
911 break;
912 case MPOL_PREFERRED:
fc36b8d3 913 if (!(p->flags & MPOL_F_LOCAL))
dfcd3c0d 914 node_set(p->v.preferred_node, *nodes);
53f2556b 915 /* else return empty node mask for local allocation */
1da177e4
LT
916 break;
917 default:
918 BUG();
919 }
920}
921
3b9aadf7 922static int lookup_node(struct mm_struct *mm, unsigned long addr)
1da177e4 923{
ba841078 924 struct page *p = NULL;
1da177e4
LT
925 int err;
926
3b9aadf7
AA
927 int locked = 1;
928 err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
2d3a36a4 929 if (err > 0) {
1da177e4
LT
930 err = page_to_nid(p);
931 put_page(p);
932 }
3b9aadf7 933 if (locked)
d8ed45c5 934 mmap_read_unlock(mm);
1da177e4
LT
935 return err;
936}
937
1da177e4 938/* Retrieve NUMA policy */
dbcb0f19
AB
939static long do_get_mempolicy(int *policy, nodemask_t *nmask,
940 unsigned long addr, unsigned long flags)
1da177e4 941{
8bccd85f 942 int err;
1da177e4
LT
943 struct mm_struct *mm = current->mm;
944 struct vm_area_struct *vma = NULL;
3b9aadf7 945 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
1da177e4 946
754af6f5
LS
947 if (flags &
948 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 949 return -EINVAL;
754af6f5
LS
950
951 if (flags & MPOL_F_MEMS_ALLOWED) {
952 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
953 return -EINVAL;
954 *policy = 0; /* just so it's initialized */
58568d2a 955 task_lock(current);
754af6f5 956 *nmask = cpuset_current_mems_allowed;
58568d2a 957 task_unlock(current);
754af6f5
LS
958 return 0;
959 }
960
1da177e4 961 if (flags & MPOL_F_ADDR) {
bea904d5
LS
962 /*
963 * Do NOT fall back to task policy if the
964 * vma/shared policy at addr is NULL. We
965 * want to return MPOL_DEFAULT in this case.
966 */
d8ed45c5 967 mmap_read_lock(mm);
1da177e4
LT
968 vma = find_vma_intersection(mm, addr, addr+1);
969 if (!vma) {
d8ed45c5 970 mmap_read_unlock(mm);
1da177e4
LT
971 return -EFAULT;
972 }
973 if (vma->vm_ops && vma->vm_ops->get_policy)
974 pol = vma->vm_ops->get_policy(vma, addr);
975 else
976 pol = vma->vm_policy;
977 } else if (addr)
978 return -EINVAL;
979
980 if (!pol)
bea904d5 981 pol = &default_policy; /* indicates default behavior */
1da177e4
LT
982
983 if (flags & MPOL_F_NODE) {
984 if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
991 pol_refcount = pol;
992 vma = NULL;
993 mpol_get(pol);
994 err = lookup_node(mm, addr);
1da177e4
LT
995 if (err < 0)
996 goto out;
8bccd85f 997 *policy = err;
1da177e4 998 } else if (pol == current->mempolicy &&
45c4745a 999 pol->mode == MPOL_INTERLEAVE) {
45816682 1000 *policy = next_node_in(current->il_prev, pol->v.nodes);
1da177e4
LT
1001 } else {
1002 err = -EINVAL;
1003 goto out;
1004 }
bea904d5
LS
1005 } else {
1006 *policy = pol == &default_policy ? MPOL_DEFAULT :
1007 pol->mode;
d79df630
DR
1008 /*
1009 * Internal mempolicy flags must be masked off before exposing
1010 * the policy to userspace.
1011 */
1012 *policy |= (pol->flags & MPOL_MODE_FLAGS);
bea904d5 1013 }
1da177e4 1014
1da177e4 1015 err = 0;
58568d2a 1016 if (nmask) {
c6b6ef8b
LS
1017 if (mpol_store_user_nodemask(pol)) {
1018 *nmask = pol->w.user_nodemask;
1019 } else {
1020 task_lock(current);
1021 get_policy_nodemask(pol, nmask);
1022 task_unlock(current);
1023 }
58568d2a 1024 }
1da177e4
LT
1025
1026 out:
52cd3b07 1027 mpol_cond_put(pol);
1da177e4 1028 if (vma)
d8ed45c5 1029 mmap_read_unlock(mm);
3b9aadf7
AA
1030 if (pol_refcount)
1031 mpol_put(pol_refcount);
1da177e4
LT
1032 return err;
1033}
1034
b20a3503 1035#ifdef CONFIG_MIGRATION
6ce3c4c0 1036/*
c8633798 1037 * page migration, thp tail pages can be passed.
6ce3c4c0 1038 */
a53190a4 1039static int migrate_page_add(struct page *page, struct list_head *pagelist,
fc301289 1040 unsigned long flags)
6ce3c4c0 1041{
c8633798 1042 struct page *head = compound_head(page);
6ce3c4c0 1043 /*
fc301289 1044 * Avoid migrating a page that is shared with others.
6ce3c4c0 1045 */
c8633798
NH
1046 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1047 if (!isolate_lru_page(head)) {
1048 list_add_tail(&head->lru, pagelist);
1049 mod_node_page_state(page_pgdat(head),
9de4f22a 1050 NR_ISOLATED_ANON + page_is_file_lru(head),
6c357848 1051 thp_nr_pages(head));
a53190a4
YS
1052 } else if (flags & MPOL_MF_STRICT) {
1053 /*
1054 * Non-movable page may reach here. And, there may be
1055 * temporary off LRU pages or non-LRU movable pages.
1056 * Treat them as unmovable pages since they can't be
1057 * isolated, so they can't be moved at the moment. It
1058 * should return -EIO for this case too.
1059 */
1060 return -EIO;
62695a84
NP
1061 }
1062 }
a53190a4
YS
1063
1064 return 0;
7e2ab150 1065}
6ce3c4c0 1066
7e2ab150
CL
1067/*
1068 * Migrate pages from one node to a target node.
1069 * Returns error or the number of pages not migrated.
1070 */
dbcb0f19
AB
1071static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1072 int flags)
7e2ab150
CL
1073{
1074 nodemask_t nmask;
1075 LIST_HEAD(pagelist);
1076 int err = 0;
a0976311
JK
1077 struct migration_target_control mtc = {
1078 .nid = dest,
1079 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1080 };
7e2ab150
CL
1081
1082 nodes_clear(nmask);
1083 node_set(source, nmask);
6ce3c4c0 1084
08270807
MK
1085 /*
1086 * This does not "check" the range but isolates all pages that
1087 * need migration. Between passing in the full user address
1088 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1089 */
1090 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
98094945 1091 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
7e2ab150
CL
1092 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1093
cf608ac1 1094 if (!list_empty(&pagelist)) {
a0976311
JK
1095 err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1096 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
cf608ac1 1097 if (err)
e2d8cf40 1098 putback_movable_pages(&pagelist);
cf608ac1 1099 }
95a402c3 1100
7e2ab150 1101 return err;
6ce3c4c0
CL
1102}
1103
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
0ce72d4f
AM
1110int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1111 const nodemask_t *to, int flags)
39743889 1112{
7e2ab150 1113 int busy = 0;
0aedadf9 1114 int err;
7e2ab150 1115 nodemask_t tmp;
39743889 1116
0aedadf9
CL
1117 err = migrate_prep();
1118 if (err)
1119 return err;
1120
d8ed45c5 1121 mmap_read_lock(mm);
39743889 1122
da0aa138
KM
1123 /*
1124 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1125 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1126 * bit in 'tmp', and return that <source, dest> pair for migration.
1127 * The pair of nodemasks 'to' and 'from' define the map.
1128 *
1129 * If no pair of bits is found that way, fallback to picking some
1130 * pair of 'source' and 'dest' bits that are not the same. If the
1131 * 'source' and 'dest' bits are the same, this represents a node
1132 * that will be migrating to itself, so no pages need move.
1133 *
1134 * If no bits are left in 'tmp', or if all remaining bits left
1135 * in 'tmp' correspond to the same bit in 'to', return false
1136 * (nothing left to migrate).
1137 *
1138 * This lets us pick a pair of nodes to migrate between, such that
1139 * if possible the dest node is not already occupied by some other
1140 * source node, minimizing the risk of overloading the memory on a
1141 * node that would happen if we migrated incoming memory to a node
1142 * before migrating outgoing memory source that same node.
1143 *
1144 * A single scan of tmp is sufficient. As we go, we remember the
1145 * most recent <s, d> pair that moved (s != d). If we find a pair
1146 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out with that pair.
	 * Otherwise, when we finish scanning tmp, we at least have the
da0aa138
KM
1149 * most recent <s, d> pair that moved. If we get all the way through
1150 * the scan of tmp without finding any node that moved, much less
1151 * moved to an empty node, then there is nothing left worth migrating.
1152 */
d4984711 1153
0ce72d4f 1154 tmp = *from;
7e2ab150
CL
1155 while (!nodes_empty(tmp)) {
1156 int s,d;
b76ac7e7 1157 int source = NUMA_NO_NODE;
7e2ab150
CL
1158 int dest = 0;
1159
1160 for_each_node_mask(s, tmp) {
4a5b18cc
LW
1161
1162 /*
1163 * do_migrate_pages() tries to maintain the relative
1164 * node relationship of the pages established between
1165 * threads and memory areas.
1166 *
1167 * However if the number of source nodes is not equal to
1168 * the number of destination nodes we can not preserve
1169 * this node relative relationship. In that case, skip
1170 * copying memory from a node that is in the destination
1171 * mask.
1172 *
1173 * Example: [2,3,4] -> [3,4,5] moves everything.
1174 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1175 */
1176
0ce72d4f
AM
1177 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1178 (node_isset(s, *to)))
4a5b18cc
LW
1179 continue;
1180
0ce72d4f 1181 d = node_remap(s, *from, *to);
7e2ab150
CL
1182 if (s == d)
1183 continue;
1184
1185 source = s; /* Node moved. Memorize */
1186 dest = d;
1187
1188 /* dest not in remaining from nodes? */
1189 if (!node_isset(dest, tmp))
1190 break;
1191 }
b76ac7e7 1192 if (source == NUMA_NO_NODE)
7e2ab150
CL
1193 break;
1194
1195 node_clear(source, tmp);
1196 err = migrate_to_node(mm, source, dest, flags);
1197 if (err > 0)
1198 busy += err;
1199 if (err < 0)
1200 break;
39743889 1201 }
d8ed45c5 1202 mmap_read_unlock(mm);
7e2ab150
CL
1203 if (err < 0)
1204 return err;
1205 return busy;
b20a3503
CL
1206
1207}
1208
3ad33b24
LS
1209/*
1210 * Allocate a new page for page migration based on vma policy.
d05f0cdc 1211 * Start by assuming the page is mapped by the same vma as contains @start.
3ad33b24
LS
1212 * Search forward from there, if not. N.B., this assumes that the
1213 * list of pages handed to migrate_pages()--which is how we get here--
1214 * is in virtual address order.
1215 */
666feb21 1216static struct page *new_page(struct page *page, unsigned long start)
95a402c3 1217{
d05f0cdc 1218 struct vm_area_struct *vma;
3f649ab7 1219 unsigned long address;
95a402c3 1220
d05f0cdc 1221 vma = find_vma(current->mm, start);
3ad33b24
LS
1222 while (vma) {
1223 address = page_address_in_vma(page, vma);
1224 if (address != -EFAULT)
1225 break;
1226 vma = vma->vm_next;
1227 }
11c731e8
WL
1228
1229 if (PageHuge(page)) {
389c8178
MH
1230 return alloc_huge_page_vma(page_hstate(compound_head(page)),
1231 vma, address);
94723aaf 1232 } else if (PageTransHuge(page)) {
c8633798
NH
1233 struct page *thp;
1234
19deb769
DR
1235 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1236 HPAGE_PMD_ORDER);
c8633798
NH
1237 if (!thp)
1238 return NULL;
1239 prep_transhuge_page(thp);
1240 return thp;
11c731e8 1241 }
0bf598d8 1242 /*
11c731e8 1243 * if !vma, alloc_page_vma() will use task or system default policy
0bf598d8 1244 */
0f556856
MH
1245 return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1246 vma, address);
95a402c3 1247}
b20a3503
CL
1248#else
1249
a53190a4 1250static int migrate_page_add(struct page *page, struct list_head *pagelist,
b20a3503
CL
1251 unsigned long flags)
1252{
a53190a4 1253 return -EIO;
39743889
CL
1254}
1255
0ce72d4f
AM
1256int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1257 const nodemask_t *to, int flags)
b20a3503
CL
1258{
1259 return -ENOSYS;
1260}
95a402c3 1261
666feb21 1262static struct page *new_page(struct page *page, unsigned long start)
95a402c3
CL
1263{
1264 return NULL;
1265}
b20a3503
CL
1266#endif
1267
dbcb0f19 1268static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
1269 unsigned short mode, unsigned short mode_flags,
1270 nodemask_t *nmask, unsigned long flags)
6ce3c4c0 1271{
6ce3c4c0
CL
1272 struct mm_struct *mm = current->mm;
1273 struct mempolicy *new;
1274 unsigned long end;
1275 int err;
d8835445 1276 int ret;
6ce3c4c0
CL
1277 LIST_HEAD(pagelist);
1278
b24f53a0 1279 if (flags & ~(unsigned long)MPOL_MF_VALID)
6ce3c4c0 1280 return -EINVAL;
74c00241 1281 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
1282 return -EPERM;
1283
1284 if (start & ~PAGE_MASK)
1285 return -EINVAL;
1286
1287 if (mode == MPOL_DEFAULT)
1288 flags &= ~MPOL_MF_STRICT;
1289
1290 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1291 end = start + len;
1292
1293 if (end < start)
1294 return -EINVAL;
1295 if (end == start)
1296 return 0;
1297
028fec41 1298 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
1299 if (IS_ERR(new))
1300 return PTR_ERR(new);
1301
b24f53a0
LS
1302 if (flags & MPOL_MF_LAZY)
1303 new->flags |= MPOL_F_MOF;
1304
6ce3c4c0
CL
1305 /*
1306 * If we are using the default policy then operation
1307 * on discontinuous address spaces is okay after all
1308 */
1309 if (!new)
1310 flags |= MPOL_MF_DISCONTIG_OK;
1311
028fec41
DR
1312 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1313 start, start + len, mode, mode_flags,
00ef2d2f 1314 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
6ce3c4c0 1315
0aedadf9
CL
1316 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1317
1318 err = migrate_prep();
1319 if (err)
b05ca738 1320 goto mpol_out;
0aedadf9 1321 }
4bfc4495
KH
1322 {
1323 NODEMASK_SCRATCH(scratch);
1324 if (scratch) {
d8ed45c5 1325 mmap_write_lock(mm);
4bfc4495 1326 err = mpol_set_nodemask(new, nmask, scratch);
4bfc4495 1327 if (err)
d8ed45c5 1328 mmap_write_unlock(mm);
4bfc4495
KH
1329 } else
1330 err = -ENOMEM;
1331 NODEMASK_SCRATCH_FREE(scratch);
1332 }
b05ca738
KM
1333 if (err)
1334 goto mpol_out;
1335
d8835445 1336 ret = queue_pages_range(mm, start, end, nmask,
6ce3c4c0 1337 flags | MPOL_MF_INVERT, &pagelist);
d8835445
YS
1338
1339 if (ret < 0) {
a85dfc30 1340 err = ret;
d8835445
YS
1341 goto up_out;
1342 }
1343
1344 err = mbind_range(mm, start, end, new);
7e2ab150 1345
b24f53a0
LS
1346 if (!err) {
1347 int nr_failed = 0;
1348
cf608ac1 1349 if (!list_empty(&pagelist)) {
b24f53a0 1350 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
d05f0cdc
HD
1351 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1352 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
cf608ac1 1353 if (nr_failed)
74060e4d 1354 putback_movable_pages(&pagelist);
cf608ac1 1355 }
6ce3c4c0 1356
d8835445 1357 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
6ce3c4c0 1358 err = -EIO;
a85dfc30 1359 } else {
d8835445 1360up_out:
a85dfc30
YS
1361 if (!list_empty(&pagelist))
1362 putback_movable_pages(&pagelist);
1363 }
1364
d8ed45c5 1365 mmap_write_unlock(mm);
d8835445 1366mpol_out:
f0be3d32 1367 mpol_put(new);
6ce3c4c0
CL
1368 return err;
1369}
1370
8bccd85f
CL
1371/*
1372 * User space interface with variable sized bitmaps for nodelists.
1373 */
1374
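/*
 * Illustrative note (a sketch of the calling convention, assuming a single
 * unsigned long is enough for the caller's nodes): userspace passes a raw
 * bitmap plus maxnode, and get_nodes() below decrements maxnode and then
 * treats the result as the number of valid bits in that bitmap. So to name
 * nodes 0 and 1 a caller could do:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	set_mempolicy(MPOL_BIND, &mask, 8 * sizeof(mask));
 */
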
1375/* Copy a node mask from user space. */
39743889 1376static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
1377 unsigned long maxnode)
1378{
1379 unsigned long k;
56521e7a 1380 unsigned long t;
8bccd85f
CL
1381 unsigned long nlongs;
1382 unsigned long endmask;
1383
1384 --maxnode;
1385 nodes_clear(*nodes);
1386 if (maxnode == 0 || !nmask)
1387 return 0;
a9c930ba 1388 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 1389 return -EINVAL;
8bccd85f
CL
1390
1391 nlongs = BITS_TO_LONGS(maxnode);
1392 if ((maxnode % BITS_PER_LONG) == 0)
1393 endmask = ~0UL;
1394 else
1395 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1396
56521e7a
YX
1397 /*
1398 * When the user specified more nodes than supported just check
1399 * if the non supported part is all zero.
1400 *
1401 * If maxnode have more longs than MAX_NUMNODES, check
1402 * the bits in that area first. And then go through to
1403 * check the rest bits which equal or bigger than MAX_NUMNODES.
1404 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1405 */
8bccd85f 1406 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
8bccd85f 1407 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
8bccd85f
CL
1408 if (get_user(t, nmask + k))
1409 return -EFAULT;
1410 if (k == nlongs - 1) {
1411 if (t & endmask)
1412 return -EINVAL;
1413 } else if (t)
1414 return -EINVAL;
1415 }
1416 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1417 endmask = ~0UL;
1418 }
1419
56521e7a
YX
1420 if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1421 unsigned long valid_mask = endmask;
1422
1423 valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1424 if (get_user(t, nmask + nlongs - 1))
1425 return -EFAULT;
1426 if (t & valid_mask)
1427 return -EINVAL;
1428 }
1429
8bccd85f
CL
1430 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1431 return -EFAULT;
1432 nodes_addr(*nodes)[nlongs-1] &= endmask;
1433 return 0;
1434}
1435
1436/* Copy a kernel node mask to user space */
1437static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1438 nodemask_t *nodes)
1439{
1440 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
050c17f2 1441 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
8bccd85f
CL
1442
1443 if (copy > nbytes) {
1444 if (copy > PAGE_SIZE)
1445 return -EINVAL;
1446 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1447 return -EFAULT;
1448 copy = nbytes;
1449 }
1450 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1451}
1452
e7dc9ad6
DB
1453static long kernel_mbind(unsigned long start, unsigned long len,
1454 unsigned long mode, const unsigned long __user *nmask,
1455 unsigned long maxnode, unsigned int flags)
8bccd85f
CL
1456{
1457 nodemask_t nodes;
1458 int err;
028fec41 1459 unsigned short mode_flags;
8bccd85f 1460
057d3389 1461 start = untagged_addr(start);
028fec41
DR
1462 mode_flags = mode & MPOL_MODE_FLAGS;
1463 mode &= ~MPOL_MODE_FLAGS;
a3b51e01
DR
1464 if (mode >= MPOL_MAX)
1465 return -EINVAL;
4c50bc01
DR
1466 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1467 (mode_flags & MPOL_F_RELATIVE_NODES))
1468 return -EINVAL;
8bccd85f
CL
1469 err = get_nodes(&nodes, nmask, maxnode);
1470 if (err)
1471 return err;
028fec41 1472 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
8bccd85f
CL
1473}
1474
e7dc9ad6
DB
1475SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1476 unsigned long, mode, const unsigned long __user *, nmask,
1477 unsigned long, maxnode, unsigned int, flags)
1478{
1479 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1480}
1481
8bccd85f 1482/* Set the process memory policy */
af03c4ac
DB
1483static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1484 unsigned long maxnode)
8bccd85f
CL
1485{
1486 int err;
1487 nodemask_t nodes;
028fec41 1488 unsigned short flags;
8bccd85f 1489
028fec41
DR
1490 flags = mode & MPOL_MODE_FLAGS;
1491 mode &= ~MPOL_MODE_FLAGS;
1492 if ((unsigned int)mode >= MPOL_MAX)
8bccd85f 1493 return -EINVAL;
4c50bc01
DR
1494 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1495 return -EINVAL;
8bccd85f
CL
1496 err = get_nodes(&nodes, nmask, maxnode);
1497 if (err)
1498 return err;
028fec41 1499 return do_set_mempolicy(mode, flags, &nodes);
8bccd85f
CL
1500}
1501
af03c4ac
DB
1502SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1503 unsigned long, maxnode)
1504{
1505 return kernel_set_mempolicy(mode, nmask, maxnode);
1506}
1507
b6e9b0ba
DB
1508static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1509 const unsigned long __user *old_nodes,
1510 const unsigned long __user *new_nodes)
39743889 1511{
596d7cfa 1512 struct mm_struct *mm = NULL;
39743889 1513 struct task_struct *task;
39743889
CL
1514 nodemask_t task_nodes;
1515 int err;
596d7cfa
KM
1516 nodemask_t *old;
1517 nodemask_t *new;
1518 NODEMASK_SCRATCH(scratch);
1519
1520 if (!scratch)
1521 return -ENOMEM;
39743889 1522
596d7cfa
KM
1523 old = &scratch->mask1;
1524 new = &scratch->mask2;
1525
1526 err = get_nodes(old, old_nodes, maxnode);
39743889 1527 if (err)
596d7cfa 1528 goto out;
39743889 1529
596d7cfa 1530 err = get_nodes(new, new_nodes, maxnode);
39743889 1531 if (err)
596d7cfa 1532 goto out;
39743889
CL
1533
1534 /* Find the mm_struct */
55cfaa3c 1535 rcu_read_lock();
228ebcbe 1536 task = pid ? find_task_by_vpid(pid) : current;
39743889 1537 if (!task) {
55cfaa3c 1538 rcu_read_unlock();
596d7cfa
KM
1539 err = -ESRCH;
1540 goto out;
39743889 1541 }
3268c63e 1542 get_task_struct(task);
39743889 1543
596d7cfa 1544 err = -EINVAL;
39743889
CL
1545
1546 /*
31367466
OE
1547 * Check if this process has the right to modify the specified process.
1548 * Use the regular "ptrace_may_access()" checks.
39743889 1549 */
31367466 1550 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 1551 rcu_read_unlock();
39743889 1552 err = -EPERM;
3268c63e 1553 goto out_put;
39743889 1554 }
c69e8d9c 1555 rcu_read_unlock();
39743889
CL
1556
1557 task_nodes = cpuset_mems_allowed(task);
1558 /* Is the user allowed to access the target nodes? */
596d7cfa 1559 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889 1560 err = -EPERM;
3268c63e 1561 goto out_put;
39743889
CL
1562 }
1563
0486a38b
YX
1564 task_nodes = cpuset_mems_allowed(current);
1565 nodes_and(*new, *new, task_nodes);
1566 if (nodes_empty(*new))
1567 goto out_put;
1568
86c3a764
DQ
1569 err = security_task_movememory(task);
1570 if (err)
3268c63e 1571 goto out_put;
86c3a764 1572
3268c63e
CL
1573 mm = get_task_mm(task);
1574 put_task_struct(task);
f2a9ef88
SL
1575
1576 if (!mm) {
3268c63e 1577 err = -EINVAL;
f2a9ef88
SL
1578 goto out;
1579 }
1580
1581 err = do_migrate_pages(mm, old, new,
1582 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
3268c63e
CL
1583
1584 mmput(mm);
1585out:
596d7cfa
KM
1586 NODEMASK_SCRATCH_FREE(scratch);
1587
39743889 1588 return err;
3268c63e
CL
1589
1590out_put:
1591 put_task_struct(task);
1592 goto out;
1593
39743889
CL
1594}
1595
b6e9b0ba
DB
1596SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1597 const unsigned long __user *, old_nodes,
1598 const unsigned long __user *, new_nodes)
1599{
1600 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1601}
1602
39743889 1603
8bccd85f 1604/* Retrieve NUMA policy */
af03c4ac
DB
1605static int kernel_get_mempolicy(int __user *policy,
1606 unsigned long __user *nmask,
1607 unsigned long maxnode,
1608 unsigned long addr,
1609 unsigned long flags)
8bccd85f 1610{
dbcb0f19 1611 int err;
3f649ab7 1612 int pval;
8bccd85f
CL
1613 nodemask_t nodes;
1614
050c17f2 1615 if (nmask != NULL && maxnode < nr_node_ids)
8bccd85f
CL
1616 return -EINVAL;
1617
4605f057
WH
1618 addr = untagged_addr(addr);
1619
8bccd85f
CL
1620 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1621
1622 if (err)
1623 return err;
1624
1625 if (policy && put_user(pval, policy))
1626 return -EFAULT;
1627
1628 if (nmask)
1629 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1630
1631 return err;
1632}
1633
af03c4ac
DB
1634SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1635 unsigned long __user *, nmask, unsigned long, maxnode,
1636 unsigned long, addr, unsigned long, flags)
1637{
1638 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1639}
1640
1da177e4
LT
1641#ifdef CONFIG_COMPAT
1642
c93e0f6c
HC
1643COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1644 compat_ulong_t __user *, nmask,
1645 compat_ulong_t, maxnode,
1646 compat_ulong_t, addr, compat_ulong_t, flags)
1da177e4
LT
1647{
1648 long err;
1649 unsigned long __user *nm = NULL;
1650 unsigned long nr_bits, alloc_size;
1651 DECLARE_BITMAP(bm, MAX_NUMNODES);
1652
050c17f2 1653 nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1da177e4
LT
1654 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1655
1656 if (nmask)
1657 nm = compat_alloc_user_space(alloc_size);
1658
af03c4ac 1659 err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1da177e4
LT
1660
1661 if (!err && nmask) {
2bbff6c7
KH
1662 unsigned long copy_size;
1663 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1664 err = copy_from_user(bm, nm, copy_size);
1da177e4
LT
1665 /* ensure entire bitmap is zeroed */
1666 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1667 err |= compat_put_bitmap(nmask, bm, nr_bits);
1668 }
1669
1670 return err;
1671}
1672
c93e0f6c
HC
1673COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1674 compat_ulong_t, maxnode)
1da177e4 1675{
1da177e4
LT
1676 unsigned long __user *nm = NULL;
1677 unsigned long nr_bits, alloc_size;
1678 DECLARE_BITMAP(bm, MAX_NUMNODES);
1679
1680 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1681 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1682
1683 if (nmask) {
cf01fb99
CS
1684 if (compat_get_bitmap(bm, nmask, nr_bits))
1685 return -EFAULT;
1da177e4 1686 nm = compat_alloc_user_space(alloc_size);
cf01fb99
CS
1687 if (copy_to_user(nm, bm, alloc_size))
1688 return -EFAULT;
1da177e4
LT
1689 }
1690
af03c4ac 1691 return kernel_set_mempolicy(mode, nm, nr_bits+1);
1da177e4
LT
1692}
1693
c93e0f6c
HC
1694COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1695 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1696 compat_ulong_t, maxnode, compat_ulong_t, flags)
1da177e4 1697{
1da177e4
LT
1698 unsigned long __user *nm = NULL;
1699 unsigned long nr_bits, alloc_size;
dfcd3c0d 1700 nodemask_t bm;
1da177e4
LT
1701
1702 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1703 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1704
1705 if (nmask) {
cf01fb99
CS
1706 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1707 return -EFAULT;
1da177e4 1708 nm = compat_alloc_user_space(alloc_size);
cf01fb99
CS
1709 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1710 return -EFAULT;
1da177e4
LT
1711 }
1712
e7dc9ad6 1713 return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
1da177e4
LT
1714}
1715
b6e9b0ba
DB
1716COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1717 compat_ulong_t, maxnode,
1718 const compat_ulong_t __user *, old_nodes,
1719 const compat_ulong_t __user *, new_nodes)
1720{
1721 unsigned long __user *old = NULL;
1722 unsigned long __user *new = NULL;
1723 nodemask_t tmp_mask;
1724 unsigned long nr_bits;
1725 unsigned long size;
1726
1727 nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1728 size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1729 if (old_nodes) {
1730 if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1731 return -EFAULT;
1732 old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1733 if (new_nodes)
1734 new = old + size / sizeof(unsigned long);
1735 if (copy_to_user(old, nodes_addr(tmp_mask), size))
1736 return -EFAULT;
1737 }
1738 if (new_nodes) {
1739 if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1740 return -EFAULT;
1741 if (new == NULL)
1742 new = compat_alloc_user_space(size);
1743 if (copy_to_user(new, nodes_addr(tmp_mask), size))
1744 return -EFAULT;
1745 }
1746 return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1747}
1748
1749#endif /* CONFIG_COMPAT */
1da177e4 1750
20ca87f2
LX
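/*
 * vma_migratable - can pages in this VMA be migrated to another node?
 *
 * Rules out VM_IO/VM_PFNMAP mappings, DAX mappings, hugetlb VMAs whose
 * page size lacks migration support, and file mappings whose gfp mask
 * cannot reach policy_zone (node-to-node migration could not allocate a
 * destination page there).
 */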
1751bool vma_migratable(struct vm_area_struct *vma)
1752{
1753 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1754 return false;
1755
1756 /*
1757 * DAX device mappings require predictable access latency, so avoid
1758 * incurring periodic faults.
1759 */
1760 if (vma_is_dax(vma))
1761 return false;
1762
1763 if (is_vm_hugetlb_page(vma) &&
1764 !hugepage_migration_supported(hstate_vma(vma)))
1765 return false;
1766
1767 /*
1768 * Migration allocates pages in the highest zone. If we cannot
1769 * do so then migration (at least from node to node) is not
1770 * possible.
1771 */
1772 if (vma->vm_file &&
1773 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1774 < policy_zone)
1775 return false;
1776 return true;
1777}
1778
74d2c3a0
ON
1779struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1780 unsigned long addr)
1da177e4 1781{
8d90274b 1782 struct mempolicy *pol = NULL;
1da177e4
LT
1783
1784 if (vma) {
480eccf9 1785 if (vma->vm_ops && vma->vm_ops->get_policy) {
8d90274b 1786 pol = vma->vm_ops->get_policy(vma, addr);
00442ad0 1787 } else if (vma->vm_policy) {
1da177e4 1788 pol = vma->vm_policy;
00442ad0
MG
1789
1790 /*
1791 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1792 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1793 * count on these policies which will be dropped by
1794 * mpol_cond_put() later
1795 */
1796 if (mpol_needs_cond_ref(pol))
1797 mpol_get(pol);
1798 }
1da177e4 1799 }
f15ca78e 1800
74d2c3a0
ON
1801 return pol;
1802}
1803
1804/*
dd6eecb9 1805 * get_vma_policy(@vma, @addr)
74d2c3a0
ON
1806 * @vma: virtual memory area whose policy is sought
1807 * @addr: address in @vma for shared policy lookup
1808 *
1809 * Returns effective policy for a VMA at specified address.
dd6eecb9 1810 * Falls back to current->mempolicy or system default policy, as necessary.
74d2c3a0
ON
1811 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1812 * count--added by the get_policy() vm_op, as appropriate--to protect against
1813 * freeing by another task. It is the caller's responsibility to free the
1814 * extra reference for shared policies.
1815 */
ac79f78d 1816static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
dd6eecb9 1817 unsigned long addr)
74d2c3a0
ON
1818{
1819 struct mempolicy *pol = __get_vma_policy(vma, addr);
1820
8d90274b 1821 if (!pol)
dd6eecb9 1822 pol = get_task_policy(current);
8d90274b 1823
1da177e4
LT
1824 return pol;
1825}
1826
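/*
 * vma_policy_mof - may pages in @vma be migrated on fault (MPOL_F_MOF)?
 *
 * Checks the vma's own policy, via ->get_policy() for shared mappings,
 * and falls back to the task policy otherwise.
 */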
6b6482bb 1827bool vma_policy_mof(struct vm_area_struct *vma)
fc314724 1828{
6b6482bb 1829 struct mempolicy *pol;
fc314724 1830
6b6482bb
ON
1831 if (vma->vm_ops && vma->vm_ops->get_policy) {
1832 bool ret = false;
fc314724 1833
6b6482bb
ON
1834 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1835 if (pol && (pol->flags & MPOL_F_MOF))
1836 ret = true;
1837 mpol_cond_put(pol);
8d90274b 1838
6b6482bb 1839 return ret;
fc314724
MG
1840 }
1841
6b6482bb 1842 pol = vma->vm_policy;
8d90274b 1843 if (!pol)
6b6482bb 1844 pol = get_task_policy(current);
8d90274b 1845
fc314724
MG
1846 return pol->flags & MPOL_F_MOF;
1847}
1848
d3eb1570
LJ
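/*
 * apply_policy_zone - should @policy constrain an allocation for @zone?
 *
 * MPOL_BIND is only applied from policy_zone upwards, except when the
 * policy's nodes contain movable-only memory; then only ZONE_MOVABLE
 * allocations are constrained.
 */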
1849static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1850{
1851 enum zone_type dynamic_policy_zone = policy_zone;
1852
1853 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1854
1855 /*
1856 * if policy->v.nodes has movable memory only,
1857 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1858 *
 1859	 * policy->v.nodes is intersected with node_states[N_MEMORY],
 1860	 * so if the following test fails, it implies
 1861	 * policy->v.nodes has movable memory only.
1862 */
1863 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1864 dynamic_policy_zone = ZONE_MOVABLE;
1865
1866 return zone >= dynamic_policy_zone;
1867}
1868
52cd3b07
LS
1869/*
1870 * Return a nodemask representing a mempolicy for filtering nodes for
1871 * page allocation
1872 */
8ca39e68 1873nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32
MG
1874{
1875 /* Lower zones don't get a nodemask applied for MPOL_BIND */
45c4745a 1876 if (unlikely(policy->mode == MPOL_BIND) &&
d3eb1570 1877 apply_policy_zone(policy, gfp_zone(gfp)) &&
19770b32
MG
1878 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1879 return &policy->v.nodes;
1880
1881 return NULL;
1882}
1883
04ec6264 1884/* Return the node id preferred by the given mempolicy, or the given id */
f8fd5253 1885static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1da177e4 1886{
6d840958
MH
1887 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1888 nd = policy->v.preferred_node;
1889 else {
19770b32 1890 /*
6d840958
MH
1891 * __GFP_THISNODE shouldn't even be used with the bind policy
1892 * because we might easily break the expectation to stay on the
1893 * requested node and not break the policy.
19770b32 1894 */
6d840958 1895 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1da177e4 1896 }
6d840958 1897
04ec6264 1898 return nd;
1da177e4
LT
1899}
1900
1901/* Do dynamic interleaving for a process */
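/*
 * current->il_prev remembers the node handed out last time; each call
 * advances to the next node in policy->v.nodes, wrapping around.
 */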
1902static unsigned interleave_nodes(struct mempolicy *policy)
1903{
45816682 1904 unsigned next;
1da177e4
LT
1905 struct task_struct *me = current;
1906
45816682 1907 next = next_node_in(me->il_prev, policy->v.nodes);
f5b087b5 1908 if (next < MAX_NUMNODES)
45816682
VB
1909 me->il_prev = next;
1910 return next;
1da177e4
LT
1911}
1912
dc85da15
CL
1913/*
1914 * Depending on the memory policy provide a node from which to allocate the
1915 * next slab entry.
1916 */
2a389610 1917unsigned int mempolicy_slab_node(void)
dc85da15 1918{
e7b691b0 1919 struct mempolicy *policy;
2a389610 1920 int node = numa_mem_id();
e7b691b0
AK
1921
1922 if (in_interrupt())
2a389610 1923 return node;
e7b691b0
AK
1924
1925 policy = current->mempolicy;
fc36b8d3 1926 if (!policy || policy->flags & MPOL_F_LOCAL)
2a389610 1927 return node;
bea904d5
LS
1928
1929 switch (policy->mode) {
1930 case MPOL_PREFERRED:
fc36b8d3
LS
1931 /*
1932 * handled MPOL_F_LOCAL above
1933 */
1934 return policy->v.preferred_node;
765c4507 1935
dc85da15
CL
1936 case MPOL_INTERLEAVE:
1937 return interleave_nodes(policy);
1938
dd1a239f 1939 case MPOL_BIND: {
c33d6c06
MG
1940 struct zoneref *z;
1941
dc85da15
CL
1942 /*
1943 * Follow bind policy behavior and start allocation at the
1944 * first node.
1945 */
19770b32 1946 struct zonelist *zonelist;
19770b32 1947 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
c9634cf0 1948 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
c33d6c06
MG
1949 z = first_zones_zonelist(zonelist, highest_zoneidx,
1950 &policy->v.nodes);
c1093b74 1951 return z->zone ? zone_to_nid(z->zone) : node;
dd1a239f 1952 }
dc85da15 1953
dc85da15 1954 default:
bea904d5 1955 BUG();
dc85da15
CL
1956 }
1957}
1958
fee83b3a
AM
1959/*
1960 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1961 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1962 * number of present nodes.
1963 */
98c70baa 1964static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1da177e4 1965{
dfcd3c0d 1966 unsigned nnodes = nodes_weight(pol->v.nodes);
f5b087b5 1967 unsigned target;
fee83b3a
AM
1968 int i;
1969 int nid;
1da177e4 1970
f5b087b5
DR
1971 if (!nnodes)
1972 return numa_node_id();
fee83b3a
AM
1973 target = (unsigned int)n % nnodes;
1974 nid = first_node(pol->v.nodes);
1975 for (i = 0; i < target; i++)
dfcd3c0d 1976 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1977 return nid;
1978}
1979
5da7ca86
CL
1980/* Determine a node number for interleave */
1981static inline unsigned interleave_nid(struct mempolicy *pol,
1982 struct vm_area_struct *vma, unsigned long addr, int shift)
1983{
1984 if (vma) {
1985 unsigned long off;
1986
3b98b087
NA
1987 /*
1988 * for small pages, there is no difference between
1989 * shift and PAGE_SHIFT, so the bit-shift is safe.
1990 * for huge pages, since vm_pgoff is in units of small
1991 * pages, we need to shift off the always 0 bits to get
1992 * a useful offset.
1993 */
1994 BUG_ON(shift < PAGE_SHIFT);
1995 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86 1996 off += (addr - vma->vm_start) >> shift;
98c70baa 1997 return offset_il_node(pol, off);
5da7ca86
CL
1998 } else
1999 return interleave_nodes(pol);
2000}
2001
00ac59ad 2002#ifdef CONFIG_HUGETLBFS
480eccf9 2003/*
04ec6264 2004 * huge_node(@vma, @addr, @gfp_flags, @mpol)
b46e14ac
FF
2005 * @vma: virtual memory area whose policy is sought
2006 * @addr: address in @vma for shared policy lookup and interleave policy
2007 * @gfp_flags: for requested zone
2008 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2009 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 2010 *
04ec6264 2011 * Returns a nid suitable for a huge page allocation and a pointer
52cd3b07
LS
2012 * to the struct mempolicy for conditional unref after allocation.
 2013 * If the effective policy is 'bind', returns a pointer to the mempolicy's
2014 * @nodemask for filtering the zonelist.
c0ff7453 2015 *
d26914d1 2016 * Must be protected by read_mems_allowed_begin()
480eccf9 2017 */
04ec6264
VB
2018int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2019 struct mempolicy **mpol, nodemask_t **nodemask)
5da7ca86 2020{
04ec6264 2021 int nid;
5da7ca86 2022
dd6eecb9 2023 *mpol = get_vma_policy(vma, addr);
19770b32 2024 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 2025
52cd3b07 2026 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
04ec6264
VB
2027 nid = interleave_nid(*mpol, vma, addr,
2028 huge_page_shift(hstate_vma(vma)));
52cd3b07 2029 } else {
04ec6264 2030 nid = policy_node(gfp_flags, *mpol, numa_node_id());
52cd3b07
LS
2031 if ((*mpol)->mode == MPOL_BIND)
2032 *nodemask = &(*mpol)->v.nodes;
480eccf9 2033 }
04ec6264 2034 return nid;
5da7ca86 2035}
06808b08
LS
2036
2037/*
2038 * init_nodemask_of_mempolicy
2039 *
2040 * If the current task's mempolicy is "default" [NULL], return 'false'
2041 * to indicate default policy. Otherwise, extract the policy nodemask
2042 * for 'bind' or 'interleave' policy into the argument nodemask, or
2043 * initialize the argument nodemask to contain the single node for
2044 * 'preferred' or 'local' policy and return 'true' to indicate presence
2045 * of non-default mempolicy.
2046 *
2047 * We don't bother with reference counting the mempolicy [mpol_get/put]
 2048 * because the current task is examining its own mempolicy and a task's
2049 * mempolicy is only ever changed by the task itself.
2050 *
2051 * N.B., it is the caller's responsibility to free a returned nodemask.
2052 */
2053bool init_nodemask_of_mempolicy(nodemask_t *mask)
2054{
2055 struct mempolicy *mempolicy;
2056 int nid;
2057
2058 if (!(mask && current->mempolicy))
2059 return false;
2060
c0ff7453 2061 task_lock(current);
06808b08
LS
2062 mempolicy = current->mempolicy;
2063 switch (mempolicy->mode) {
2064 case MPOL_PREFERRED:
2065 if (mempolicy->flags & MPOL_F_LOCAL)
2066 nid = numa_node_id();
2067 else
2068 nid = mempolicy->v.preferred_node;
2069 init_nodemask_of_node(mask, nid);
2070 break;
2071
2072 case MPOL_BIND:
06808b08
LS
2073 case MPOL_INTERLEAVE:
2074 *mask = mempolicy->v.nodes;
2075 break;
2076
2077 default:
2078 BUG();
2079 }
c0ff7453 2080 task_unlock(current);
06808b08
LS
2081
2082 return true;
2083}
00ac59ad 2084#endif
5da7ca86 2085
6f48d0eb
DR
2086/*
2087 * mempolicy_nodemask_intersects
2088 *
2089 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2090 * policy. Otherwise, check for intersection between mask and the policy
 2091 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
2092 * policy, always return true since it may allocate elsewhere on fallback.
2093 *
2094 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2095 */
2096bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2097 const nodemask_t *mask)
2098{
2099 struct mempolicy *mempolicy;
2100 bool ret = true;
2101
2102 if (!mask)
2103 return ret;
2104 task_lock(tsk);
2105 mempolicy = tsk->mempolicy;
2106 if (!mempolicy)
2107 goto out;
2108
2109 switch (mempolicy->mode) {
2110 case MPOL_PREFERRED:
2111 /*
2112 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
 2113	 * allocate from; they may fall back to other nodes under OOM.
2114 * Thus, it's possible for tsk to have allocated memory from
2115 * nodes in mask.
2116 */
2117 break;
2118 case MPOL_BIND:
2119 case MPOL_INTERLEAVE:
2120 ret = nodes_intersects(mempolicy->v.nodes, *mask);
2121 break;
2122 default:
2123 BUG();
2124 }
2125out:
2126 task_unlock(tsk);
2127 return ret;
2128}
2129
1da177e4
LT
2130/* Allocate a page in interleaved policy.
2131 Own path because it needs to do special accounting. */
662f3a0b
AK
2132static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2133 unsigned nid)
1da177e4 2134{
1da177e4
LT
2135 struct page *page;
2136
04ec6264 2137 page = __alloc_pages(gfp, order, nid);
4518085e
KW
2138 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2139 if (!static_branch_likely(&vm_numa_stat_key))
2140 return page;
de55c8b2
AR
2141 if (page && page_to_nid(page) == nid) {
2142 preempt_disable();
2143 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2144 preempt_enable();
2145 }
1da177e4
LT
2146 return page;
2147}
2148
2149/**
0bbbc0b3 2150 * alloc_pages_vma - Allocate a page for a VMA.
1da177e4
LT
2151 *
2152 * @gfp:
2153 * %GFP_USER user allocation.
2154 * %GFP_KERNEL kernel allocations,
2155 * %GFP_HIGHMEM highmem/user allocations,
2156 * %GFP_FS allocation should not call back into a file system.
2157 * %GFP_ATOMIC don't sleep.
2158 *
0bbbc0b3 2159 * @order: Order of the GFP allocation.
1da177e4
LT
2160 * @vma: Pointer to VMA or NULL if not available.
2161 * @addr: Virtual Address of the allocation. Must be inside the VMA.
be97a41b 2162 * @node: Which node to prefer for allocation (modulo policy).
19deb769 2163 * @hugepage: for hugepages try only the preferred node if possible
1da177e4
LT
2164 *
2165 * This function allocates a page from the kernel page pool and applies
2166 * a NUMA policy associated with the VMA or the current process.
3e4e28c5 2167 * When VMA is not NULL caller must read-lock the mmap_lock of the
1da177e4 2168 * mm_struct of the VMA to prevent it from going away. Should be used for
be97a41b
VB
2169 * all allocations for pages that will be mapped into user space. Returns
2170 * NULL when no page can be allocated.
1da177e4
LT
2171 */
2172struct page *
0bbbc0b3 2173alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
19deb769 2174 unsigned long addr, int node, bool hugepage)
1da177e4 2175{
cc9a6c87 2176 struct mempolicy *pol;
c0ff7453 2177 struct page *page;
04ec6264 2178 int preferred_nid;
be97a41b 2179 nodemask_t *nmask;
cc9a6c87 2180
dd6eecb9 2181 pol = get_vma_policy(vma, addr);
1da177e4 2182
0867a57c
VB
2183 if (pol->mode == MPOL_INTERLEAVE) {
2184 unsigned nid;
2185
2186 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2187 mpol_cond_put(pol);
2188 page = alloc_page_interleave(gfp, order, nid);
2189 goto out;
19deb769
DR
2190 }
2191
2192 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2193 int hpage_node = node;
2194
2195 /*
2196 * For hugepage allocation and non-interleave policy which
2197 * allows the current node (or other explicitly preferred
2198 * node) we only try to allocate from the current/preferred
2199 * node and don't fall back to other nodes, as the cost of
2200 * remote accesses would likely offset THP benefits.
2201 *
2202 * If the policy is interleave, or does not allow the current
2203 * node in its nodemask, we allocate the standard way.
2204 */
2205 if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2206 hpage_node = pol->v.preferred_node;
2207
2208 nmask = policy_nodemask(gfp, pol);
2209 if (!nmask || node_isset(hpage_node, *nmask)) {
2210 mpol_cond_put(pol);
cc638f32
VB
2211 /*
2212 * First, try to allocate THP only on local node, but
2213 * don't reclaim unnecessarily, just compact.
2214 */
19deb769 2215 page = __alloc_pages_node(hpage_node,
cc638f32 2216 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
76e654cc
DR
2217
2218 /*
2219 * If hugepage allocations are configured to always
2220 * synchronous compact or the vma has been madvised
2221 * to prefer hugepage backing, retry allowing remote
cc638f32 2222 * memory with both reclaim and compact as well.
76e654cc
DR
2223 */
2224 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2225 page = __alloc_pages_node(hpage_node,
cc638f32 2226 gfp, order);
76e654cc 2227
19deb769
DR
2228 goto out;
2229 }
356ff8a9
DR
2230 }
2231
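	/* Default path: honour the policy's nodemask and preferred node. */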
be97a41b 2232 nmask = policy_nodemask(gfp, pol);
04ec6264
VB
2233 preferred_nid = policy_node(gfp, pol, node);
2234 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
d51e9894 2235 mpol_cond_put(pol);
be97a41b 2236out:
c0ff7453 2237 return page;
1da177e4 2238}
69262215 2239EXPORT_SYMBOL(alloc_pages_vma);
1da177e4
LT
2240
2241/**
2242 * alloc_pages_current - Allocate pages.
2243 *
2244 * @gfp:
2245 * %GFP_USER user allocation,
2246 * %GFP_KERNEL kernel allocation,
2247 * %GFP_HIGHMEM highmem allocation,
2248 * %GFP_FS don't call back into a file system.
2249 * %GFP_ATOMIC don't sleep.
2250 * @order: Power of two of allocation size in pages. 0 is a single page.
2251 *
2252 * Allocate a page from the kernel page pool. When not in
 2253 * interrupt context, apply the current process' NUMA policy.
2254 * Returns NULL when no page can be allocated.
1da177e4 2255 */
dd0fc66f 2256struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4 2257{
8d90274b 2258 struct mempolicy *pol = &default_policy;
c0ff7453 2259 struct page *page;
1da177e4 2260
8d90274b
ON
2261 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2262 pol = get_task_policy(current);
52cd3b07
LS
2263
2264 /*
2265 * No reference counting needed for current->mempolicy
2266 * nor system default_policy
2267 */
45c4745a 2268 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453
MX
2269 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2270 else
2271 page = __alloc_pages_nodemask(gfp, order,
04ec6264 2272 policy_node(gfp, pol, numa_node_id()),
5c4b4be3 2273 policy_nodemask(gfp, pol));
cc9a6c87 2274
c0ff7453 2275 return page;
1da177e4
LT
2276}
2277EXPORT_SYMBOL(alloc_pages_current);
2278
ef0855d3
ON
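/*
 * Duplicate @src's policy (if any) into @dst via mpol_dup(), taking a
 * fresh reference; returns a negative errno if the copy cannot be made.
 */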
2279int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2280{
2281 struct mempolicy *pol = mpol_dup(vma_policy(src));
2282
2283 if (IS_ERR(pol))
2284 return PTR_ERR(pol);
2285 dst->vm_policy = pol;
2286 return 0;
2287}
2288
4225399a 2289/*
846a16bf 2290 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
 2291 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2292 * with the mems_allowed returned by cpuset_mems_allowed(). This
2293 * keeps mempolicies cpuset relative after its cpuset moves. See
2294 * further kernel/cpuset.c update_nodemask().
708c1bbc
MX
2295 *
 2296 * current's mempolicy may be rebound by another task (the task that changes
 2297 * the cpuset's mems), so we need not do the rebind work for the current task.
4225399a 2298 */
4225399a 2299
846a16bf
LS
2300/* Slow path of a mempolicy duplicate */
2301struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2302{
2303 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2304
2305 if (!new)
2306 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2307
2308 /* task's mempolicy is protected by alloc_lock */
2309 if (old == current->mempolicy) {
2310 task_lock(current);
2311 *new = *old;
2312 task_unlock(current);
2313 } else
2314 *new = *old;
2315
4225399a
PJ
2316 if (current_cpuset_is_being_rebound()) {
2317 nodemask_t mems = cpuset_mems_allowed(current);
213980c0 2318 mpol_rebind_policy(new, &mems);
4225399a 2319 }
1da177e4 2320 atomic_set(&new->refcnt, 1);
1da177e4
LT
2321 return new;
2322}
2323
2324/* Slow path of a mempolicy comparison */
fcfb4dcc 2325bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2326{
2327 if (!a || !b)
fcfb4dcc 2328 return false;
45c4745a 2329 if (a->mode != b->mode)
fcfb4dcc 2330 return false;
19800502 2331 if (a->flags != b->flags)
fcfb4dcc 2332 return false;
19800502
BL
2333 if (mpol_store_user_nodemask(a))
2334 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2335 return false;
19800502 2336
45c4745a 2337 switch (a->mode) {
19770b32 2338 case MPOL_BIND:
1da177e4 2339 case MPOL_INTERLEAVE:
fcfb4dcc 2340 return !!nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 2341 case MPOL_PREFERRED:
8970a63e
YX
2342 /* a's ->flags is the same as b's */
2343 if (a->flags & MPOL_F_LOCAL)
2344 return true;
75719661 2345 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
2346 default:
2347 BUG();
fcfb4dcc 2348 return false;
1da177e4
LT
2349 }
2350}
2351
1da177e4
LT
2352/*
2353 * Shared memory backing store policy support.
2354 *
2355 * Remember policies even when nobody has shared memory mapped.
 2356 * The policies are kept in a red-black tree linked from the inode.
4a8c7bb5 2357 * They are protected by the sp->lock rwlock, which should be held
1da177e4
LT
2358 * for any accesses to the tree.
2359 */
2360
4a8c7bb5
NZ
2361/*
2362 * lookup first element intersecting start-end. Caller holds sp->lock for
2363 * reading or for writing
2364 */
1da177e4
LT
2365static struct sp_node *
2366sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2367{
2368 struct rb_node *n = sp->root.rb_node;
2369
2370 while (n) {
2371 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2372
2373 if (start >= p->end)
2374 n = n->rb_right;
2375 else if (end <= p->start)
2376 n = n->rb_left;
2377 else
2378 break;
2379 }
2380 if (!n)
2381 return NULL;
2382 for (;;) {
2383 struct sp_node *w = NULL;
2384 struct rb_node *prev = rb_prev(n);
2385 if (!prev)
2386 break;
2387 w = rb_entry(prev, struct sp_node, nd);
2388 if (w->end <= start)
2389 break;
2390 n = prev;
2391 }
2392 return rb_entry(n, struct sp_node, nd);
2393}
2394
4a8c7bb5
NZ
2395/*
2396 * Insert a new shared policy into the list. Caller holds sp->lock for
2397 * writing.
2398 */
1da177e4
LT
2399static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2400{
2401 struct rb_node **p = &sp->root.rb_node;
2402 struct rb_node *parent = NULL;
2403 struct sp_node *nd;
2404
2405 while (*p) {
2406 parent = *p;
2407 nd = rb_entry(parent, struct sp_node, nd);
2408 if (new->start < nd->start)
2409 p = &(*p)->rb_left;
2410 else if (new->end > nd->end)
2411 p = &(*p)->rb_right;
2412 else
2413 BUG();
2414 }
2415 rb_link_node(&new->nd, parent, p);
2416 rb_insert_color(&new->nd, &sp->root);
140d5a49 2417 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2418 new->policy ? new->policy->mode : 0);
1da177e4
LT
2419}
2420
2421/* Find shared policy intersecting idx */
2422struct mempolicy *
2423mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2424{
2425 struct mempolicy *pol = NULL;
2426 struct sp_node *sn;
2427
2428 if (!sp->root.rb_node)
2429 return NULL;
4a8c7bb5 2430 read_lock(&sp->lock);
1da177e4
LT
2431 sn = sp_lookup(sp, idx, idx+1);
2432 if (sn) {
2433 mpol_get(sn->policy);
2434 pol = sn->policy;
2435 }
4a8c7bb5 2436 read_unlock(&sp->lock);
1da177e4
LT
2437 return pol;
2438}
2439
63f74ca2
KM
2440static void sp_free(struct sp_node *n)
2441{
2442 mpol_put(n->policy);
2443 kmem_cache_free(sn_cache, n);
2444}
2445
771fb4d8
LS
2446/**
2447 * mpol_misplaced - check whether current page node is valid in policy
2448 *
b46e14ac
FF
2449 * @page: page to be checked
2450 * @vma: vm area where page mapped
2451 * @addr: virtual address where page mapped
771fb4d8
LS
2452 *
2453 * Lookup current policy node id for vma,addr and "compare to" page's
2454 * node id.
2455 *
2456 * Returns:
2457 * -1 - not misplaced, page is in the right node
2458 * node - node id where the page should be
2459 *
2460 * Policy determination "mimics" alloc_page_vma().
2461 * Called from fault path where we know the vma and faulting address.
2462 */
2463int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2464{
2465 struct mempolicy *pol;
c33d6c06 2466 struct zoneref *z;
771fb4d8
LS
2467 int curnid = page_to_nid(page);
2468 unsigned long pgoff;
90572890
PZ
2469 int thiscpu = raw_smp_processor_id();
2470 int thisnid = cpu_to_node(thiscpu);
98fa15f3 2471 int polnid = NUMA_NO_NODE;
771fb4d8
LS
2472 int ret = -1;
2473
dd6eecb9 2474 pol = get_vma_policy(vma, addr);
771fb4d8
LS
2475 if (!(pol->flags & MPOL_F_MOF))
2476 goto out;
2477
2478 switch (pol->mode) {
2479 case MPOL_INTERLEAVE:
771fb4d8
LS
2480 pgoff = vma->vm_pgoff;
2481 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
98c70baa 2482 polnid = offset_il_node(pol, pgoff);
771fb4d8
LS
2483 break;
2484
2485 case MPOL_PREFERRED:
2486 if (pol->flags & MPOL_F_LOCAL)
2487 polnid = numa_node_id();
2488 else
2489 polnid = pol->v.preferred_node;
2490 break;
2491
2492 case MPOL_BIND:
c33d6c06 2493
771fb4d8
LS
2494 /*
 2495		 * MPOL_BIND allows binding to multiple nodes.
 2496		 * Use the current page's node if it is in the policy nodemask,
 2497		 * else select the nearest allowed node, if any.
 2498		 * If there are no allowed nodes, use the current node [!misplaced].
2499 */
2500 if (node_isset(curnid, pol->v.nodes))
2501 goto out;
c33d6c06 2502 z = first_zones_zonelist(
771fb4d8
LS
2503 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2504 gfp_zone(GFP_HIGHUSER),
c33d6c06 2505 &pol->v.nodes);
c1093b74 2506 polnid = zone_to_nid(z->zone);
771fb4d8
LS
2507 break;
2508
2509 default:
2510 BUG();
2511 }
5606e387
MG
2512
2513 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2514 if (pol->flags & MPOL_F_MORON) {
90572890 2515 polnid = thisnid;
5606e387 2516
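		/*
		 * should_numa_migrate_memory() applies the NUMA-balancing
		 * heuristics before the migration target is accepted.
		 */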
10f39042 2517 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
de1c9ce6 2518 goto out;
e42c8ff2
MG
2519 }
2520
771fb4d8
LS
2521 if (curnid != polnid)
2522 ret = polnid;
2523out:
2524 mpol_cond_put(pol);
2525
2526 return ret;
2527}
2528
c11600e4
DR
2529/*
2530 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2531 * dropped after task->mempolicy is set to NULL so that any allocation done as
2532 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2533 * policy.
2534 */
2535void mpol_put_task_policy(struct task_struct *task)
2536{
2537 struct mempolicy *pol;
2538
2539 task_lock(task);
2540 pol = task->mempolicy;
2541 task->mempolicy = NULL;
2542 task_unlock(task);
2543 mpol_put(pol);
2544}
2545
1da177e4
LT
2546static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2547{
140d5a49 2548	pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2549 rb_erase(&n->nd, &sp->root);
63f74ca2 2550 sp_free(n);
1da177e4
LT
2551}
2552
42288fe3
MG
2553static void sp_node_init(struct sp_node *node, unsigned long start,
2554 unsigned long end, struct mempolicy *pol)
2555{
2556 node->start = start;
2557 node->end = end;
2558 node->policy = pol;
2559}
2560
dbcb0f19
AB
2561static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2562 struct mempolicy *pol)
1da177e4 2563{
869833f2
KM
2564 struct sp_node *n;
2565 struct mempolicy *newpol;
1da177e4 2566
869833f2 2567 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2568 if (!n)
2569 return NULL;
869833f2
KM
2570
2571 newpol = mpol_dup(pol);
2572 if (IS_ERR(newpol)) {
2573 kmem_cache_free(sn_cache, n);
2574 return NULL;
2575 }
2576 newpol->flags |= MPOL_F_SHARED;
42288fe3 2577 sp_node_init(n, start, end, newpol);
869833f2 2578
1da177e4
LT
2579 return n;
2580}
2581
2582/* Replace a policy range. */
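/*
 * If an existing node spans the whole new range it has to be split in
 * two; the second half needs a fresh sp_node and mempolicy.  Both are
 * allocated outside sp->lock (see alloc_new below) and the lookup is
 * restarted with the preallocated objects in hand.
 */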
2583static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2584 unsigned long end, struct sp_node *new)
2585{
b22d127a 2586 struct sp_node *n;
42288fe3
MG
2587 struct sp_node *n_new = NULL;
2588 struct mempolicy *mpol_new = NULL;
b22d127a 2589 int ret = 0;
1da177e4 2590
42288fe3 2591restart:
4a8c7bb5 2592 write_lock(&sp->lock);
1da177e4
LT
2593 n = sp_lookup(sp, start, end);
2594 /* Take care of old policies in the same range. */
2595 while (n && n->start < end) {
2596 struct rb_node *next = rb_next(&n->nd);
2597 if (n->start >= start) {
2598 if (n->end <= end)
2599 sp_delete(sp, n);
2600 else
2601 n->start = end;
2602 } else {
2603 /* Old policy spanning whole new range. */
2604 if (n->end > end) {
42288fe3
MG
2605 if (!n_new)
2606 goto alloc_new;
2607
2608 *mpol_new = *n->policy;
2609 atomic_set(&mpol_new->refcnt, 1);
7880639c 2610 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2611 n->end = start;
5ca39575 2612 sp_insert(sp, n_new);
42288fe3
MG
2613 n_new = NULL;
2614 mpol_new = NULL;
1da177e4
LT
2615 break;
2616 } else
2617 n->end = start;
2618 }
2619 if (!next)
2620 break;
2621 n = rb_entry(next, struct sp_node, nd);
2622 }
2623 if (new)
2624 sp_insert(sp, new);
4a8c7bb5 2625 write_unlock(&sp->lock);
42288fe3
MG
2626 ret = 0;
2627
2628err_out:
2629 if (mpol_new)
2630 mpol_put(mpol_new);
2631 if (n_new)
2632 kmem_cache_free(sn_cache, n_new);
2633
b22d127a 2634 return ret;
42288fe3
MG
2635
2636alloc_new:
4a8c7bb5 2637 write_unlock(&sp->lock);
42288fe3
MG
2638 ret = -ENOMEM;
2639 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2640 if (!n_new)
2641 goto err_out;
2642 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2643 if (!mpol_new)
2644 goto err_out;
2645 goto restart;
1da177e4
LT
2646}
2647
71fe804b
LS
2648/**
2649 * mpol_shared_policy_init - initialize shared policy for inode
2650 * @sp: pointer to inode shared policy
2651 * @mpol: struct mempolicy to install
2652 *
2653 * Install non-NULL @mpol in inode's shared policy rb-tree.
2654 * On entry, the current task has a reference on a non-NULL @mpol.
2655 * This must be released on exit.
4bfc4495 2656 * This is called at get_inode() calls and we can use GFP_KERNEL.
71fe804b
LS
2657 */
2658void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2659{
58568d2a
MX
2660 int ret;
2661
71fe804b 2662 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2663 rwlock_init(&sp->lock);
71fe804b
LS
2664
2665 if (mpol) {
2666 struct vm_area_struct pvma;
2667 struct mempolicy *new;
4bfc4495 2668 NODEMASK_SCRATCH(scratch);
71fe804b 2669
4bfc4495 2670 if (!scratch)
5c0c1654 2671 goto put_mpol;
71fe804b
LS
2672 /* contextualize the tmpfs mount point mempolicy */
2673 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2674 if (IS_ERR(new))
0cae3457 2675 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2676
2677 task_lock(current);
4bfc4495 2678 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2679 task_unlock(current);
15d77835 2680 if (ret)
5c0c1654 2681 goto put_new;
71fe804b
LS
2682
2683 /* Create pseudo-vma that contains just the policy */
2c4541e2 2684 vma_init(&pvma, NULL);
71fe804b
LS
2685 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2686 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2687
5c0c1654 2688put_new:
71fe804b 2689 mpol_put(new); /* drop initial ref */
0cae3457 2690free_scratch:
4bfc4495 2691 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2692put_mpol:
2693 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2694 }
2695}
2696
1da177e4
LT
2697int mpol_set_shared_policy(struct shared_policy *info,
2698 struct vm_area_struct *vma, struct mempolicy *npol)
2699{
2700 int err;
2701 struct sp_node *new = NULL;
2702 unsigned long sz = vma_pages(vma);
2703
028fec41 2704 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2705 vma->vm_pgoff,
45c4745a 2706 sz, npol ? npol->mode : -1,
028fec41 2707 npol ? npol->flags : -1,
00ef2d2f 2708 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2709
2710 if (npol) {
2711 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2712 if (!new)
2713 return -ENOMEM;
2714 }
2715 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2716 if (err && new)
63f74ca2 2717 sp_free(new);
1da177e4
LT
2718 return err;
2719}
2720
2721/* Free a backing policy store on inode delete. */
2722void mpol_free_shared_policy(struct shared_policy *p)
2723{
2724 struct sp_node *n;
2725 struct rb_node *next;
2726
2727 if (!p->root.rb_node)
2728 return;
4a8c7bb5 2729 write_lock(&p->lock);
1da177e4
LT
2730 next = rb_first(&p->root);
2731 while (next) {
2732 n = rb_entry(next, struct sp_node, nd);
2733 next = rb_next(&n->nd);
63f74ca2 2734 sp_delete(p, n);
1da177e4 2735 }
4a8c7bb5 2736 write_unlock(&p->lock);
1da177e4
LT
2737}
2738
1a687c2e 2739#ifdef CONFIG_NUMA_BALANCING
c297663c 2740static int __initdata numabalancing_override;
1a687c2e
MG
2741
2742static void __init check_numabalancing_enable(void)
2743{
2744 bool numabalancing_default = false;
2745
2746 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2747 numabalancing_default = true;
2748
c297663c
MG
2749 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2750 if (numabalancing_override)
2751 set_numabalancing_state(numabalancing_override == 1);
2752
b0dc2b9b 2753 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 2754 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 2755 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
2756 set_numabalancing_state(numabalancing_default);
2757 }
2758}
2759
2760static int __init setup_numabalancing(char *str)
2761{
2762 int ret = 0;
2763 if (!str)
2764 goto out;
1a687c2e
MG
2765
2766 if (!strcmp(str, "enable")) {
c297663c 2767 numabalancing_override = 1;
1a687c2e
MG
2768 ret = 1;
2769 } else if (!strcmp(str, "disable")) {
c297663c 2770 numabalancing_override = -1;
1a687c2e
MG
2771 ret = 1;
2772 }
2773out:
2774 if (!ret)
4a404bea 2775 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
2776
2777 return ret;
2778}
2779__setup("numa_balancing=", setup_numabalancing);
2780#else
2781static inline void __init check_numabalancing_enable(void)
2782{
2783}
2784#endif /* CONFIG_NUMA_BALANCING */
2785
1da177e4
LT
2786/* assumes fs == KERNEL_DS */
2787void __init numa_policy_init(void)
2788{
b71636e2
PM
2789 nodemask_t interleave_nodes;
2790 unsigned long largest = 0;
2791 int nid, prefer = 0;
2792
1da177e4
LT
2793 policy_cache = kmem_cache_create("numa_policy",
2794 sizeof(struct mempolicy),
20c2df83 2795 0, SLAB_PANIC, NULL);
1da177e4
LT
2796
2797 sn_cache = kmem_cache_create("shared_policy_node",
2798 sizeof(struct sp_node),
20c2df83 2799 0, SLAB_PANIC, NULL);
1da177e4 2800
5606e387
MG
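	/*
	 * Per-node MPOL_PREFERRED policies, each preferring its own node;
	 * MPOL_F_MOF | MPOL_F_MORON marks them as candidates for
	 * NUMA-balancing migrate-on-fault handling.
	 */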
2801 for_each_node(nid) {
2802 preferred_node_policy[nid] = (struct mempolicy) {
2803 .refcnt = ATOMIC_INIT(1),
2804 .mode = MPOL_PREFERRED,
2805 .flags = MPOL_F_MOF | MPOL_F_MORON,
2806 .v = { .preferred_node = nid, },
2807 };
2808 }
2809
b71636e2
PM
2810 /*
2811 * Set interleaving policy for system init. Interleaving is only
2812 * enabled across suitably sized nodes (default is >= 16MB), or
2813 * fall back to the largest node if they're all smaller.
2814 */
2815 nodes_clear(interleave_nodes);
01f13bd6 2816 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2817 unsigned long total_pages = node_present_pages(nid);
2818
2819 /* Preserve the largest node */
2820 if (largest < total_pages) {
2821 largest = total_pages;
2822 prefer = nid;
2823 }
2824
2825 /* Interleave this node? */
2826 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2827 node_set(nid, interleave_nodes);
2828 }
2829
2830 /* All too small, use the largest */
2831 if (unlikely(nodes_empty(interleave_nodes)))
2832 node_set(prefer, interleave_nodes);
1da177e4 2833
028fec41 2834 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 2835 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
2836
2837 check_numabalancing_enable();
1da177e4
LT
2838}
2839
8bccd85f 2840/* Reset policy of current process to default */
1da177e4
LT
2841void numa_default_policy(void)
2842{
028fec41 2843 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2844}
68860ec1 2845
095f1fc4
LS
2846/*
2847 * Parse and format mempolicy from/to strings
2848 */
2849
1a75a6c8 2850/*
f2a07f40 2851 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
1a75a6c8 2852 */
345ace9c
LS
2853static const char * const policy_modes[] =
2854{
2855 [MPOL_DEFAULT] = "default",
2856 [MPOL_PREFERRED] = "prefer",
2857 [MPOL_BIND] = "bind",
2858 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2859 [MPOL_LOCAL] = "local",
345ace9c 2860};
1a75a6c8 2861
095f1fc4
LS
2862
2863#ifdef CONFIG_TMPFS
2864/**
f2a07f40 2865 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2866 * @str: string containing mempolicy to parse
71fe804b 2867 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2868 *
2869 * Format of input:
2870 * <mode>[=<flags>][:<nodelist>]
2871 *
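 *	e.g. "interleave:0-3", "bind=static:0,2", "prefer:1", "local"
 *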
71fe804b 2872 * On success, returns 0, else 1
095f1fc4 2873 */
a7a88b23 2874int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2875{
71fe804b 2876 struct mempolicy *new = NULL;
f2a07f40 2877 unsigned short mode_flags;
71fe804b 2878 nodemask_t nodes;
095f1fc4
LS
2879 char *nodelist = strchr(str, ':');
2880 char *flags = strchr(str, '=');
dedf2c73 2881 int err = 1, mode;
095f1fc4 2882
c7a91bc7
DC
2883 if (flags)
2884 *flags++ = '\0'; /* terminate mode string */
2885
095f1fc4
LS
2886 if (nodelist) {
2887 /* NUL-terminate mode or flags string */
2888 *nodelist++ = '\0';
71fe804b 2889 if (nodelist_parse(nodelist, nodes))
095f1fc4 2890 goto out;
01f13bd6 2891 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2892 goto out;
71fe804b
LS
2893 } else
2894 nodes_clear(nodes);
2895
dedf2c73 2896 mode = match_string(policy_modes, MPOL_MAX, str);
2897 if (mode < 0)
095f1fc4
LS
2898 goto out;
2899
71fe804b 2900 switch (mode) {
095f1fc4 2901 case MPOL_PREFERRED:
71fe804b 2902 /*
aa9f7d51
RD
2903 * Insist on a nodelist of one node only, although later
2904 * we use first_node(nodes) to grab a single node, so here
2905 * nodelist (or nodes) cannot be empty.
71fe804b 2906 */
095f1fc4
LS
2907 if (nodelist) {
2908 char *rest = nodelist;
2909 while (isdigit(*rest))
2910 rest++;
926f2ae0
KM
2911 if (*rest)
2912 goto out;
aa9f7d51
RD
2913 if (nodes_empty(nodes))
2914 goto out;
095f1fc4
LS
2915 }
2916 break;
095f1fc4
LS
2917 case MPOL_INTERLEAVE:
2918 /*
2919 * Default to online nodes with memory if no nodelist
2920 */
2921 if (!nodelist)
01f13bd6 2922 nodes = node_states[N_MEMORY];
3f226aa1 2923 break;
71fe804b 2924 case MPOL_LOCAL:
3f226aa1 2925 /*
71fe804b 2926 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2927 */
71fe804b 2928 if (nodelist)
3f226aa1 2929 goto out;
71fe804b 2930 mode = MPOL_PREFERRED;
3f226aa1 2931 break;
413b43de
RT
2932 case MPOL_DEFAULT:
2933 /*
 2934		 * Insist on an empty nodelist
2935 */
2936 if (!nodelist)
2937 err = 0;
2938 goto out;
d69b2e63
KM
2939 case MPOL_BIND:
2940 /*
2941 * Insist on a nodelist
2942 */
2943 if (!nodelist)
2944 goto out;
095f1fc4
LS
2945 }
2946
71fe804b 2947 mode_flags = 0;
095f1fc4
LS
2948 if (flags) {
2949 /*
2950 * Currently, we only support two mutually exclusive
2951 * mode flags.
2952 */
2953 if (!strcmp(flags, "static"))
71fe804b 2954 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2955 else if (!strcmp(flags, "relative"))
71fe804b 2956 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2957 else
926f2ae0 2958 goto out;
095f1fc4 2959 }
71fe804b
LS
2960
2961 new = mpol_new(mode, mode_flags, &nodes);
2962 if (IS_ERR(new))
926f2ae0
KM
2963 goto out;
2964
f2a07f40
HD
2965 /*
2966 * Save nodes for mpol_to_str() to show the tmpfs mount options
2967 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2968 */
2969 if (mode != MPOL_PREFERRED)
2970 new->v.nodes = nodes;
2971 else if (nodelist)
2972 new->v.preferred_node = first_node(nodes);
2973 else
2974 new->flags |= MPOL_F_LOCAL;
2975
2976 /*
2977 * Save nodes for contextualization: this will be used to "clone"
2978 * the mempolicy in a specific context [cpuset] at a later time.
2979 */
2980 new->w.user_nodemask = nodes;
2981
926f2ae0 2982 err = 0;
71fe804b 2983
095f1fc4
LS
2984out:
2985 /* Restore string for error message */
2986 if (nodelist)
2987 *--nodelist = ':';
2988 if (flags)
2989 *--flags = '=';
71fe804b
LS
2990 if (!err)
2991 *mpol = new;
095f1fc4
LS
2992 return err;
2993}
2994#endif /* CONFIG_TMPFS */
2995
71fe804b
LS
2996/**
2997 * mpol_to_str - format a mempolicy structure for printing
2998 * @buffer: to contain formatted mempolicy string
2999 * @maxlen: length of @buffer
3000 * @pol: pointer to mempolicy to be formatted
71fe804b 3001 *
948927ee
DR
3002 * Convert @pol into a string. If @buffer is too short, truncate the string.
3003 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3004 * longest flag, "relative", and to display at least a few node ids.
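 * Example outputs: "default", "prefer:3", "interleave=static:0-5".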
1a75a6c8 3005 */
948927ee 3006void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
3007{
3008 char *p = buffer;
948927ee
DR
3009 nodemask_t nodes = NODE_MASK_NONE;
3010 unsigned short mode = MPOL_DEFAULT;
3011 unsigned short flags = 0;
2291990a 3012
8790c71a 3013 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 3014 mode = pol->mode;
948927ee
DR
3015 flags = pol->flags;
3016 }
bea904d5 3017
1a75a6c8
CL
3018 switch (mode) {
3019 case MPOL_DEFAULT:
1a75a6c8 3020 break;
1a75a6c8 3021 case MPOL_PREFERRED:
fc36b8d3 3022 if (flags & MPOL_F_LOCAL)
f2a07f40 3023 mode = MPOL_LOCAL;
53f2556b 3024 else
fc36b8d3 3025 node_set(pol->v.preferred_node, nodes);
1a75a6c8 3026 break;
1a75a6c8 3027 case MPOL_BIND:
1a75a6c8 3028 case MPOL_INTERLEAVE:
f2a07f40 3029 nodes = pol->v.nodes;
1a75a6c8 3030 break;
1a75a6c8 3031 default:
948927ee
DR
3032 WARN_ON_ONCE(1);
3033 snprintf(p, maxlen, "unknown");
3034 return;
1a75a6c8
CL
3035 }
3036
b7a9f420 3037 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 3038
fc36b8d3 3039 if (flags & MPOL_MODE_FLAGS) {
948927ee 3040 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 3041
2291990a
LS
3042 /*
3043 * Currently, the only defined flags are mutually exclusive
3044 */
f5b087b5 3045 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
3046 p += snprintf(p, buffer + maxlen - p, "static");
3047 else if (flags & MPOL_F_RELATIVE_NODES)
3048 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
3049 }
3050
9e763e0f
TH
3051 if (!nodes_empty(nodes))
3052 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3053 nodemask_pr_args(&nodes));
1a75a6c8 3054}