mm/mempolicy.c
1da177e4
LT
1/*
2 * Simple NUMA memory policy for the Linux kernel.
3 *
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
8bccd85f 5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
1da177e4
LT
6 * Subject to the GNU Public License, version 2.
7 *
8 * NUMA policy allows the user to give hints in which node(s) memory should
9 * be allocated.
10 *
11 * Support four policies per VMA and per process:
12 *
13 * The VMA policy has priority over the process policy for a page fault.
14 *
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a per-process counter
20 * is used.
8bccd85f 21 *
1da177e4
LT
22 * bind Only allocate memory on a specific set of nodes,
23 * no fallback.
8bccd85f
CL
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
27 *
1da177e4 28 * preferred Try a specific node first before normal fallback.
00ef2d2f 29 * As a special case NUMA_NO_NODE here means do the allocation
1da177e4
LT
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non-default
32 * process policy.
8bccd85f 33 *
1da177e4
LT
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
37 *
38 * The process policy is applied for most non-interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
42 *
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
46 *
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
51 *
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
54 */
55
56/* Notebook:
57 fix mmap readahead to honour policy and enable policy for any page cache
58 object
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
61 first item above.
62 handle mremap for shared memory (currently ignored for the policy)
63 grows down?
64 make bind policy root only? It can trigger oom much faster and the
65 kernel is not always grateful with that.
1da177e4
LT
66*/
67
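/*
 * For illustration, a minimal user-space sketch of how the syscalls
 * implemented below are typically driven, assuming the <numaif.h>
 * wrappers from libnuma; "addr" and "length" stand in for an existing
 * mapping and are placeholders only:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// Process policy: interleave future allocations over nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 *	// VMA policy: bind a mapping to node 0 and migrate the pages
 *	// already there (MPOL_MF_MOVE).
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &node0, 8 * sizeof(node0), MPOL_MF_MOVE);
 */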
b1de0d13
MH
68#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69
1da177e4
LT
70#include <linux/mempolicy.h>
71#include <linux/mm.h>
72#include <linux/highmem.h>
73#include <linux/hugetlb.h>
74#include <linux/kernel.h>
75#include <linux/sched.h>
6e84f315 76#include <linux/sched/mm.h>
6a3827d7 77#include <linux/sched/numa_balancing.h>
f719ff9b 78#include <linux/sched/task.h>
1da177e4
LT
79#include <linux/nodemask.h>
80#include <linux/cpuset.h>
1da177e4
LT
81#include <linux/slab.h>
82#include <linux/string.h>
b95f1b31 83#include <linux/export.h>
b488893a 84#include <linux/nsproxy.h>
1da177e4
LT
85#include <linux/interrupt.h>
86#include <linux/init.h>
87#include <linux/compat.h>
31367466 88#include <linux/ptrace.h>
dc9aa5b9 89#include <linux/swap.h>
1a75a6c8
CL
90#include <linux/seq_file.h>
91#include <linux/proc_fs.h>
b20a3503 92#include <linux/migrate.h>
62b61f61 93#include <linux/ksm.h>
95a402c3 94#include <linux/rmap.h>
86c3a764 95#include <linux/security.h>
dbcb0f19 96#include <linux/syscalls.h>
095f1fc4 97#include <linux/ctype.h>
6d9c285a 98#include <linux/mm_inline.h>
b24f53a0 99#include <linux/mmu_notifier.h>
b1de0d13 100#include <linux/printk.h>
c8633798 101#include <linux/swapops.h>
dc9aa5b9 102
1da177e4 103#include <asm/tlbflush.h>
7c0f6ba6 104#include <linux/uaccess.h>
1da177e4 105
62695a84
NP
106#include "internal.h"
107
38e35860 108/* Internal flags */
dc9aa5b9 109#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
38e35860 110#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
dc9aa5b9 111
fcc234f8
PE
112static struct kmem_cache *policy_cache;
113static struct kmem_cache *sn_cache;
1da177e4 114
1da177e4
LT
115/* Highest zone. A specific allocation for a zone below that is not
116 policied. */
6267276f 117enum zone_type policy_zone = 0;
1da177e4 118
bea904d5
LS
119/*
120 * run-time system-wide default policy => local allocation
121 */
e754d79d 122static struct mempolicy default_policy = {
1da177e4 123 .refcnt = ATOMIC_INIT(1), /* never free it */
bea904d5 124 .mode = MPOL_PREFERRED,
fc36b8d3 125 .flags = MPOL_F_LOCAL,
1da177e4
LT
126};
127
5606e387
MG
128static struct mempolicy preferred_node_policy[MAX_NUMNODES];
129
74d2c3a0 130struct mempolicy *get_task_policy(struct task_struct *p)
5606e387
MG
131{
132 struct mempolicy *pol = p->mempolicy;
f15ca78e 133 int node;
5606e387 134
f15ca78e
ON
135 if (pol)
136 return pol;
5606e387 137
f15ca78e
ON
138 node = numa_node_id();
139 if (node != NUMA_NO_NODE) {
140 pol = &preferred_node_policy[node];
141 /* preferred_node_policy is not initialised early in boot */
142 if (pol->mode)
143 return pol;
5606e387
MG
144 }
145
f15ca78e 146 return &default_policy;
5606e387
MG
147}
148
37012946
DR
149static const struct mempolicy_operations {
150 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
213980c0 151 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
37012946
DR
152} mpol_ops[MPOL_MAX];
153
f5b087b5
DR
154static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
155{
6d556294 156 return pol->flags & MPOL_MODE_FLAGS;
4c50bc01
DR
157}
158
159static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
160 const nodemask_t *rel)
161{
162 nodemask_t tmp;
163 nodes_fold(tmp, *orig, nodes_weight(*rel));
164 nodes_onto(*ret, tmp, *rel);
f5b087b5
DR
165}
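/*
 * Worked example (illustration only): with MPOL_F_RELATIVE_NODES the
 * user's node numbers index into the set of allowed nodes. If the user
 * mask is {0,5} and the allowed set *rel is {4,6,8} (weight 3),
 * nodes_fold() wraps bit 5 to 5 % 3 = 2, giving {0,2}, and nodes_onto()
 * maps those positions onto the 0th and 2nd allowed nodes: result {4,8}.
 */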
166
37012946
DR
167static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
168{
169 if (nodes_empty(*nodes))
170 return -EINVAL;
171 pol->v.nodes = *nodes;
172 return 0;
173}
174
175static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
176{
177 if (!nodes)
fc36b8d3 178 pol->flags |= MPOL_F_LOCAL; /* local allocation */
37012946
DR
179 else if (nodes_empty(*nodes))
180 return -EINVAL; /* no allowed nodes */
181 else
182 pol->v.preferred_node = first_node(*nodes);
183 return 0;
184}
185
186static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
187{
859f7ef1 188 if (nodes_empty(*nodes))
37012946
DR
189 return -EINVAL;
190 pol->v.nodes = *nodes;
191 return 0;
192}
193
58568d2a
MX
194/*
195 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
196 * any, for the new policy. mpol_new() has already validated the nodes
197 * parameter with respect to the policy mode and flags. But, we need to
198 * handle an empty nodemask with MPOL_PREFERRED here.
199 *
200 * Must be called holding task's alloc_lock to protect task's mems_allowed
201 * and mempolicy. May also be called holding the mmap_semaphore for write.
202 */
4bfc4495
KH
203static int mpol_set_nodemask(struct mempolicy *pol,
204 const nodemask_t *nodes, struct nodemask_scratch *nsc)
58568d2a 205{
58568d2a
MX
206 int ret;
207
208 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
209 if (pol == NULL)
210 return 0;
01f13bd6 211 /* Check N_MEMORY */
4bfc4495 212 nodes_and(nsc->mask1,
01f13bd6 213 cpuset_current_mems_allowed, node_states[N_MEMORY]);
58568d2a
MX
214
215 VM_BUG_ON(!nodes);
216 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
217 nodes = NULL; /* explicit local allocation */
218 else {
219 if (pol->flags & MPOL_F_RELATIVE_NODES)
859f7ef1 220 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
58568d2a 221 else
4bfc4495
KH
222 nodes_and(nsc->mask2, *nodes, nsc->mask1);
223
58568d2a
MX
224 if (mpol_store_user_nodemask(pol))
225 pol->w.user_nodemask = *nodes;
226 else
227 pol->w.cpuset_mems_allowed =
228 cpuset_current_mems_allowed;
229 }
230
4bfc4495
KH
231 if (nodes)
232 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
233 else
234 ret = mpol_ops[pol->mode].create(pol, NULL);
58568d2a
MX
235 return ret;
236}
237
238/*
239 * This function just creates a new policy, does some checks and simple
240 * initialization. You must invoke mpol_set_nodemask() to set nodes.
241 */
028fec41
DR
242static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
243 nodemask_t *nodes)
1da177e4
LT
244{
245 struct mempolicy *policy;
246
028fec41 247 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
00ef2d2f 248 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
140d5a49 249
3e1f0645
DR
250 if (mode == MPOL_DEFAULT) {
251 if (nodes && !nodes_empty(*nodes))
37012946 252 return ERR_PTR(-EINVAL);
d3a71033 253 return NULL;
37012946 254 }
3e1f0645
DR
255 VM_BUG_ON(!nodes);
256
257 /*
258 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
259 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
260 * All other modes require a valid pointer to a non-empty nodemask.
261 */
262 if (mode == MPOL_PREFERRED) {
263 if (nodes_empty(*nodes)) {
264 if (((flags & MPOL_F_STATIC_NODES) ||
265 (flags & MPOL_F_RELATIVE_NODES)))
266 return ERR_PTR(-EINVAL);
3e1f0645 267 }
479e2802 268 } else if (mode == MPOL_LOCAL) {
8d303e44
PK
269 if (!nodes_empty(*nodes) ||
270 (flags & MPOL_F_STATIC_NODES) ||
271 (flags & MPOL_F_RELATIVE_NODES))
479e2802
PZ
272 return ERR_PTR(-EINVAL);
273 mode = MPOL_PREFERRED;
3e1f0645
DR
274 } else if (nodes_empty(*nodes))
275 return ERR_PTR(-EINVAL);
1da177e4
LT
276 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
277 if (!policy)
278 return ERR_PTR(-ENOMEM);
279 atomic_set(&policy->refcnt, 1);
45c4745a 280 policy->mode = mode;
3e1f0645 281 policy->flags = flags;
37012946 282
1da177e4 283 return policy;
37012946
DR
284}
285
52cd3b07
LS
286/* Slow path of a mpol destructor. */
287void __mpol_put(struct mempolicy *p)
288{
289 if (!atomic_dec_and_test(&p->refcnt))
290 return;
52cd3b07
LS
291 kmem_cache_free(policy_cache, p);
292}
293
213980c0 294static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
37012946
DR
295{
296}
297
213980c0 298static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
37012946
DR
299{
300 nodemask_t tmp;
301
302 if (pol->flags & MPOL_F_STATIC_NODES)
303 nodes_and(tmp, pol->w.user_nodemask, *nodes);
304 else if (pol->flags & MPOL_F_RELATIVE_NODES)
305 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
306 else {
213980c0
VB
307 nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
308 *nodes);
309 pol->w.cpuset_mems_allowed = tmp;
37012946 310 }
f5b087b5 311
708c1bbc
MX
312 if (nodes_empty(tmp))
313 tmp = *nodes;
314
213980c0 315 pol->v.nodes = tmp;
37012946
DR
316}
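/*
 * Worked example (illustration only) of the rebind above. Say a policy
 * was created with nodes {0,1} and the cpuset's mems later change from
 * {0,1} to {2,3}:
 *  - no mode flags:         nodes_remap() moves the policy to {2,3}.
 *  - MPOL_F_STATIC_NODES:   {0,1} & {2,3} is empty, so the nodes_empty()
 *                           fallback above leaves the policy on {2,3}.
 *  - MPOL_F_RELATIVE_NODES: positions 0 and 1 are mapped onto the new
 *                           allowed set, again giving {2,3}.
 */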
317
318static void mpol_rebind_preferred(struct mempolicy *pol,
213980c0 319 const nodemask_t *nodes)
37012946
DR
320{
321 nodemask_t tmp;
322
37012946
DR
323 if (pol->flags & MPOL_F_STATIC_NODES) {
324 int node = first_node(pol->w.user_nodemask);
325
fc36b8d3 326 if (node_isset(node, *nodes)) {
37012946 327 pol->v.preferred_node = node;
fc36b8d3
LS
328 pol->flags &= ~MPOL_F_LOCAL;
329 } else
330 pol->flags |= MPOL_F_LOCAL;
37012946
DR
331 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
332 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
333 pol->v.preferred_node = first_node(tmp);
fc36b8d3 334 } else if (!(pol->flags & MPOL_F_LOCAL)) {
37012946
DR
335 pol->v.preferred_node = node_remap(pol->v.preferred_node,
336 pol->w.cpuset_mems_allowed,
337 *nodes);
338 pol->w.cpuset_mems_allowed = *nodes;
339 }
1da177e4
LT
340}
341
708c1bbc
MX
342/*
343 * mpol_rebind_policy - Migrate a policy to a different set of nodes
344 *
213980c0
VB
345 * Per-vma policies are protected by mmap_sem. Allocations using per-task
346 * policies are protected by task->mems_allowed_seq to prevent a premature
347 * OOM/allocation failure due to parallel nodemask modification.
708c1bbc 348 */
213980c0 349static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
1d0d2680 350{
1d0d2680
DR
351 if (!pol)
352 return;
213980c0 353 if (!mpol_store_user_nodemask(pol) &&
1d0d2680
DR
354 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
355 return;
708c1bbc 356
213980c0 357 mpol_ops[pol->mode].rebind(pol, newmask);
1d0d2680
DR
358}
359
360/*
361 * Wrapper for mpol_rebind_policy() that just requires task
362 * pointer, and updates task mempolicy.
58568d2a
MX
363 *
364 * Called with task's alloc_lock held.
1d0d2680
DR
365 */
366
213980c0 367void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1d0d2680 368{
213980c0 369 mpol_rebind_policy(tsk->mempolicy, new);
1d0d2680
DR
370}
371
372/*
373 * Rebind each vma in mm to new nodemask.
374 *
375 * Call holding a reference to mm. Takes mm->mmap_sem during call.
376 */
377
378void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
379{
380 struct vm_area_struct *vma;
381
382 down_write(&mm->mmap_sem);
383 for (vma = mm->mmap; vma; vma = vma->vm_next)
213980c0 384 mpol_rebind_policy(vma->vm_policy, new);
1d0d2680
DR
385 up_write(&mm->mmap_sem);
386}
387
37012946
DR
388static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
389 [MPOL_DEFAULT] = {
390 .rebind = mpol_rebind_default,
391 },
392 [MPOL_INTERLEAVE] = {
393 .create = mpol_new_interleave,
394 .rebind = mpol_rebind_nodemask,
395 },
396 [MPOL_PREFERRED] = {
397 .create = mpol_new_preferred,
398 .rebind = mpol_rebind_preferred,
399 },
400 [MPOL_BIND] = {
401 .create = mpol_new_bind,
402 .rebind = mpol_rebind_nodemask,
403 },
404};
405
fc301289
CL
406static void migrate_page_add(struct page *page, struct list_head *pagelist,
407 unsigned long flags);
1a75a6c8 408
6f4576e3
NH
409struct queue_pages {
410 struct list_head *pagelist;
411 unsigned long flags;
412 nodemask_t *nmask;
413 struct vm_area_struct *prev;
414};
415
88aaa2a1
NH
416/*
417 * Check if the page's nid is in qp->nmask.
418 *
419 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
420 * outside qp->nmask instead.
421 */
422static inline bool queue_pages_required(struct page *page,
423 struct queue_pages *qp)
424{
425 int nid = page_to_nid(page);
426 unsigned long flags = qp->flags;
427
428 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
429}
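/*
 * For illustration, the equality above folds both cases into one test:
 *	nid in nmask,     no MPOL_MF_INVERT -> true
 *	nid in nmask,        MPOL_MF_INVERT -> false
 *	nid not in nmask, no MPOL_MF_INVERT -> false
 *	nid not in nmask,    MPOL_MF_INVERT -> true
 */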
430
c8633798
NH
431static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
432 unsigned long end, struct mm_walk *walk)
433{
434 int ret = 0;
435 struct page *page;
436 struct queue_pages *qp = walk->private;
437 unsigned long flags;
438
439 if (unlikely(is_pmd_migration_entry(*pmd))) {
440 ret = 1;
441 goto unlock;
442 }
443 page = pmd_page(*pmd);
444 if (is_huge_zero_page(page)) {
445 spin_unlock(ptl);
446 __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
447 goto out;
448 }
449 if (!thp_migration_supported()) {
450 get_page(page);
451 spin_unlock(ptl);
452 lock_page(page);
453 ret = split_huge_page(page);
454 unlock_page(page);
455 put_page(page);
456 goto out;
457 }
458 if (!queue_pages_required(page, qp)) {
459 ret = 1;
460 goto unlock;
461 }
462
463 ret = 1;
464 flags = qp->flags;
465 /* go to thp migration */
466 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
467 migrate_page_add(page, qp->pagelist, flags);
468unlock:
469 spin_unlock(ptl);
470out:
471 return ret;
472}
473
98094945
NH
474/*
475 * Scan through the pages, checking if they meet certain conditions,
476 * and move them to the pagelist if they do.
477 */
6f4576e3
NH
478static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
479 unsigned long end, struct mm_walk *walk)
1da177e4 480{
6f4576e3
NH
481 struct vm_area_struct *vma = walk->vma;
482 struct page *page;
483 struct queue_pages *qp = walk->private;
484 unsigned long flags = qp->flags;
c8633798 485 int ret;
91612e0d 486 pte_t *pte;
705e87c0 487 spinlock_t *ptl;
941150a3 488
c8633798
NH
489 ptl = pmd_trans_huge_lock(pmd, vma);
490 if (ptl) {
491 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
492 if (ret)
493 return 0;
248db92d 494 }
91612e0d 495
337d9abf
NH
496 if (pmd_trans_unstable(pmd))
497 return 0;
248db92d 498retry:
6f4576e3
NH
499 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
500 for (; addr != end; pte++, addr += PAGE_SIZE) {
91612e0d 501 if (!pte_present(*pte))
1da177e4 502 continue;
6aab341e
LT
503 page = vm_normal_page(vma, addr, *pte);
504 if (!page)
1da177e4 505 continue;
053837fc 506 /*
62b61f61
HD
507 * vm_normal_page() filters out zero pages, but there might
508 * still be PageReserved pages to skip, perhaps in a VDSO.
053837fc 509 */
b79bc0a0 510 if (PageReserved(page))
f4598c8b 511 continue;
88aaa2a1 512 if (!queue_pages_required(page, qp))
38e35860 513 continue;
c8633798 514 if (PageTransCompound(page) && !thp_migration_supported()) {
248db92d
KS
515 get_page(page);
516 pte_unmap_unlock(pte, ptl);
517 lock_page(page);
518 ret = split_huge_page(page);
519 unlock_page(page);
520 put_page(page);
521 /* Failed to split -- skip. */
522 if (ret) {
523 pte = pte_offset_map_lock(walk->mm, pmd,
524 addr, &ptl);
525 continue;
526 }
527 goto retry;
528 }
38e35860 529
77bf45e7 530 migrate_page_add(page, qp->pagelist, flags);
6f4576e3
NH
531 }
532 pte_unmap_unlock(pte - 1, ptl);
533 cond_resched();
534 return 0;
91612e0d
HD
535}
536
6f4576e3
NH
537static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
538 unsigned long addr, unsigned long end,
539 struct mm_walk *walk)
e2d8cf40
NH
540{
541#ifdef CONFIG_HUGETLB_PAGE
6f4576e3
NH
542 struct queue_pages *qp = walk->private;
543 unsigned long flags = qp->flags;
e2d8cf40 544 struct page *page;
cb900f41 545 spinlock_t *ptl;
d4c54919 546 pte_t entry;
e2d8cf40 547
6f4576e3
NH
548 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
549 entry = huge_ptep_get(pte);
d4c54919
NH
550 if (!pte_present(entry))
551 goto unlock;
552 page = pte_page(entry);
88aaa2a1 553 if (!queue_pages_required(page, qp))
e2d8cf40
NH
554 goto unlock;
555 /* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
556 if (flags & (MPOL_MF_MOVE_ALL) ||
557 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
6f4576e3 558 isolate_huge_page(page, qp->pagelist);
e2d8cf40 559unlock:
cb900f41 560 spin_unlock(ptl);
e2d8cf40
NH
561#else
562 BUG();
563#endif
91612e0d 564 return 0;
1da177e4
LT
565}
566
5877231f 567#ifdef CONFIG_NUMA_BALANCING
b24f53a0 568/*
4b10e7d5
MG
569 * This is used to mark a range of virtual addresses to be inaccessible.
570 * These are later cleared by a NUMA hinting fault. Depending on these
571 * faults, pages may be migrated for better NUMA placement.
572 *
573 * This is assuming that NUMA faults are handled using PROT_NONE. If
574 * an architecture makes a different choice, it will need further
575 * changes to the core.
b24f53a0 576 */
4b10e7d5
MG
577unsigned long change_prot_numa(struct vm_area_struct *vma,
578 unsigned long addr, unsigned long end)
b24f53a0 579{
4b10e7d5 580 int nr_updated;
b24f53a0 581
4d942466 582 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
03c5a6e1
MG
583 if (nr_updated)
584 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
b24f53a0 585
4b10e7d5 586 return nr_updated;
b24f53a0
LS
587}
588#else
589static unsigned long change_prot_numa(struct vm_area_struct *vma,
590 unsigned long addr, unsigned long end)
591{
592 return 0;
593}
5877231f 594#endif /* CONFIG_NUMA_BALANCING */
b24f53a0 595
6f4576e3
NH
596static int queue_pages_test_walk(unsigned long start, unsigned long end,
597 struct mm_walk *walk)
598{
599 struct vm_area_struct *vma = walk->vma;
600 struct queue_pages *qp = walk->private;
601 unsigned long endvma = vma->vm_end;
602 unsigned long flags = qp->flags;
603
77bf45e7 604 if (!vma_migratable(vma))
48684a65
NH
605 return 1;
606
6f4576e3
NH
607 if (endvma > end)
608 endvma = end;
609 if (vma->vm_start > start)
610 start = vma->vm_start;
611
612 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
613 if (!vma->vm_next && vma->vm_end < end)
614 return -EFAULT;
615 if (qp->prev && qp->prev->vm_end < vma->vm_start)
616 return -EFAULT;
617 }
618
619 qp->prev = vma;
620
6f4576e3
NH
621 if (flags & MPOL_MF_LAZY) {
622 /* Similar to task_numa_work, skip inaccessible VMAs */
4355c018
LC
623 if (!is_vm_hugetlb_page(vma) &&
624 (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
625 !(vma->vm_flags & VM_MIXEDMAP))
6f4576e3
NH
626 change_prot_numa(vma, start, endvma);
627 return 1;
628 }
629
77bf45e7
KS
630 /* queue pages from current vma */
631 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
6f4576e3
NH
632 return 0;
633 return 1;
634}
635
dc9aa5b9 636/*
98094945
NH
637 * Walk through page tables and collect pages to be migrated.
638 *
639 * If pages found in a given range are on a set of nodes (determined by
640 * @nodes and @flags), they are isolated and queued to the pagelist, which is
641 * passed via @private.
dc9aa5b9 642 */
d05f0cdc 643static int
98094945 644queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6f4576e3
NH
645 nodemask_t *nodes, unsigned long flags,
646 struct list_head *pagelist)
1da177e4 647{
6f4576e3
NH
648 struct queue_pages qp = {
649 .pagelist = pagelist,
650 .flags = flags,
651 .nmask = nodes,
652 .prev = NULL,
653 };
654 struct mm_walk queue_pages_walk = {
655 .hugetlb_entry = queue_pages_hugetlb,
656 .pmd_entry = queue_pages_pte_range,
657 .test_walk = queue_pages_test_walk,
658 .mm = mm,
659 .private = &qp,
660 };
661
662 return walk_page_range(start, end, &queue_pages_walk);
1da177e4
LT
663}
664
869833f2
KM
665/*
666 * Apply policy to a single VMA
667 * This must be called with the mmap_sem held for writing.
668 */
669static int vma_replace_policy(struct vm_area_struct *vma,
670 struct mempolicy *pol)
8d34694c 671{
869833f2
KM
672 int err;
673 struct mempolicy *old;
674 struct mempolicy *new;
8d34694c
KM
675
676 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
677 vma->vm_start, vma->vm_end, vma->vm_pgoff,
678 vma->vm_ops, vma->vm_file,
679 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
680
869833f2
KM
681 new = mpol_dup(pol);
682 if (IS_ERR(new))
683 return PTR_ERR(new);
684
685 if (vma->vm_ops && vma->vm_ops->set_policy) {
8d34694c 686 err = vma->vm_ops->set_policy(vma, new);
869833f2
KM
687 if (err)
688 goto err_out;
8d34694c 689 }
869833f2
KM
690
691 old = vma->vm_policy;
692 vma->vm_policy = new; /* protected by mmap_sem */
693 mpol_put(old);
694
695 return 0;
696 err_out:
697 mpol_put(new);
8d34694c
KM
698 return err;
699}
700
1da177e4 701/* Step 2: apply policy to a range and do splits. */
9d8cebd4
KM
702static int mbind_range(struct mm_struct *mm, unsigned long start,
703 unsigned long end, struct mempolicy *new_pol)
1da177e4
LT
704{
705 struct vm_area_struct *next;
9d8cebd4
KM
706 struct vm_area_struct *prev;
707 struct vm_area_struct *vma;
708 int err = 0;
e26a5114 709 pgoff_t pgoff;
9d8cebd4
KM
710 unsigned long vmstart;
711 unsigned long vmend;
1da177e4 712
097d5910 713 vma = find_vma(mm, start);
9d8cebd4
KM
714 if (!vma || vma->vm_start > start)
715 return -EFAULT;
716
097d5910 717 prev = vma->vm_prev;
e26a5114
KM
718 if (start > vma->vm_start)
719 prev = vma;
720
9d8cebd4 721 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
1da177e4 722 next = vma->vm_next;
9d8cebd4
KM
723 vmstart = max(start, vma->vm_start);
724 vmend = min(end, vma->vm_end);
725
e26a5114
KM
726 if (mpol_equal(vma_policy(vma), new_pol))
727 continue;
728
729 pgoff = vma->vm_pgoff +
730 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
9d8cebd4 731 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
19a809af
AA
732 vma->anon_vma, vma->vm_file, pgoff,
733 new_pol, vma->vm_userfaultfd_ctx);
9d8cebd4
KM
734 if (prev) {
735 vma = prev;
736 next = vma->vm_next;
3964acd0
ON
737 if (mpol_equal(vma_policy(vma), new_pol))
738 continue;
739 /* vma_merge() joined vma && vma->next, case 8 */
740 goto replace;
9d8cebd4
KM
741 }
742 if (vma->vm_start != vmstart) {
743 err = split_vma(vma->vm_mm, vma, vmstart, 1);
744 if (err)
745 goto out;
746 }
747 if (vma->vm_end != vmend) {
748 err = split_vma(vma->vm_mm, vma, vmend, 0);
749 if (err)
750 goto out;
751 }
3964acd0 752 replace:
869833f2 753 err = vma_replace_policy(vma, new_pol);
8d34694c
KM
754 if (err)
755 goto out;
1da177e4 756 }
9d8cebd4
KM
757
758 out:
1da177e4
LT
759 return err;
760}
761
1da177e4 762/* Set the process memory policy */
028fec41
DR
763static long do_set_mempolicy(unsigned short mode, unsigned short flags,
764 nodemask_t *nodes)
1da177e4 765{
58568d2a 766 struct mempolicy *new, *old;
4bfc4495 767 NODEMASK_SCRATCH(scratch);
58568d2a 768 int ret;
1da177e4 769
4bfc4495
KH
770 if (!scratch)
771 return -ENOMEM;
f4e53d91 772
4bfc4495
KH
773 new = mpol_new(mode, flags, nodes);
774 if (IS_ERR(new)) {
775 ret = PTR_ERR(new);
776 goto out;
777 }
2c7c3a7d 778
58568d2a 779 task_lock(current);
4bfc4495 780 ret = mpol_set_nodemask(new, nodes, scratch);
58568d2a
MX
781 if (ret) {
782 task_unlock(current);
58568d2a 783 mpol_put(new);
4bfc4495 784 goto out;
58568d2a
MX
785 }
786 old = current->mempolicy;
1da177e4 787 current->mempolicy = new;
45816682
VB
788 if (new && new->mode == MPOL_INTERLEAVE)
789 current->il_prev = MAX_NUMNODES-1;
58568d2a 790 task_unlock(current);
58568d2a 791 mpol_put(old);
4bfc4495
KH
792 ret = 0;
793out:
794 NODEMASK_SCRATCH_FREE(scratch);
795 return ret;
1da177e4
LT
796}
797
bea904d5
LS
798/*
799 * Return nodemask for policy for get_mempolicy() query
58568d2a
MX
800 *
801 * Called with task's alloc_lock held
bea904d5
LS
802 */
803static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 804{
dfcd3c0d 805 nodes_clear(*nodes);
bea904d5
LS
806 if (p == &default_policy)
807 return;
808
45c4745a 809 switch (p->mode) {
19770b32
MG
810 case MPOL_BIND:
811 /* Fall through */
1da177e4 812 case MPOL_INTERLEAVE:
dfcd3c0d 813 *nodes = p->v.nodes;
1da177e4
LT
814 break;
815 case MPOL_PREFERRED:
fc36b8d3 816 if (!(p->flags & MPOL_F_LOCAL))
dfcd3c0d 817 node_set(p->v.preferred_node, *nodes);
53f2556b 818 /* else return empty node mask for local allocation */
1da177e4
LT
819 break;
820 default:
821 BUG();
822 }
823}
824
d4edcf0d 825static int lookup_node(unsigned long addr)
1da177e4
LT
826{
827 struct page *p;
828 int err;
829
768ae309 830 err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
1da177e4
LT
831 if (err >= 0) {
832 err = page_to_nid(p);
833 put_page(p);
834 }
835 return err;
836}
837
1da177e4 838/* Retrieve NUMA policy */
dbcb0f19
AB
839static long do_get_mempolicy(int *policy, nodemask_t *nmask,
840 unsigned long addr, unsigned long flags)
1da177e4 841{
8bccd85f 842 int err;
1da177e4
LT
843 struct mm_struct *mm = current->mm;
844 struct vm_area_struct *vma = NULL;
845 struct mempolicy *pol = current->mempolicy;
846
754af6f5
LS
847 if (flags &
848 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 849 return -EINVAL;
754af6f5
LS
850
851 if (flags & MPOL_F_MEMS_ALLOWED) {
852 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
853 return -EINVAL;
854 *policy = 0; /* just so it's initialized */
58568d2a 855 task_lock(current);
754af6f5 856 *nmask = cpuset_current_mems_allowed;
58568d2a 857 task_unlock(current);
754af6f5
LS
858 return 0;
859 }
860
1da177e4 861 if (flags & MPOL_F_ADDR) {
bea904d5
LS
862 /*
863 * Do NOT fall back to task policy if the
864 * vma/shared policy at addr is NULL. We
865 * want to return MPOL_DEFAULT in this case.
866 */
1da177e4
LT
867 down_read(&mm->mmap_sem);
868 vma = find_vma_intersection(mm, addr, addr+1);
869 if (!vma) {
870 up_read(&mm->mmap_sem);
871 return -EFAULT;
872 }
873 if (vma->vm_ops && vma->vm_ops->get_policy)
874 pol = vma->vm_ops->get_policy(vma, addr);
875 else
876 pol = vma->vm_policy;
877 } else if (addr)
878 return -EINVAL;
879
880 if (!pol)
bea904d5 881 pol = &default_policy; /* indicates default behavior */
1da177e4
LT
882
883 if (flags & MPOL_F_NODE) {
884 if (flags & MPOL_F_ADDR) {
d4edcf0d 885 err = lookup_node(addr);
1da177e4
LT
886 if (err < 0)
887 goto out;
8bccd85f 888 *policy = err;
1da177e4 889 } else if (pol == current->mempolicy &&
45c4745a 890 pol->mode == MPOL_INTERLEAVE) {
45816682 891 *policy = next_node_in(current->il_prev, pol->v.nodes);
1da177e4
LT
892 } else {
893 err = -EINVAL;
894 goto out;
895 }
bea904d5
LS
896 } else {
897 *policy = pol == &default_policy ? MPOL_DEFAULT :
898 pol->mode;
d79df630
DR
899 /*
900 * Internal mempolicy flags must be masked off before exposing
901 * the policy to userspace.
902 */
903 *policy |= (pol->flags & MPOL_MODE_FLAGS);
bea904d5 904 }
1da177e4 905
1da177e4 906 err = 0;
58568d2a 907 if (nmask) {
c6b6ef8b
LS
908 if (mpol_store_user_nodemask(pol)) {
909 *nmask = pol->w.user_nodemask;
910 } else {
911 task_lock(current);
912 get_policy_nodemask(pol, nmask);
913 task_unlock(current);
914 }
58568d2a 915 }
1da177e4
LT
916
917 out:
52cd3b07 918 mpol_cond_put(pol);
1da177e4
LT
919 if (vma)
920 up_read(&current->mm->mmap_sem);
921 return err;
922}
923
b20a3503 924#ifdef CONFIG_MIGRATION
6ce3c4c0 925/*
c8633798 926 * page migration, thp tail pages can be passed.
6ce3c4c0 927 */
fc301289
CL
928static void migrate_page_add(struct page *page, struct list_head *pagelist,
929 unsigned long flags)
6ce3c4c0 930{
c8633798 931 struct page *head = compound_head(page);
6ce3c4c0 932 /*
fc301289 933 * Avoid migrating a page that is shared with others.
6ce3c4c0 934 */
c8633798
NH
935 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
936 if (!isolate_lru_page(head)) {
937 list_add_tail(&head->lru, pagelist);
938 mod_node_page_state(page_pgdat(head),
939 NR_ISOLATED_ANON + page_is_file_cache(head),
940 hpage_nr_pages(head));
62695a84
NP
941 }
942 }
7e2ab150 943}
6ce3c4c0 944
742755a1 945static struct page *new_node_page(struct page *page, unsigned long node, int **x)
95a402c3 946{
e2d8cf40
NH
947 if (PageHuge(page))
948 return alloc_huge_page_node(page_hstate(compound_head(page)),
949 node);
c8633798
NH
950 else if (thp_migration_supported() && PageTransHuge(page)) {
951 struct page *thp;
952
953 thp = alloc_pages_node(node,
954 (GFP_TRANSHUGE | __GFP_THISNODE),
955 HPAGE_PMD_ORDER);
956 if (!thp)
957 return NULL;
958 prep_transhuge_page(thp);
959 return thp;
960 } else
96db800f 961 return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
b360edb4 962 __GFP_THISNODE, 0);
95a402c3
CL
963}
964
7e2ab150
CL
965/*
966 * Migrate pages from one node to a target node.
967 * Returns error or the number of pages not migrated.
968 */
dbcb0f19
AB
969static int migrate_to_node(struct mm_struct *mm, int source, int dest,
970 int flags)
7e2ab150
CL
971{
972 nodemask_t nmask;
973 LIST_HEAD(pagelist);
974 int err = 0;
975
976 nodes_clear(nmask);
977 node_set(source, nmask);
6ce3c4c0 978
08270807
MK
979 /*
980 * This does not "check" the range but isolates all pages that
981 * need migration. Between passing in the full user address
982 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
983 */
984 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
98094945 985 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
7e2ab150
CL
986 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
987
cf608ac1 988 if (!list_empty(&pagelist)) {
68711a74 989 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
9c620e2b 990 MIGRATE_SYNC, MR_SYSCALL);
cf608ac1 991 if (err)
e2d8cf40 992 putback_movable_pages(&pagelist);
cf608ac1 993 }
95a402c3 994
7e2ab150 995 return err;
6ce3c4c0
CL
996}
997
39743889 998/*
7e2ab150
CL
999 * Move pages between the two nodesets so as to preserve the physical
1000 * layout as much as possible.
39743889
CL
1001 *
1002 * Returns the number of pages that could not be moved.
1003 */
0ce72d4f
AM
1004int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1005 const nodemask_t *to, int flags)
39743889 1006{
7e2ab150 1007 int busy = 0;
0aedadf9 1008 int err;
7e2ab150 1009 nodemask_t tmp;
39743889 1010
0aedadf9
CL
1011 err = migrate_prep();
1012 if (err)
1013 return err;
1014
53f2556b 1015 down_read(&mm->mmap_sem);
39743889 1016
da0aa138
KM
1017 /*
1018 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1019 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1020 * bit in 'tmp', and return that <source, dest> pair for migration.
1021 * The pair of nodemasks 'to' and 'from' define the map.
1022 *
1023 * If no pair of bits is found that way, fallback to picking some
1024 * pair of 'source' and 'dest' bits that are not the same. If the
1025 * 'source' and 'dest' bits are the same, this represents a node
1026 * that will be migrating to itself, so no pages need move.
1027 *
1028 * If no bits are left in 'tmp', or if all remaining bits left
1029 * in 'tmp' correspond to the same bit in 'to', return false
1030 * (nothing left to migrate).
1031 *
1032 * This lets us pick a pair of nodes to migrate between, such that
1033 * if possible the dest node is not already occupied by some other
1034 * source node, minimizing the risk of overloading the memory on a
1035 * node that would happen if we migrated incoming memory to a node
1036 * before migrating outgoing memory from that same node.
1037 *
1038 * A single scan of tmp is sufficient. As we go, we remember the
1039 * most recent <s, d> pair that moved (s != d). If we find a pair
1040 * that not only moved, but what's better, moved to an empty slot
1041 * (d is not set in tmp), then we break out then, with that pair.
ae0e47f0 1042 * Otherwise when we finish scanning tmp, we at least have the
da0aa138
KM
1043 * most recent <s, d> pair that moved. If we get all the way through
1044 * the scan of tmp without finding any node that moved, much less
1045 * moved to an empty node, then there is nothing left worth migrating.
1046 */
d4984711 1047
0ce72d4f 1048 tmp = *from;
7e2ab150
CL
1049 while (!nodes_empty(tmp)) {
1050 int s,d;
b76ac7e7 1051 int source = NUMA_NO_NODE;
7e2ab150
CL
1052 int dest = 0;
1053
1054 for_each_node_mask(s, tmp) {
4a5b18cc
LW
1055
1056 /*
1057 * do_migrate_pages() tries to maintain the relative
1058 * node relationship of the pages established between
1059 * threads and memory areas.
1060 *
1061 * However if the number of source nodes is not equal to
1062 * the number of destination nodes we can not preserve
1063 * this node relative relationship. In that case, skip
1064 * copying memory from a node that is in the destination
1065 * mask.
1066 *
1067 * Example: [2,3,4] -> [3,4,5] moves everything.
1068 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1069 */
1070
0ce72d4f
AM
1071 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1072 (node_isset(s, *to)))
4a5b18cc
LW
1073 continue;
1074
0ce72d4f 1075 d = node_remap(s, *from, *to);
7e2ab150
CL
1076 if (s == d)
1077 continue;
1078
1079 source = s; /* Node moved. Memorize */
1080 dest = d;
1081
1082 /* dest not in remaining from nodes? */
1083 if (!node_isset(dest, tmp))
1084 break;
1085 }
b76ac7e7 1086 if (source == NUMA_NO_NODE)
7e2ab150
CL
1087 break;
1088
1089 node_clear(source, tmp);
1090 err = migrate_to_node(mm, source, dest, flags);
1091 if (err > 0)
1092 busy += err;
1093 if (err < 0)
1094 break;
39743889
CL
1095 }
1096 up_read(&mm->mmap_sem);
7e2ab150
CL
1097 if (err < 0)
1098 return err;
1099 return busy;
b20a3503
CL
1100
1101}
1102
3ad33b24
LS
1103/*
1104 * Allocate a new page for page migration based on vma policy.
d05f0cdc 1105 * Start by assuming the page is mapped by the same vma as contains @start.
3ad33b24
LS
1106 * Search forward from there, if not. N.B., this assumes that the
1107 * list of pages handed to migrate_pages()--which is how we get here--
1108 * is in virtual address order.
1109 */
d05f0cdc 1110static struct page *new_page(struct page *page, unsigned long start, int **x)
95a402c3 1111{
d05f0cdc 1112 struct vm_area_struct *vma;
3ad33b24 1113 unsigned long uninitialized_var(address);
95a402c3 1114
d05f0cdc 1115 vma = find_vma(current->mm, start);
3ad33b24
LS
1116 while (vma) {
1117 address = page_address_in_vma(page, vma);
1118 if (address != -EFAULT)
1119 break;
1120 vma = vma->vm_next;
1121 }
11c731e8
WL
1122
1123 if (PageHuge(page)) {
cc81717e
MH
1124 BUG_ON(!vma);
1125 return alloc_huge_page_noerr(vma, address, 1);
c8633798
NH
1126 } else if (thp_migration_supported() && PageTransHuge(page)) {
1127 struct page *thp;
1128
1129 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1130 HPAGE_PMD_ORDER);
1131 if (!thp)
1132 return NULL;
1133 prep_transhuge_page(thp);
1134 return thp;
11c731e8 1135 }
0bf598d8 1136 /*
11c731e8 1137 * if !vma, alloc_page_vma() will use task or system default policy
0bf598d8 1138 */
0f556856
MH
1139 return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1140 vma, address);
95a402c3 1141}
b20a3503
CL
1142#else
1143
1144static void migrate_page_add(struct page *page, struct list_head *pagelist,
1145 unsigned long flags)
1146{
39743889
CL
1147}
1148
0ce72d4f
AM
1149int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1150 const nodemask_t *to, int flags)
b20a3503
CL
1151{
1152 return -ENOSYS;
1153}
95a402c3 1154
d05f0cdc 1155static struct page *new_page(struct page *page, unsigned long start, int **x)
95a402c3
CL
1156{
1157 return NULL;
1158}
b20a3503
CL
1159#endif
1160
dbcb0f19 1161static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
1162 unsigned short mode, unsigned short mode_flags,
1163 nodemask_t *nmask, unsigned long flags)
6ce3c4c0 1164{
6ce3c4c0
CL
1165 struct mm_struct *mm = current->mm;
1166 struct mempolicy *new;
1167 unsigned long end;
1168 int err;
1169 LIST_HEAD(pagelist);
1170
b24f53a0 1171 if (flags & ~(unsigned long)MPOL_MF_VALID)
6ce3c4c0 1172 return -EINVAL;
74c00241 1173 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
1174 return -EPERM;
1175
1176 if (start & ~PAGE_MASK)
1177 return -EINVAL;
1178
1179 if (mode == MPOL_DEFAULT)
1180 flags &= ~MPOL_MF_STRICT;
1181
1182 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1183 end = start + len;
1184
1185 if (end < start)
1186 return -EINVAL;
1187 if (end == start)
1188 return 0;
1189
028fec41 1190 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
1191 if (IS_ERR(new))
1192 return PTR_ERR(new);
1193
b24f53a0
LS
1194 if (flags & MPOL_MF_LAZY)
1195 new->flags |= MPOL_F_MOF;
1196
6ce3c4c0
CL
1197 /*
1198 * If we are using the default policy then operation
1199 * on discontinuous address spaces is okay after all
1200 */
1201 if (!new)
1202 flags |= MPOL_MF_DISCONTIG_OK;
1203
028fec41
DR
1204 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1205 start, start + len, mode, mode_flags,
00ef2d2f 1206 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
6ce3c4c0 1207
0aedadf9
CL
1208 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1209
1210 err = migrate_prep();
1211 if (err)
b05ca738 1212 goto mpol_out;
0aedadf9 1213 }
4bfc4495
KH
1214 {
1215 NODEMASK_SCRATCH(scratch);
1216 if (scratch) {
1217 down_write(&mm->mmap_sem);
1218 task_lock(current);
1219 err = mpol_set_nodemask(new, nmask, scratch);
1220 task_unlock(current);
1221 if (err)
1222 up_write(&mm->mmap_sem);
1223 } else
1224 err = -ENOMEM;
1225 NODEMASK_SCRATCH_FREE(scratch);
1226 }
b05ca738
KM
1227 if (err)
1228 goto mpol_out;
1229
d05f0cdc 1230 err = queue_pages_range(mm, start, end, nmask,
6ce3c4c0 1231 flags | MPOL_MF_INVERT, &pagelist);
d05f0cdc 1232 if (!err)
9d8cebd4 1233 err = mbind_range(mm, start, end, new);
7e2ab150 1234
b24f53a0
LS
1235 if (!err) {
1236 int nr_failed = 0;
1237
cf608ac1 1238 if (!list_empty(&pagelist)) {
b24f53a0 1239 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
d05f0cdc
HD
1240 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1241 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
cf608ac1 1242 if (nr_failed)
74060e4d 1243 putback_movable_pages(&pagelist);
cf608ac1 1244 }
6ce3c4c0 1245
b24f53a0 1246 if (nr_failed && (flags & MPOL_MF_STRICT))
6ce3c4c0 1247 err = -EIO;
ab8a3e14 1248 } else
b0e5fd73 1249 putback_movable_pages(&pagelist);
b20a3503 1250
6ce3c4c0 1251 up_write(&mm->mmap_sem);
b05ca738 1252 mpol_out:
f0be3d32 1253 mpol_put(new);
6ce3c4c0
CL
1254 return err;
1255}
1256
8bccd85f
CL
1257/*
1258 * User space interface with variable sized bitmaps for nodelists.
1259 */
1260
1261/* Copy a node mask from user space. */
39743889 1262static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
1263 unsigned long maxnode)
1264{
1265 unsigned long k;
1266 unsigned long nlongs;
1267 unsigned long endmask;
1268
1269 --maxnode;
1270 nodes_clear(*nodes);
1271 if (maxnode == 0 || !nmask)
1272 return 0;
a9c930ba 1273 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 1274 return -EINVAL;
8bccd85f
CL
1275
1276 nlongs = BITS_TO_LONGS(maxnode);
1277 if ((maxnode % BITS_PER_LONG) == 0)
1278 endmask = ~0UL;
1279 else
1280 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1281
1282 /* When the user specified more nodes than supported just check
1283 that the unsupported part is all zero. */
1284 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1285 if (nlongs > PAGE_SIZE/sizeof(long))
1286 return -EINVAL;
1287 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1288 unsigned long t;
1289 if (get_user(t, nmask + k))
1290 return -EFAULT;
1291 if (k == nlongs - 1) {
1292 if (t & endmask)
1293 return -EINVAL;
1294 } else if (t)
1295 return -EINVAL;
1296 }
1297 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1298 endmask = ~0UL;
1299 }
1300
1301 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1302 return -EFAULT;
1303 nodes_addr(*nodes)[nlongs-1] &= endmask;
1304 return 0;
1305}
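/*
 * Worked example (illustration only) of the arithmetic above: a caller
 * passing maxnode = 17 ends up with maxnode = 16 after the decrement, so
 * nlongs = BITS_TO_LONGS(16) = 1 and endmask = (1UL << 16) - 1; only bits
 * 0..15 of the first user long are kept, and any user longs beyond
 * BITS_TO_LONGS(MAX_NUMNODES) must be all zero.
 */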
1306
1307/* Copy a kernel node mask to user space */
1308static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1309 nodemask_t *nodes)
1310{
1311 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1312 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1313
1314 if (copy > nbytes) {
1315 if (copy > PAGE_SIZE)
1316 return -EINVAL;
1317 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1318 return -EFAULT;
1319 copy = nbytes;
1320 }
1321 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1322}
1323
938bb9f5 1324SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
f7f28ca9 1325 unsigned long, mode, const unsigned long __user *, nmask,
938bb9f5 1326 unsigned long, maxnode, unsigned, flags)
8bccd85f
CL
1327{
1328 nodemask_t nodes;
1329 int err;
028fec41 1330 unsigned short mode_flags;
8bccd85f 1331
028fec41
DR
1332 mode_flags = mode & MPOL_MODE_FLAGS;
1333 mode &= ~MPOL_MODE_FLAGS;
a3b51e01
DR
1334 if (mode >= MPOL_MAX)
1335 return -EINVAL;
4c50bc01
DR
1336 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1337 (mode_flags & MPOL_F_RELATIVE_NODES))
1338 return -EINVAL;
8bccd85f
CL
1339 err = get_nodes(&nodes, nmask, maxnode);
1340 if (err)
1341 return err;
028fec41 1342 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
8bccd85f
CL
1343}
1344
1345/* Set the process memory policy */
23c8902d 1346SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
938bb9f5 1347 unsigned long, maxnode)
8bccd85f
CL
1348{
1349 int err;
1350 nodemask_t nodes;
028fec41 1351 unsigned short flags;
8bccd85f 1352
028fec41
DR
1353 flags = mode & MPOL_MODE_FLAGS;
1354 mode &= ~MPOL_MODE_FLAGS;
1355 if ((unsigned int)mode >= MPOL_MAX)
8bccd85f 1356 return -EINVAL;
4c50bc01
DR
1357 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1358 return -EINVAL;
8bccd85f
CL
1359 err = get_nodes(&nodes, nmask, maxnode);
1360 if (err)
1361 return err;
028fec41 1362 return do_set_mempolicy(mode, flags, &nodes);
8bccd85f
CL
1363}
1364
938bb9f5
HC
1365SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1366 const unsigned long __user *, old_nodes,
1367 const unsigned long __user *, new_nodes)
39743889 1368{
596d7cfa 1369 struct mm_struct *mm = NULL;
39743889 1370 struct task_struct *task;
39743889
CL
1371 nodemask_t task_nodes;
1372 int err;
596d7cfa
KM
1373 nodemask_t *old;
1374 nodemask_t *new;
1375 NODEMASK_SCRATCH(scratch);
1376
1377 if (!scratch)
1378 return -ENOMEM;
39743889 1379
596d7cfa
KM
1380 old = &scratch->mask1;
1381 new = &scratch->mask2;
1382
1383 err = get_nodes(old, old_nodes, maxnode);
39743889 1384 if (err)
596d7cfa 1385 goto out;
39743889 1386
596d7cfa 1387 err = get_nodes(new, new_nodes, maxnode);
39743889 1388 if (err)
596d7cfa 1389 goto out;
39743889
CL
1390
1391 /* Find the mm_struct */
55cfaa3c 1392 rcu_read_lock();
228ebcbe 1393 task = pid ? find_task_by_vpid(pid) : current;
39743889 1394 if (!task) {
55cfaa3c 1395 rcu_read_unlock();
596d7cfa
KM
1396 err = -ESRCH;
1397 goto out;
39743889 1398 }
3268c63e 1399 get_task_struct(task);
39743889 1400
596d7cfa 1401 err = -EINVAL;
39743889
CL
1402
1403 /*
31367466
OE
1404 * Check if this process has the right to modify the specified process.
1405 * Use the regular "ptrace_may_access()" checks.
39743889 1406 */
31367466 1407 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 1408 rcu_read_unlock();
39743889 1409 err = -EPERM;
3268c63e 1410 goto out_put;
39743889 1411 }
c69e8d9c 1412 rcu_read_unlock();
39743889
CL
1413
1414 task_nodes = cpuset_mems_allowed(task);
1415 /* Is the user allowed to access the target nodes? */
596d7cfa 1416 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889 1417 err = -EPERM;
3268c63e 1418 goto out_put;
39743889
CL
1419 }
1420
01f13bd6 1421 if (!nodes_subset(*new, node_states[N_MEMORY])) {
3b42d28b 1422 err = -EINVAL;
3268c63e 1423 goto out_put;
3b42d28b
CL
1424 }
1425
86c3a764
DQ
1426 err = security_task_movememory(task);
1427 if (err)
3268c63e 1428 goto out_put;
86c3a764 1429
3268c63e
CL
1430 mm = get_task_mm(task);
1431 put_task_struct(task);
f2a9ef88
SL
1432
1433 if (!mm) {
3268c63e 1434 err = -EINVAL;
f2a9ef88
SL
1435 goto out;
1436 }
1437
1438 err = do_migrate_pages(mm, old, new,
1439 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
3268c63e
CL
1440
1441 mmput(mm);
1442out:
596d7cfa
KM
1443 NODEMASK_SCRATCH_FREE(scratch);
1444
39743889 1445 return err;
3268c63e
CL
1446
1447out_put:
1448 put_task_struct(task);
1449 goto out;
1450
39743889
CL
1451}
1452
1453
8bccd85f 1454/* Retrieve NUMA policy */
938bb9f5
HC
1455SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1456 unsigned long __user *, nmask, unsigned long, maxnode,
1457 unsigned long, addr, unsigned long, flags)
8bccd85f 1458{
dbcb0f19
AB
1459 int err;
1460 int uninitialized_var(pval);
8bccd85f
CL
1461 nodemask_t nodes;
1462
1463 if (nmask != NULL && maxnode < MAX_NUMNODES)
1464 return -EINVAL;
1465
1466 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1467
1468 if (err)
1469 return err;
1470
1471 if (policy && put_user(pval, policy))
1472 return -EFAULT;
1473
1474 if (nmask)
1475 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1476
1477 return err;
1478}
1479
1da177e4
LT
1480#ifdef CONFIG_COMPAT
1481
c93e0f6c
HC
1482COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1483 compat_ulong_t __user *, nmask,
1484 compat_ulong_t, maxnode,
1485 compat_ulong_t, addr, compat_ulong_t, flags)
1da177e4
LT
1486{
1487 long err;
1488 unsigned long __user *nm = NULL;
1489 unsigned long nr_bits, alloc_size;
1490 DECLARE_BITMAP(bm, MAX_NUMNODES);
1491
1492 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1493 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1494
1495 if (nmask)
1496 nm = compat_alloc_user_space(alloc_size);
1497
1498 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1499
1500 if (!err && nmask) {
2bbff6c7
KH
1501 unsigned long copy_size;
1502 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1503 err = copy_from_user(bm, nm, copy_size);
1da177e4
LT
1504 /* ensure entire bitmap is zeroed */
1505 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1506 err |= compat_put_bitmap(nmask, bm, nr_bits);
1507 }
1508
1509 return err;
1510}
1511
c93e0f6c
HC
1512COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1513 compat_ulong_t, maxnode)
1da177e4 1514{
1da177e4
LT
1515 unsigned long __user *nm = NULL;
1516 unsigned long nr_bits, alloc_size;
1517 DECLARE_BITMAP(bm, MAX_NUMNODES);
1518
1519 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1520 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1521
1522 if (nmask) {
cf01fb99
CS
1523 if (compat_get_bitmap(bm, nmask, nr_bits))
1524 return -EFAULT;
1da177e4 1525 nm = compat_alloc_user_space(alloc_size);
cf01fb99
CS
1526 if (copy_to_user(nm, bm, alloc_size))
1527 return -EFAULT;
1da177e4
LT
1528 }
1529
1da177e4
LT
1530 return sys_set_mempolicy(mode, nm, nr_bits+1);
1531}
1532
c93e0f6c
HC
1533COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1534 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1535 compat_ulong_t, maxnode, compat_ulong_t, flags)
1da177e4 1536{
1da177e4
LT
1537 unsigned long __user *nm = NULL;
1538 unsigned long nr_bits, alloc_size;
dfcd3c0d 1539 nodemask_t bm;
1da177e4
LT
1540
1541 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1542 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1543
1544 if (nmask) {
cf01fb99
CS
1545 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1546 return -EFAULT;
1da177e4 1547 nm = compat_alloc_user_space(alloc_size);
cf01fb99
CS
1548 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1549 return -EFAULT;
1da177e4
LT
1550 }
1551
1da177e4
LT
1552 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1553}
1554
1555#endif
1556
74d2c3a0
ON
1557struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1558 unsigned long addr)
1da177e4 1559{
8d90274b 1560 struct mempolicy *pol = NULL;
1da177e4
LT
1561
1562 if (vma) {
480eccf9 1563 if (vma->vm_ops && vma->vm_ops->get_policy) {
8d90274b 1564 pol = vma->vm_ops->get_policy(vma, addr);
00442ad0 1565 } else if (vma->vm_policy) {
1da177e4 1566 pol = vma->vm_policy;
00442ad0
MG
1567
1568 /*
1569 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1570 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1571 * count on these policies which will be dropped by
1572 * mpol_cond_put() later
1573 */
1574 if (mpol_needs_cond_ref(pol))
1575 mpol_get(pol);
1576 }
1da177e4 1577 }
f15ca78e 1578
74d2c3a0
ON
1579 return pol;
1580}
1581
1582/*
dd6eecb9 1583 * get_vma_policy(@vma, @addr)
74d2c3a0
ON
1584 * @vma: virtual memory area whose policy is sought
1585 * @addr: address in @vma for shared policy lookup
1586 *
1587 * Returns effective policy for a VMA at specified address.
dd6eecb9 1588 * Falls back to current->mempolicy or system default policy, as necessary.
74d2c3a0
ON
1589 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1590 * count--added by the get_policy() vm_op, as appropriate--to protect against
1591 * freeing by another task. It is the caller's responsibility to free the
1592 * extra reference for shared policies.
1593 */
dd6eecb9
ON
1594static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1595 unsigned long addr)
74d2c3a0
ON
1596{
1597 struct mempolicy *pol = __get_vma_policy(vma, addr);
1598
8d90274b 1599 if (!pol)
dd6eecb9 1600 pol = get_task_policy(current);
8d90274b 1601
1da177e4
LT
1602 return pol;
1603}
1604
6b6482bb 1605bool vma_policy_mof(struct vm_area_struct *vma)
fc314724 1606{
6b6482bb 1607 struct mempolicy *pol;
fc314724 1608
6b6482bb
ON
1609 if (vma->vm_ops && vma->vm_ops->get_policy) {
1610 bool ret = false;
fc314724 1611
6b6482bb
ON
1612 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1613 if (pol && (pol->flags & MPOL_F_MOF))
1614 ret = true;
1615 mpol_cond_put(pol);
8d90274b 1616
6b6482bb 1617 return ret;
fc314724
MG
1618 }
1619
6b6482bb 1620 pol = vma->vm_policy;
8d90274b 1621 if (!pol)
6b6482bb 1622 pol = get_task_policy(current);
8d90274b 1623
fc314724
MG
1624 return pol->flags & MPOL_F_MOF;
1625}
1626
d3eb1570
LJ
1627static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1628{
1629 enum zone_type dynamic_policy_zone = policy_zone;
1630
1631 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1632
1633 /*
1634 * if policy->v.nodes has movable memory only,
1635 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1636 *
1637 * policy->v.nodes is intersected with node_states[N_MEMORY],
1638 * so if the following test fails, it implies
1639 * policy->v.nodes has movable memory only.
1640 */
1641 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1642 dynamic_policy_zone = ZONE_MOVABLE;
1643
1644 return zone >= dynamic_policy_zone;
1645}
1646
52cd3b07
LS
1647/*
1648 * Return a nodemask representing a mempolicy for filtering nodes for
1649 * page allocation
1650 */
1651static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32
MG
1652{
1653 /* Lower zones don't get a nodemask applied for MPOL_BIND */
45c4745a 1654 if (unlikely(policy->mode == MPOL_BIND) &&
d3eb1570 1655 apply_policy_zone(policy, gfp_zone(gfp)) &&
19770b32
MG
1656 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1657 return &policy->v.nodes;
1658
1659 return NULL;
1660}
1661
04ec6264
VB
1662/* Return the node id preferred by the given mempolicy, or the given id */
1663static int policy_node(gfp_t gfp, struct mempolicy *policy,
1664 int nd)
1da177e4 1665{
6d840958
MH
1666 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1667 nd = policy->v.preferred_node;
1668 else {
19770b32 1669 /*
6d840958
MH
1670 * __GFP_THISNODE shouldn't even be used with the bind policy
1671 * because we might easily break the expectation to stay on the
1672 * requested node and not break the policy.
19770b32 1673 */
6d840958 1674 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1da177e4 1675 }
6d840958 1676
04ec6264 1677 return nd;
1da177e4
LT
1678}
1679
1680/* Do dynamic interleaving for a process */
1681static unsigned interleave_nodes(struct mempolicy *policy)
1682{
45816682 1683 unsigned next;
1da177e4
LT
1684 struct task_struct *me = current;
1685
45816682 1686 next = next_node_in(me->il_prev, policy->v.nodes);
f5b087b5 1687 if (next < MAX_NUMNODES)
45816682
VB
1688 me->il_prev = next;
1689 return next;
1da177e4
LT
1690}
1691
dc85da15
CL
1692/*
1693 * Depending on the memory policy provide a node from which to allocate the
1694 * next slab entry.
1695 */
2a389610 1696unsigned int mempolicy_slab_node(void)
dc85da15 1697{
e7b691b0 1698 struct mempolicy *policy;
2a389610 1699 int node = numa_mem_id();
e7b691b0
AK
1700
1701 if (in_interrupt())
2a389610 1702 return node;
e7b691b0
AK
1703
1704 policy = current->mempolicy;
fc36b8d3 1705 if (!policy || policy->flags & MPOL_F_LOCAL)
2a389610 1706 return node;
bea904d5
LS
1707
1708 switch (policy->mode) {
1709 case MPOL_PREFERRED:
fc36b8d3
LS
1710 /*
1711 * handled MPOL_F_LOCAL above
1712 */
1713 return policy->v.preferred_node;
765c4507 1714
dc85da15
CL
1715 case MPOL_INTERLEAVE:
1716 return interleave_nodes(policy);
1717
dd1a239f 1718 case MPOL_BIND: {
c33d6c06
MG
1719 struct zoneref *z;
1720
dc85da15
CL
1721 /*
1722 * Follow bind policy behavior and start allocation at the
1723 * first node.
1724 */
19770b32 1725 struct zonelist *zonelist;
19770b32 1726 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
c9634cf0 1727 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
c33d6c06
MG
1728 z = first_zones_zonelist(zonelist, highest_zoneidx,
1729 &policy->v.nodes);
1730 return z->zone ? z->zone->node : node;
dd1a239f 1731 }
dc85da15 1732
dc85da15 1733 default:
bea904d5 1734 BUG();
dc85da15
CL
1735 }
1736}
1737
fee83b3a
AM
1738/*
1739 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1740 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1741 * number of present nodes.
1742 */
98c70baa 1743static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1da177e4 1744{
dfcd3c0d 1745 unsigned nnodes = nodes_weight(pol->v.nodes);
f5b087b5 1746 unsigned target;
fee83b3a
AM
1747 int i;
1748 int nid;
1da177e4 1749
f5b087b5
DR
1750 if (!nnodes)
1751 return numa_node_id();
fee83b3a
AM
1752 target = (unsigned int)n % nnodes;
1753 nid = first_node(pol->v.nodes);
1754 for (i = 0; i < target; i++)
dfcd3c0d 1755 nid = next_node(nid, pol->v.nodes);
1da177e4
LT
1756 return nid;
1757}
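/*
 * Worked example (illustration only): with pol->v.nodes = {0,2,5}
 * (nnodes = 3) and n = 7, target = 7 % 3 = 1, so one next_node() step
 * from first_node() = 0 lands on node 2, which is returned.
 */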
1758
5da7ca86
CL
1759/* Determine a node number for interleave */
1760static inline unsigned interleave_nid(struct mempolicy *pol,
1761 struct vm_area_struct *vma, unsigned long addr, int shift)
1762{
1763 if (vma) {
1764 unsigned long off;
1765
3b98b087
NA
1766 /*
1767 * for small pages, there is no difference between
1768 * shift and PAGE_SHIFT, so the bit-shift is safe.
1769 * for huge pages, since vm_pgoff is in units of small
1770 * pages, we need to shift off the always 0 bits to get
1771 * a useful offset.
1772 */
1773 BUG_ON(shift < PAGE_SHIFT);
1774 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86 1775 off += (addr - vma->vm_start) >> shift;
98c70baa 1776 return offset_il_node(pol, off);
5da7ca86
CL
1777 } else
1778 return interleave_nodes(pol);
1779}
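For concreteness, a standalone sketch of the offset arithmetic above with made-up values for a 2MB huge-page VMA (shift = 21, PAGE_SHIFT = 12); the resulting offset then indexes the interleave set the same way offset_il_node() does.

#include <stdio.h>

int main(void)
{
        unsigned int shift = 21, page_shift = 12;       /* 2MB huge pages */
        unsigned long vm_pgoff = 0x200;                 /* file offset, small-page units */
        unsigned long vm_start = 0x400000, addr = 0x800000;
        const int nodes[] = { 0, 2, 5 };                /* pretend interleave set */
        unsigned long off;

        off = vm_pgoff >> (shift - page_shift);         /* 0x200 >> 9 = 1 */
        off += (addr - vm_start) >> shift;              /* 4MB >> 21 = 2 */

        /* offset_il_node(): target = off % nnodes = 3 % 3 = 0 -> node 0 */
        printf("off=%lu node=%d\n", off, nodes[off % 3]);
        return 0;
}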
1780
00ac59ad 1781#ifdef CONFIG_HUGETLBFS
480eccf9 1782/*
04ec6264 1783 * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
b46e14ac
FF
1784 * @vma: virtual memory area whose policy is sought
1785 * @addr: address in @vma for shared policy lookup and interleave policy
1786 * @gfp_flags: for requested zone
1787 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1788 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 1789 *
04ec6264 1790 * Returns a nid suitable for a huge page allocation and a pointer
52cd3b07
LS
1791 * to the struct mempolicy for conditional unref after allocation.
 1792 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1793 * @nodemask for filtering the zonelist.
c0ff7453 1794 *
d26914d1 1795 * Must be protected by read_mems_allowed_begin()
480eccf9 1796 */
04ec6264
VB
1797int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1798 struct mempolicy **mpol, nodemask_t **nodemask)
5da7ca86 1799{
04ec6264 1800 int nid;
5da7ca86 1801
dd6eecb9 1802 *mpol = get_vma_policy(vma, addr);
19770b32 1803 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 1804
52cd3b07 1805 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
04ec6264
VB
1806 nid = interleave_nid(*mpol, vma, addr,
1807 huge_page_shift(hstate_vma(vma)));
52cd3b07 1808 } else {
04ec6264 1809 nid = policy_node(gfp_flags, *mpol, numa_node_id());
52cd3b07
LS
1810 if ((*mpol)->mode == MPOL_BIND)
1811 *nodemask = &(*mpol)->v.nodes;
480eccf9 1812 }
04ec6264 1813 return nid;
5da7ca86 1814}
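A hedged sketch, in kernel context rather than as a standalone program, of the caller pattern huge_node() expects: pick nid and nodemask under the read_mems_allowed_begin()/read_mems_allowed_retry() sequence, allocate, then drop the conditional policy reference. dequeue_from_node() is a hypothetical stand-in for the real hugetlb freelist code, not an existing kernel API.

static struct page *example_dequeue_huge_page(struct vm_area_struct *vma,
                                              unsigned long addr, gfp_t gfp)
{
        struct mempolicy *mpol;
        nodemask_t *nodemask;
        struct page *page;
        unsigned int cpuset_mems_cookie;
        int nid;

        do {
                cpuset_mems_cookie = read_mems_allowed_begin();
                nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
                page = dequeue_from_node(nid, nodemask);        /* hypothetical helper */
                mpol_cond_put(mpol);    /* drop ref taken via get_vma_policy() */
        } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

        return page;
}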
06808b08
LS
1815
1816/*
1817 * init_nodemask_of_mempolicy
1818 *
1819 * If the current task's mempolicy is "default" [NULL], return 'false'
1820 * to indicate default policy. Otherwise, extract the policy nodemask
1821 * for 'bind' or 'interleave' policy into the argument nodemask, or
1822 * initialize the argument nodemask to contain the single node for
1823 * 'preferred' or 'local' policy and return 'true' to indicate presence
1824 * of non-default mempolicy.
1825 *
1826 * We don't bother with reference counting the mempolicy [mpol_get/put]
 1827 * because the current task is examining its own mempolicy and a task's
1828 * mempolicy is only ever changed by the task itself.
1829 *
1830 * N.B., it is the caller's responsibility to free a returned nodemask.
1831 */
1832bool init_nodemask_of_mempolicy(nodemask_t *mask)
1833{
1834 struct mempolicy *mempolicy;
1835 int nid;
1836
1837 if (!(mask && current->mempolicy))
1838 return false;
1839
c0ff7453 1840 task_lock(current);
06808b08
LS
1841 mempolicy = current->mempolicy;
1842 switch (mempolicy->mode) {
1843 case MPOL_PREFERRED:
1844 if (mempolicy->flags & MPOL_F_LOCAL)
1845 nid = numa_node_id();
1846 else
1847 nid = mempolicy->v.preferred_node;
1848 init_nodemask_of_node(mask, nid);
1849 break;
1850
1851 case MPOL_BIND:
1852 /* Fall through */
1853 case MPOL_INTERLEAVE:
1854 *mask = mempolicy->v.nodes;
1855 break;
1856
1857 default:
1858 BUG();
1859 }
c0ff7453 1860 task_unlock(current);
06808b08
LS
1861
1862 return true;
1863}
00ac59ad 1864#endif
5da7ca86 1865
6f48d0eb
DR
1866/*
1867 * mempolicy_nodemask_intersects
1868 *
1869 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1870 * policy. Otherwise, check for intersection between mask and the policy
 1871 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1872 * policy, always return true since it may allocate elsewhere on fallback.
1873 *
1874 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1875 */
1876bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1877 const nodemask_t *mask)
1878{
1879 struct mempolicy *mempolicy;
1880 bool ret = true;
1881
1882 if (!mask)
1883 return ret;
1884 task_lock(tsk);
1885 mempolicy = tsk->mempolicy;
1886 if (!mempolicy)
1887 goto out;
1888
1889 switch (mempolicy->mode) {
1890 case MPOL_PREFERRED:
1891 /*
 1892 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
 1893 * allocate from; the task may fall back to other nodes under OOM.
 1894 * Thus, it's possible for tsk to have allocated memory from
 1895 * nodes in mask.
1896 */
1897 break;
1898 case MPOL_BIND:
1899 case MPOL_INTERLEAVE:
1900 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1901 break;
1902 default:
1903 BUG();
1904 }
1905out:
1906 task_unlock(tsk);
1907 return ret;
1908}
1909
1da177e4
LT
1910/* Allocate a page in interleaved policy.
1911 Own path because it needs to do special accounting. */
662f3a0b
AK
1912static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1913 unsigned nid)
1da177e4 1914{
1da177e4
LT
1915 struct page *page;
1916
04ec6264 1917 page = __alloc_pages(gfp, order, nid);
de55c8b2
AR
1918 if (page && page_to_nid(page) == nid) {
1919 preempt_disable();
1920 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
1921 preempt_enable();
1922 }
1da177e4
LT
1923 return page;
1924}
1925
1926/**
0bbbc0b3 1927 * alloc_pages_vma - Allocate a page for a VMA.
1da177e4
LT
1928 *
1929 * @gfp:
1930 * %GFP_USER user allocation.
1931 * %GFP_KERNEL kernel allocations,
1932 * %GFP_HIGHMEM highmem/user allocations,
1933 * %GFP_FS allocation should not call back into a file system.
1934 * %GFP_ATOMIC don't sleep.
1935 *
0bbbc0b3 1936 * @order: Order of the GFP allocation.
1da177e4
LT
1937 * @vma: Pointer to VMA or NULL if not available.
1938 * @addr: Virtual Address of the allocation. Must be inside the VMA.
be97a41b
VB
1939 * @node: Which node to prefer for allocation (modulo policy).
1940 * @hugepage: for hugepages try only the preferred node if possible
1da177e4
LT
1941 *
1942 * This function allocates a page from the kernel page pool and applies
1943 * a NUMA policy associated with the VMA or the current process.
 1944 * When @vma is not NULL, the caller must hold down_read on the mmap_sem of the
1945 * mm_struct of the VMA to prevent it from going away. Should be used for
be97a41b
VB
1946 * all allocations for pages that will be mapped into user space. Returns
1947 * NULL when no page can be allocated.
1da177e4
LT
1948 */
1949struct page *
0bbbc0b3 1950alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
be97a41b 1951 unsigned long addr, int node, bool hugepage)
1da177e4 1952{
cc9a6c87 1953 struct mempolicy *pol;
c0ff7453 1954 struct page *page;
04ec6264 1955 int preferred_nid;
be97a41b 1956 nodemask_t *nmask;
cc9a6c87 1957
dd6eecb9 1958 pol = get_vma_policy(vma, addr);
1da177e4 1959
0867a57c
VB
1960 if (pol->mode == MPOL_INTERLEAVE) {
1961 unsigned nid;
1962
1963 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1964 mpol_cond_put(pol);
1965 page = alloc_page_interleave(gfp, order, nid);
1966 goto out;
1967 }
1968
1969 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
1970 int hpage_node = node;
1971
be97a41b
VB
1972 /*
1973 * For hugepage allocation and non-interleave policy which
0867a57c
VB
1974 * allows the current node (or other explicitly preferred
1975 * node) we only try to allocate from the current/preferred
1976 * node and don't fall back to other nodes, as the cost of
1977 * remote accesses would likely offset THP benefits.
be97a41b
VB
1978 *
1979 * If the policy is interleave, or does not allow the current
1980 * node in its nodemask, we allocate the standard way.
1981 */
0867a57c
VB
1982 if (pol->mode == MPOL_PREFERRED &&
1983 !(pol->flags & MPOL_F_LOCAL))
1984 hpage_node = pol->v.preferred_node;
1985
be97a41b 1986 nmask = policy_nodemask(gfp, pol);
0867a57c 1987 if (!nmask || node_isset(hpage_node, *nmask)) {
be97a41b 1988 mpol_cond_put(pol);
96db800f 1989 page = __alloc_pages_node(hpage_node,
5265047a 1990 gfp | __GFP_THISNODE, order);
be97a41b
VB
1991 goto out;
1992 }
1993 }
1994
be97a41b 1995 nmask = policy_nodemask(gfp, pol);
04ec6264
VB
1996 preferred_nid = policy_node(gfp, pol, node);
1997 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
d51e9894 1998 mpol_cond_put(pol);
be97a41b 1999out:
c0ff7453 2000 return page;
1da177e4
LT
2001}
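For context, a hedged userspace example of how a VMA acquires the per-VMA policy consulted here: mbind(2) installs MPOL_INTERLEAVE on a fresh mapping, so later faults into it take the interleave path above. The node mask and sizes are made up; build with -lnuma for the mbind() wrapper.

#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 8UL << 20;                 /* 8MB, arbitrary */
        unsigned long nodemask = 0x3;           /* nodes 0-1, hypothetical */
        void *p;

        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        /* give this VMA an interleave policy over nodes 0-1 */
        if (mbind(p, len, MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8, 0)) {
                perror("mbind");
                return 1;
        }
        memset(p, 1, len);                      /* faults now spread across the nodes */
        return 0;
}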
2002
2003/**
2004 * alloc_pages_current - Allocate pages.
2005 *
2006 * @gfp:
2007 * %GFP_USER user allocation,
2008 * %GFP_KERNEL kernel allocation,
2009 * %GFP_HIGHMEM highmem allocation,
2010 * %GFP_FS don't call back into a file system.
2011 * %GFP_ATOMIC don't sleep.
2012 * @order: Power of two of allocation size in pages. 0 is a single page.
2013 *
 2014 * Allocate a page from the kernel page pool. When not in
 2015 * interrupt context, apply the current process' NUMA policy.
2016 * Returns NULL when no page can be allocated.
1da177e4 2017 */
dd0fc66f 2018struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4 2019{
8d90274b 2020 struct mempolicy *pol = &default_policy;
c0ff7453 2021 struct page *page;
1da177e4 2022
8d90274b
ON
2023 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2024 pol = get_task_policy(current);
52cd3b07
LS
2025
2026 /*
2027 * No reference counting needed for current->mempolicy
2028 * nor system default_policy
2029 */
45c4745a 2030 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453
MX
2031 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2032 else
2033 page = __alloc_pages_nodemask(gfp, order,
04ec6264 2034 policy_node(gfp, pol, numa_node_id()),
5c4b4be3 2035 policy_nodemask(gfp, pol));
cc9a6c87 2036
c0ff7453 2037 return page;
1da177e4
LT
2038}
2039EXPORT_SYMBOL(alloc_pages_current);
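Similarly, a hedged userspace sketch of the process-wide policy this function consults: set_mempolicy(2) installs MPOL_INTERLEAVE for the calling task, so later allocations made on its behalf interleave across the mask. The node mask is hypothetical; build with -lnuma.

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        unsigned long nodemask = 0x3;           /* nodes 0-1, hypothetical */
        size_t len = 64UL << 20;
        char *buf;

        /* process policy: used when a VMA has no policy of its own */
        if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8)) {
                perror("set_mempolicy");
                return 1;
        }
        buf = malloc(len);
        if (!buf)
                return 1;
        memset(buf, 0, len);                    /* page faults now interleave */
        free(buf);
        return 0;
}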
2040
ef0855d3
ON
2041int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2042{
2043 struct mempolicy *pol = mpol_dup(vma_policy(src));
2044
2045 if (IS_ERR(pol))
2046 return PTR_ERR(pol);
2047 dst->vm_policy = pol;
2048 return 0;
2049}
2050
4225399a 2051/*
846a16bf 2052 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
 2053 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 2054 * with the mems_allowed returned by cpuset_mems_allowed(). This
 2055 * keeps mempolicies cpuset-relative after their cpuset moves. See
 2056 * further kernel/cpuset.c update_nodemask().
708c1bbc
MX
2057 *
 2058 * current's mempolicy may be rebound by another task (the task that changes
 2059 * the cpuset's mems), so we needn't do rebind work for the current task.
4225399a 2060 */
4225399a 2061
846a16bf
LS
2062/* Slow path of a mempolicy duplicate */
2063struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2064{
2065 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2066
2067 if (!new)
2068 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2069
2070 /* task's mempolicy is protected by alloc_lock */
2071 if (old == current->mempolicy) {
2072 task_lock(current);
2073 *new = *old;
2074 task_unlock(current);
2075 } else
2076 *new = *old;
2077
4225399a
PJ
2078 if (current_cpuset_is_being_rebound()) {
2079 nodemask_t mems = cpuset_mems_allowed(current);
213980c0 2080 mpol_rebind_policy(new, &mems);
4225399a 2081 }
1da177e4 2082 atomic_set(&new->refcnt, 1);
1da177e4
LT
2083 return new;
2084}
2085
2086/* Slow path of a mempolicy comparison */
fcfb4dcc 2087bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2088{
2089 if (!a || !b)
fcfb4dcc 2090 return false;
45c4745a 2091 if (a->mode != b->mode)
fcfb4dcc 2092 return false;
19800502 2093 if (a->flags != b->flags)
fcfb4dcc 2094 return false;
19800502
BL
2095 if (mpol_store_user_nodemask(a))
2096 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2097 return false;
19800502 2098
45c4745a 2099 switch (a->mode) {
19770b32
MG
2100 case MPOL_BIND:
2101 /* Fall through */
1da177e4 2102 case MPOL_INTERLEAVE:
fcfb4dcc 2103 return !!nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 2104 case MPOL_PREFERRED:
75719661 2105 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
2106 default:
2107 BUG();
fcfb4dcc 2108 return false;
1da177e4
LT
2109 }
2110}
2111
1da177e4
LT
2112/*
2113 * Shared memory backing store policy support.
2114 *
2115 * Remember policies even when nobody has shared memory mapped.
2116 * The policies are kept in Red-Black tree linked from the inode.
4a8c7bb5 2117 * They are protected by the sp->lock rwlock, which should be held
1da177e4
LT
2118 * for any accesses to the tree.
2119 */
2120
4a8c7bb5
NZ
2121/*
2122 * lookup first element intersecting start-end. Caller holds sp->lock for
2123 * reading or for writing
2124 */
1da177e4
LT
2125static struct sp_node *
2126sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2127{
2128 struct rb_node *n = sp->root.rb_node;
2129
2130 while (n) {
2131 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2132
2133 if (start >= p->end)
2134 n = n->rb_right;
2135 else if (end <= p->start)
2136 n = n->rb_left;
2137 else
2138 break;
2139 }
2140 if (!n)
2141 return NULL;
2142 for (;;) {
2143 struct sp_node *w = NULL;
2144 struct rb_node *prev = rb_prev(n);
2145 if (!prev)
2146 break;
2147 w = rb_entry(prev, struct sp_node, nd);
2148 if (w->end <= start)
2149 break;
2150 n = prev;
2151 }
2152 return rb_entry(n, struct sp_node, nd);
2153}
2154
4a8c7bb5
NZ
2155/*
 2156 * Insert a new shared policy into the rb-tree. Caller holds sp->lock for
2157 * writing.
2158 */
1da177e4
LT
2159static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2160{
2161 struct rb_node **p = &sp->root.rb_node;
2162 struct rb_node *parent = NULL;
2163 struct sp_node *nd;
2164
2165 while (*p) {
2166 parent = *p;
2167 nd = rb_entry(parent, struct sp_node, nd);
2168 if (new->start < nd->start)
2169 p = &(*p)->rb_left;
2170 else if (new->end > nd->end)
2171 p = &(*p)->rb_right;
2172 else
2173 BUG();
2174 }
2175 rb_link_node(&new->nd, parent, p);
2176 rb_insert_color(&new->nd, &sp->root);
140d5a49 2177 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2178 new->policy ? new->policy->mode : 0);
1da177e4
LT
2179}
2180
2181/* Find shared policy intersecting idx */
2182struct mempolicy *
2183mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2184{
2185 struct mempolicy *pol = NULL;
2186 struct sp_node *sn;
2187
2188 if (!sp->root.rb_node)
2189 return NULL;
4a8c7bb5 2190 read_lock(&sp->lock);
1da177e4
LT
2191 sn = sp_lookup(sp, idx, idx+1);
2192 if (sn) {
2193 mpol_get(sn->policy);
2194 pol = sn->policy;
2195 }
4a8c7bb5 2196 read_unlock(&sp->lock);
1da177e4
LT
2197 return pol;
2198}
2199
63f74ca2
KM
2200static void sp_free(struct sp_node *n)
2201{
2202 mpol_put(n->policy);
2203 kmem_cache_free(sn_cache, n);
2204}
2205
771fb4d8
LS
2206/**
2207 * mpol_misplaced - check whether current page node is valid in policy
2208 *
b46e14ac
FF
2209 * @page: page to be checked
2210 * @vma: vm area where page mapped
2211 * @addr: virtual address where page mapped
771fb4d8
LS
2212 *
2213 * Lookup current policy node id for vma,addr and "compare to" page's
2214 * node id.
2215 *
2216 * Returns:
2217 * -1 - not misplaced, page is in the right node
2218 * node - node id where the page should be
2219 *
2220 * Policy determination "mimics" alloc_page_vma().
2221 * Called from fault path where we know the vma and faulting address.
2222 */
2223int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2224{
2225 struct mempolicy *pol;
c33d6c06 2226 struct zoneref *z;
771fb4d8
LS
2227 int curnid = page_to_nid(page);
2228 unsigned long pgoff;
90572890
PZ
2229 int thiscpu = raw_smp_processor_id();
2230 int thisnid = cpu_to_node(thiscpu);
771fb4d8
LS
2231 int polnid = -1;
2232 int ret = -1;
2233
dd6eecb9 2234 pol = get_vma_policy(vma, addr);
771fb4d8
LS
2235 if (!(pol->flags & MPOL_F_MOF))
2236 goto out;
2237
2238 switch (pol->mode) {
2239 case MPOL_INTERLEAVE:
771fb4d8
LS
2240 pgoff = vma->vm_pgoff;
2241 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
98c70baa 2242 polnid = offset_il_node(pol, pgoff);
771fb4d8
LS
2243 break;
2244
2245 case MPOL_PREFERRED:
2246 if (pol->flags & MPOL_F_LOCAL)
2247 polnid = numa_node_id();
2248 else
2249 polnid = pol->v.preferred_node;
2250 break;
2251
2252 case MPOL_BIND:
c33d6c06 2253
771fb4d8
LS
2254 /*
2255 * allows binding to multiple nodes.
2256 * use current page if in policy nodemask,
2257 * else select nearest allowed node, if any.
2258 * If no allowed nodes, use current [!misplaced].
2259 */
2260 if (node_isset(curnid, pol->v.nodes))
2261 goto out;
c33d6c06 2262 z = first_zones_zonelist(
771fb4d8
LS
2263 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2264 gfp_zone(GFP_HIGHUSER),
c33d6c06
MG
2265 &pol->v.nodes);
2266 polnid = z->zone->node;
771fb4d8
LS
2267 break;
2268
2269 default:
2270 BUG();
2271 }
5606e387
MG
2272
2273 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2274 if (pol->flags & MPOL_F_MORON) {
90572890 2275 polnid = thisnid;
5606e387 2276
10f39042 2277 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
de1c9ce6 2278 goto out;
e42c8ff2
MG
2279 }
2280
771fb4d8
LS
2281 if (curnid != polnid)
2282 ret = polnid;
2283out:
2284 mpol_cond_put(pol);
2285
2286 return ret;
2287}
2288
c11600e4
DR
2289/*
2290 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2291 * dropped after task->mempolicy is set to NULL so that any allocation done as
2292 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2293 * policy.
2294 */
2295void mpol_put_task_policy(struct task_struct *task)
2296{
2297 struct mempolicy *pol;
2298
2299 task_lock(task);
2300 pol = task->mempolicy;
2301 task->mempolicy = NULL;
2302 task_unlock(task);
2303 mpol_put(pol);
2304}
2305
1da177e4
LT
2306static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2307{
140d5a49 2308 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2309 rb_erase(&n->nd, &sp->root);
63f74ca2 2310 sp_free(n);
1da177e4
LT
2311}
2312
42288fe3
MG
2313static void sp_node_init(struct sp_node *node, unsigned long start,
2314 unsigned long end, struct mempolicy *pol)
2315{
2316 node->start = start;
2317 node->end = end;
2318 node->policy = pol;
2319}
2320
dbcb0f19
AB
2321static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2322 struct mempolicy *pol)
1da177e4 2323{
869833f2
KM
2324 struct sp_node *n;
2325 struct mempolicy *newpol;
1da177e4 2326
869833f2 2327 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2328 if (!n)
2329 return NULL;
869833f2
KM
2330
2331 newpol = mpol_dup(pol);
2332 if (IS_ERR(newpol)) {
2333 kmem_cache_free(sn_cache, n);
2334 return NULL;
2335 }
2336 newpol->flags |= MPOL_F_SHARED;
42288fe3 2337 sp_node_init(n, start, end, newpol);
869833f2 2338
1da177e4
LT
2339 return n;
2340}
2341
2342/* Replace a policy range. */
2343static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2344 unsigned long end, struct sp_node *new)
2345{
b22d127a 2346 struct sp_node *n;
42288fe3
MG
2347 struct sp_node *n_new = NULL;
2348 struct mempolicy *mpol_new = NULL;
b22d127a 2349 int ret = 0;
1da177e4 2350
42288fe3 2351restart:
4a8c7bb5 2352 write_lock(&sp->lock);
1da177e4
LT
2353 n = sp_lookup(sp, start, end);
2354 /* Take care of old policies in the same range. */
2355 while (n && n->start < end) {
2356 struct rb_node *next = rb_next(&n->nd);
2357 if (n->start >= start) {
2358 if (n->end <= end)
2359 sp_delete(sp, n);
2360 else
2361 n->start = end;
2362 } else {
2363 /* Old policy spanning whole new range. */
2364 if (n->end > end) {
42288fe3
MG
2365 if (!n_new)
2366 goto alloc_new;
2367
2368 *mpol_new = *n->policy;
2369 atomic_set(&mpol_new->refcnt, 1);
7880639c 2370 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2371 n->end = start;
5ca39575 2372 sp_insert(sp, n_new);
42288fe3
MG
2373 n_new = NULL;
2374 mpol_new = NULL;
1da177e4
LT
2375 break;
2376 } else
2377 n->end = start;
2378 }
2379 if (!next)
2380 break;
2381 n = rb_entry(next, struct sp_node, nd);
2382 }
2383 if (new)
2384 sp_insert(sp, new);
4a8c7bb5 2385 write_unlock(&sp->lock);
42288fe3
MG
2386 ret = 0;
2387
2388err_out:
2389 if (mpol_new)
2390 mpol_put(mpol_new);
2391 if (n_new)
2392 kmem_cache_free(sn_cache, n_new);
2393
b22d127a 2394 return ret;
42288fe3
MG
2395
2396alloc_new:
4a8c7bb5 2397 write_unlock(&sp->lock);
42288fe3
MG
2398 ret = -ENOMEM;
2399 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2400 if (!n_new)
2401 goto err_out;
2402 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2403 if (!mpol_new)
2404 goto err_out;
2405 goto restart;
1da177e4
LT
2406}
2407
71fe804b
LS
2408/**
2409 * mpol_shared_policy_init - initialize shared policy for inode
2410 * @sp: pointer to inode shared policy
2411 * @mpol: struct mempolicy to install
2412 *
2413 * Install non-NULL @mpol in inode's shared policy rb-tree.
2414 * On entry, the current task has a reference on a non-NULL @mpol.
2415 * This must be released on exit.
4bfc4495 2416 * This is called at get_inode() time, so we can use GFP_KERNEL.
71fe804b
LS
2417 */
2418void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2419{
58568d2a
MX
2420 int ret;
2421
71fe804b 2422 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2423 rwlock_init(&sp->lock);
71fe804b
LS
2424
2425 if (mpol) {
2426 struct vm_area_struct pvma;
2427 struct mempolicy *new;
4bfc4495 2428 NODEMASK_SCRATCH(scratch);
71fe804b 2429
4bfc4495 2430 if (!scratch)
5c0c1654 2431 goto put_mpol;
71fe804b
LS
2432 /* contextualize the tmpfs mount point mempolicy */
2433 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2434 if (IS_ERR(new))
0cae3457 2435 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2436
2437 task_lock(current);
4bfc4495 2438 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2439 task_unlock(current);
15d77835 2440 if (ret)
5c0c1654 2441 goto put_new;
71fe804b
LS
2442
2443 /* Create pseudo-vma that contains just the policy */
2444 memset(&pvma, 0, sizeof(struct vm_area_struct));
2445 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2446 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2447
5c0c1654 2448put_new:
71fe804b 2449 mpol_put(new); /* drop initial ref */
0cae3457 2450free_scratch:
4bfc4495 2451 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2452put_mpol:
2453 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2454 }
2455}
2456
1da177e4
LT
2457int mpol_set_shared_policy(struct shared_policy *info,
2458 struct vm_area_struct *vma, struct mempolicy *npol)
2459{
2460 int err;
2461 struct sp_node *new = NULL;
2462 unsigned long sz = vma_pages(vma);
2463
028fec41 2464 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2465 vma->vm_pgoff,
45c4745a 2466 sz, npol ? npol->mode : -1,
028fec41 2467 npol ? npol->flags : -1,
00ef2d2f 2468 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2469
2470 if (npol) {
2471 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2472 if (!new)
2473 return -ENOMEM;
2474 }
2475 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2476 if (err && new)
63f74ca2 2477 sp_free(new);
1da177e4
LT
2478 return err;
2479}
2480
2481/* Free a backing policy store on inode delete. */
2482void mpol_free_shared_policy(struct shared_policy *p)
2483{
2484 struct sp_node *n;
2485 struct rb_node *next;
2486
2487 if (!p->root.rb_node)
2488 return;
4a8c7bb5 2489 write_lock(&p->lock);
1da177e4
LT
2490 next = rb_first(&p->root);
2491 while (next) {
2492 n = rb_entry(next, struct sp_node, nd);
2493 next = rb_next(&n->nd);
63f74ca2 2494 sp_delete(p, n);
1da177e4 2495 }
4a8c7bb5 2496 write_unlock(&p->lock);
1da177e4
LT
2497}
2498
1a687c2e 2499#ifdef CONFIG_NUMA_BALANCING
c297663c 2500static int __initdata numabalancing_override;
1a687c2e
MG
2501
2502static void __init check_numabalancing_enable(void)
2503{
2504 bool numabalancing_default = false;
2505
2506 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2507 numabalancing_default = true;
2508
c297663c
MG
2509 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2510 if (numabalancing_override)
2511 set_numabalancing_state(numabalancing_override == 1);
2512
b0dc2b9b 2513 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 2514 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 2515 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
2516 set_numabalancing_state(numabalancing_default);
2517 }
2518}
2519
2520static int __init setup_numabalancing(char *str)
2521{
2522 int ret = 0;
2523 if (!str)
2524 goto out;
1a687c2e
MG
2525
2526 if (!strcmp(str, "enable")) {
c297663c 2527 numabalancing_override = 1;
1a687c2e
MG
2528 ret = 1;
2529 } else if (!strcmp(str, "disable")) {
c297663c 2530 numabalancing_override = -1;
1a687c2e
MG
2531 ret = 1;
2532 }
2533out:
2534 if (!ret)
4a404bea 2535 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
2536
2537 return ret;
2538}
2539__setup("numa_balancing=", setup_numabalancing);
2540#else
2541static inline void __init check_numabalancing_enable(void)
2542{
2543}
2544#endif /* CONFIG_NUMA_BALANCING */
2545
1da177e4
LT
2546/* assumes fs == KERNEL_DS */
2547void __init numa_policy_init(void)
2548{
b71636e2
PM
2549 nodemask_t interleave_nodes;
2550 unsigned long largest = 0;
2551 int nid, prefer = 0;
2552
1da177e4
LT
2553 policy_cache = kmem_cache_create("numa_policy",
2554 sizeof(struct mempolicy),
20c2df83 2555 0, SLAB_PANIC, NULL);
1da177e4
LT
2556
2557 sn_cache = kmem_cache_create("shared_policy_node",
2558 sizeof(struct sp_node),
20c2df83 2559 0, SLAB_PANIC, NULL);
1da177e4 2560
5606e387
MG
2561 for_each_node(nid) {
2562 preferred_node_policy[nid] = (struct mempolicy) {
2563 .refcnt = ATOMIC_INIT(1),
2564 .mode = MPOL_PREFERRED,
2565 .flags = MPOL_F_MOF | MPOL_F_MORON,
2566 .v = { .preferred_node = nid, },
2567 };
2568 }
2569
b71636e2
PM
2570 /*
2571 * Set interleaving policy for system init. Interleaving is only
2572 * enabled across suitably sized nodes (default is >= 16MB), or
2573 * fall back to the largest node if they're all smaller.
2574 */
2575 nodes_clear(interleave_nodes);
01f13bd6 2576 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2577 unsigned long total_pages = node_present_pages(nid);
2578
2579 /* Preserve the largest node */
2580 if (largest < total_pages) {
2581 largest = total_pages;
2582 prefer = nid;
2583 }
2584
2585 /* Interleave this node? */
2586 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2587 node_set(nid, interleave_nodes);
2588 }
2589
2590 /* All too small, use the largest */
2591 if (unlikely(nodes_empty(interleave_nodes)))
2592 node_set(prefer, interleave_nodes);
1da177e4 2593
028fec41 2594 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 2595 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
2596
2597 check_numabalancing_enable();
1da177e4
LT
2598}
2599
8bccd85f 2600/* Reset policy of current process to default */
1da177e4
LT
2601void numa_default_policy(void)
2602{
028fec41 2603 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2604}
68860ec1 2605
095f1fc4
LS
2606/*
2607 * Parse and format mempolicy from/to strings
2608 */
2609
1a75a6c8 2610/*
f2a07f40 2611 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
1a75a6c8 2612 */
345ace9c
LS
2613static const char * const policy_modes[] =
2614{
2615 [MPOL_DEFAULT] = "default",
2616 [MPOL_PREFERRED] = "prefer",
2617 [MPOL_BIND] = "bind",
2618 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2619 [MPOL_LOCAL] = "local",
345ace9c 2620};
1a75a6c8 2621
095f1fc4
LS
2622
2623#ifdef CONFIG_TMPFS
2624/**
f2a07f40 2625 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2626 * @str: string containing mempolicy to parse
71fe804b 2627 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2628 *
2629 * Format of input:
2630 * <mode>[=<flags>][:<nodelist>]
2631 *
71fe804b 2632 * On success, returns 0, else 1
095f1fc4 2633 */
a7a88b23 2634int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2635{
71fe804b 2636 struct mempolicy *new = NULL;
b4652e84 2637 unsigned short mode;
f2a07f40 2638 unsigned short mode_flags;
71fe804b 2639 nodemask_t nodes;
095f1fc4
LS
2640 char *nodelist = strchr(str, ':');
2641 char *flags = strchr(str, '=');
095f1fc4
LS
2642 int err = 1;
2643
2644 if (nodelist) {
2645 /* NUL-terminate mode or flags string */
2646 *nodelist++ = '\0';
71fe804b 2647 if (nodelist_parse(nodelist, nodes))
095f1fc4 2648 goto out;
01f13bd6 2649 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2650 goto out;
71fe804b
LS
2651 } else
2652 nodes_clear(nodes);
2653
095f1fc4
LS
2654 if (flags)
2655 *flags++ = '\0'; /* terminate mode string */
2656
479e2802 2657 for (mode = 0; mode < MPOL_MAX; mode++) {
345ace9c 2658 if (!strcmp(str, policy_modes[mode])) {
095f1fc4
LS
2659 break;
2660 }
2661 }
a720094d 2662 if (mode >= MPOL_MAX)
095f1fc4
LS
2663 goto out;
2664
71fe804b 2665 switch (mode) {
095f1fc4 2666 case MPOL_PREFERRED:
71fe804b
LS
2667 /*
2668 * Insist on a nodelist of one node only
2669 */
095f1fc4
LS
2670 if (nodelist) {
2671 char *rest = nodelist;
2672 while (isdigit(*rest))
2673 rest++;
926f2ae0
KM
2674 if (*rest)
2675 goto out;
095f1fc4
LS
2676 }
2677 break;
095f1fc4
LS
2678 case MPOL_INTERLEAVE:
2679 /*
2680 * Default to online nodes with memory if no nodelist
2681 */
2682 if (!nodelist)
01f13bd6 2683 nodes = node_states[N_MEMORY];
3f226aa1 2684 break;
71fe804b 2685 case MPOL_LOCAL:
3f226aa1 2686 /*
71fe804b 2687 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2688 */
71fe804b 2689 if (nodelist)
3f226aa1 2690 goto out;
71fe804b 2691 mode = MPOL_PREFERRED;
3f226aa1 2692 break;
413b43de
RT
2693 case MPOL_DEFAULT:
2694 /*
 2695 * Insist on an empty nodelist
2696 */
2697 if (!nodelist)
2698 err = 0;
2699 goto out;
d69b2e63
KM
2700 case MPOL_BIND:
2701 /*
2702 * Insist on a nodelist
2703 */
2704 if (!nodelist)
2705 goto out;
095f1fc4
LS
2706 }
2707
71fe804b 2708 mode_flags = 0;
095f1fc4
LS
2709 if (flags) {
2710 /*
2711 * Currently, we only support two mutually exclusive
2712 * mode flags.
2713 */
2714 if (!strcmp(flags, "static"))
71fe804b 2715 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2716 else if (!strcmp(flags, "relative"))
71fe804b 2717 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2718 else
926f2ae0 2719 goto out;
095f1fc4 2720 }
71fe804b
LS
2721
2722 new = mpol_new(mode, mode_flags, &nodes);
2723 if (IS_ERR(new))
926f2ae0
KM
2724 goto out;
2725
f2a07f40
HD
2726 /*
2727 * Save nodes for mpol_to_str() to show the tmpfs mount options
2728 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2729 */
2730 if (mode != MPOL_PREFERRED)
2731 new->v.nodes = nodes;
2732 else if (nodelist)
2733 new->v.preferred_node = first_node(nodes);
2734 else
2735 new->flags |= MPOL_F_LOCAL;
2736
2737 /*
2738 * Save nodes for contextualization: this will be used to "clone"
2739 * the mempolicy in a specific context [cpuset] at a later time.
2740 */
2741 new->w.user_nodemask = nodes;
2742
926f2ae0 2743 err = 0;
71fe804b 2744
095f1fc4
LS
2745out:
2746 /* Restore string for error message */
2747 if (nodelist)
2748 *--nodelist = ':';
2749 if (flags)
2750 *--flags = '=';
71fe804b
LS
2751 if (!err)
2752 *mpol = new;
095f1fc4
LS
2753 return err;
2754}
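For a concrete input, a hedged example of where these strings come from: the tmpfs mpol= mount option, which shmem passes to mpol_parse_str(). The mount point, size and nodelist are made up; the call needs CAP_SYS_ADMIN, and a flag form such as "interleave=static:0-3" is also accepted.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* mpol_parse_str() sees "interleave:0-3" from this option string */
        if (mount("tmpfs", "/mnt/tmpfs-interleaved", "tmpfs", 0,
                  "size=1G,mpol=interleave:0-3")) {
                perror("mount");
                return 1;
        }
        return 0;
}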
2755#endif /* CONFIG_TMPFS */
2756
71fe804b
LS
2757/**
2758 * mpol_to_str - format a mempolicy structure for printing
2759 * @buffer: to contain formatted mempolicy string
2760 * @maxlen: length of @buffer
2761 * @pol: pointer to mempolicy to be formatted
71fe804b 2762 *
948927ee
DR
2763 * Convert @pol into a string. If @buffer is too short, truncate the string.
2764 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2765 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 2766 */
948927ee 2767void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
2768{
2769 char *p = buffer;
948927ee
DR
2770 nodemask_t nodes = NODE_MASK_NONE;
2771 unsigned short mode = MPOL_DEFAULT;
2772 unsigned short flags = 0;
2291990a 2773
8790c71a 2774 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 2775 mode = pol->mode;
948927ee
DR
2776 flags = pol->flags;
2777 }
bea904d5 2778
1a75a6c8
CL
2779 switch (mode) {
2780 case MPOL_DEFAULT:
1a75a6c8 2781 break;
1a75a6c8 2782 case MPOL_PREFERRED:
fc36b8d3 2783 if (flags & MPOL_F_LOCAL)
f2a07f40 2784 mode = MPOL_LOCAL;
53f2556b 2785 else
fc36b8d3 2786 node_set(pol->v.preferred_node, nodes);
1a75a6c8 2787 break;
1a75a6c8 2788 case MPOL_BIND:
1a75a6c8 2789 case MPOL_INTERLEAVE:
f2a07f40 2790 nodes = pol->v.nodes;
1a75a6c8 2791 break;
1a75a6c8 2792 default:
948927ee
DR
2793 WARN_ON_ONCE(1);
2794 snprintf(p, maxlen, "unknown");
2795 return;
1a75a6c8
CL
2796 }
2797
b7a9f420 2798 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 2799
fc36b8d3 2800 if (flags & MPOL_MODE_FLAGS) {
948927ee 2801 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 2802
2291990a
LS
2803 /*
2804 * Currently, the only defined flags are mutually exclusive
2805 */
f5b087b5 2806 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
2807 p += snprintf(p, buffer + maxlen - p, "static");
2808 else if (flags & MPOL_F_RELATIVE_NODES)
2809 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
2810 }
2811
9e763e0f
TH
2812 if (!nodes_empty(nodes))
2813 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2814 nodemask_pr_args(&nodes));
1a75a6c8 2815}
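Finally, a hedged userspace peek at where the formatted strings surface: each line of /proc/<pid>/numa_maps begins with the VMA's mempolicy rendered in this format (e.g. "default" or "interleave:0-1"). Purely illustrative; the exact output depends on the system.

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/self/numa_maps", "r");
        char line[1024];

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* first field of each line is the policy string for that VMA */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}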