/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocation don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
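
/*
 * Illustrative userspace sketch (an editorial addition, not part of the
 * kernel sources): the policies above are requested with the
 * set_mempolicy(2) and mbind(2) syscalls, roughly like this, assuming
 * <numaif.h> from libnuma and a machine with nodes 0 and 1:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 64);		// process policy
 *	mbind(buf, len, MPOL_BIND, &mask, 64, MPOL_MF_MOVE);	// VMA policy
 *
 * maxnode (64 here) must cover the highest node bit used; see get_nodes()
 * below for how the kernel parses it.
 */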

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
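
/*
 * Worked example (editorial, for illustration only): with a relative user
 * nodemask of {0,2} and a cpuset of four allowed nodes {4,5,6,7},
 * nodes_fold() wraps the user mask modulo 4 (still {0,2}) and nodes_onto()
 * maps relative bit 0 to node 4 and relative bit 2 to node 6, so the
 * resulting mask is {4,6}.
 */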

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some check and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
								*nodes);
		/* cache the new allowed mask, not the remapped policy nodes */
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_sem. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = 1;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		goto out;
	}
	if (!thp_migration_supported()) {
		get_page(page);
		spin_unlock(ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		goto out;
	}
	if (!queue_pages_required(page, qp)) {
		ret = 1;
		goto unlock;
	}

	ret = 1;
	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		migrate_page_add(page, qp->pagelist, flags);
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret)
			return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (PageTransCompound(page) && !thp_migration_supported()) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (!vma_migratable(vma))
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) &&
			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist
 * passed via @pagelist.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_cache(head),
				hpage_nr_pages(head));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else if (thp_migration_supported() && PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_pages_node(node,
			(GFP_TRANSHUGE | __GFP_THISNODE),
			HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	} else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning from_tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		return alloc_huge_page_vma(page_hstate(compound_head(page)),
				vma, address);
	} else if (thp_migration_supported() && PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
					 HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
			vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	err = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);
	if (!err)
		err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if (nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_movable_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long t;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/*
	 * When the user specified more nodes than supported just check
	 * if the non supported part is all zero.
	 *
	 * If maxnode has more longs than MAX_NUMNODES, check the bits in
	 * that area first.  Then go through to check the remaining bits,
	 * which are equal to or bigger than MAX_NUMNODES.  Otherwise,
	 * just check bits [MAX_NUMNODES, maxnode).
	 */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
		unsigned long valid_mask = endmask;

		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
		if (get_user(t, nmask + nlongs - 1))
			return -EFAULT;
		if (t & valid_mask)
			return -EINVAL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}
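
/*
 * Worked example for the bit arithmetic above (editorial, assuming 64-bit
 * longs): a caller passing maxnode = 37 ends up with maxnode = 36 after the
 * decrement, so nlongs = 1 and endmask = (1UL << 36) - 1, and only bits
 * 0..35 of the user's word survive the final masking.  When maxnode exceeds
 * MAX_NUMNODES, any set bit in the unsupported region makes get_nodes()
 * return -EINVAL instead of silently dropping it.
 */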

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned, flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified process.
	 * Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	task_nodes = cpuset_mems_allowed(current);
	nodes_and(*new, *new, task_nodes);
	if (nodes_empty(*new))
		goto out_put;

	nodes_and(*new, *new, node_states[N_MEMORY]);
	if (nodes_empty(*new))
		goto out_put;

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;

}
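
/*
 * Illustrative sketch (editorial, not part of the kernel sources): the
 * syscall above is what a raw userspace call such as
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	syscall(__NR_migrate_pages, pid, 64, &old, &new);
 *
 * ends up in; it attempts to move the target task's pages from node 0 to
 * node 1, subject to the cpuset and capability checks above.
 */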

/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		       compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode,
		       compat_ulong_t, addr, compat_ulong_t, flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		unsigned long copy_size;
		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
		err = copy_from_user(bm, nm, copy_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(bm, nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, bm, alloc_size))
			return -EFAULT;
	}

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode, compat_ulong_t, flags)
{
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
			return -EFAULT;
		nm = compat_alloc_user_space(alloc_size);
		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
			return -EFAULT;
	}

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct mempolicy *pol = NULL;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
		} else if (vma->vm_policy) {
			pol = vma->vm_policy;

			/*
			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
			 * count on these policies which will be dropped by
			 * mpol_cond_put() later
			 */
			if (mpol_needs_cond_ref(pol))
				mpol_get(pol);
		}
	}

	return pol;
}

/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = __get_vma_policy(vma, addr);

	if (!pol)
		pol = get_task_policy(current);

	return pol;
}

bool vma_policy_mof(struct vm_area_struct *vma)
{
	struct mempolicy *pol;

	if (vma->vm_ops && vma->vm_ops->get_policy) {
		bool ret = false;

		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
		if (pol && (pol->flags & MPOL_F_MOF))
			ret = true;
		mpol_cond_put(pol);

		return ret;
	}

	pol = vma->vm_policy;
	if (!pol)
		pol = get_task_policy(current);

	return pol->flags & MPOL_F_MOF;
}

static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
	enum zone_type dynamic_policy_zone = policy_zone;

	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

	/*
	 * if policy->v.nodes has movable memory only,
	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
	 *
	 * policy->v.nodes has been intersected with node_states[N_MEMORY],
	 * so if the following test fails, it implies
	 * policy->v.nodes has movable memory only.
	 */
	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
		dynamic_policy_zone = ZONE_MOVABLE;

	return zone >= dynamic_policy_zone;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
			apply_policy_zone(policy, gfp_zone(gfp)) &&
			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return the node id preferred by the given mempolicy, or the given id */
static int policy_node(gfp_t gfp, struct mempolicy *policy,
								int nd)
{
	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
		nd = policy->v.preferred_node;
	else {
		/*
		 * __GFP_THISNODE shouldn't even be used with the bind policy
		 * because we might easily break the expectation to stay on the
		 * requested node and not break the policy.
		 */
		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
	}

	return nd;
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned next;
	struct task_struct *me = current;

	next = next_node_in(me->il_prev, policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_prev = next;
	return next;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned int mempolicy_slab_node(void)
{
	struct mempolicy *policy;
	int node = numa_mem_id();

	if (in_interrupt())
		return node;

	policy = current->mempolicy;
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return node;

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		struct zoneref *z;

		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
		z = first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes);
		return z->zone ? z->zone->node : node;
	}

	default:
		BUG();
	}
}

/*
 * Do static interleaving for a VMA with known offset @n.  Returns the n'th
 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
 * number of present nodes.
 */
static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int i;
	int nid;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)n % nnodes;
	nid = first_node(pol->v.nodes);
	for (i = 0; i < target; i++)
		nid = next_node(nid, pol->v.nodes);
	return nid;
}
1777
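/*
 * Editor's worked example (not part of the kernel source): with
 * pol->v.nodes = {1, 3, 5} and n = 7, nnodes = 3 and target = 7 % 3 = 1,
 * so first_node() yields 1, one next_node() step yields 3, and node 3 is
 * returned.  A given offset therefore always maps to the same node for a
 * given nodemask, which is what makes this interleaving "static".
 */
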
/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
		struct vm_area_struct *vma, unsigned long addr, int shift)
{
	if (vma) {
		unsigned long off;

		/*
		 * for small pages, there is no difference between
		 * shift and PAGE_SHIFT, so the bit-shift is safe.
		 * for huge pages, since vm_pgoff is in units of small
		 * pages, we need to shift off the always 0 bits to get
		 * a useful offset.
		 */
		BUG_ON(shift < PAGE_SHIFT);
		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
		off += (addr - vma->vm_start) >> shift;
		return offset_il_node(pol, off);
	} else
		return interleave_nodes(pol);
}

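/*
 * Editor's worked example (not part of the kernel source): for a 2MB huge
 * page (shift = 21) with PAGE_SHIFT = 12, vm_pgoff = 0 and a fault at
 * vma->vm_start + 4MB, off = 0 + (4MB >> 21) = 2, so the third huge page
 * of the mapping selects interleave offset 2.  Dropping the low
 * (shift - PAGE_SHIFT) bits keeps every small page of one huge page on
 * the same node.
 */
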
#ifdef CONFIG_HUGETLBFS
/*
 * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup and interleave policy
 * @gfp_flags: for requested zone
 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a nid suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by read_mems_allowed_begin()
 */
int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	int nid;

	*mpol = get_vma_policy(vma, addr);
	*nodemask = NULL;	/* assume !MPOL_BIND */

	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
		nid = interleave_nid(*mpol, vma, addr,
					huge_page_shift(hstate_vma(vma)));
	} else {
		nid = policy_node(gfp_flags, *mpol, numa_node_id());
		if ((*mpol)->mode == MPOL_BIND)
			*nodemask = &(*mpol)->v.nodes;
	}
	return nid;
}
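
/*
 * Editor's sketch (not part of the kernel source): callers are expected
 * to wrap huge_node() in the cpuset seqcount retry protocol, roughly as
 * the hugetlb allocator does:
 *
 *	unsigned int cpuset_mems_cookie;
 *
 * retry_cpuset:
 *	cpuset_mems_cookie = read_mems_allowed_begin();
 *	nid = huge_node(vma, addr, gfp_flags, &mpol, &nodemask);
 *	... allocate from nid/nodemask, then mpol_cond_put(mpol) ...
 *	if (!page && read_mems_allowed_retry(cpuset_mems_cookie))
 *		goto retry_cpuset;
 */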

/*
 * init_nodemask_of_mempolicy
 *
 * If the current task's mempolicy is "default" [NULL], return 'false'
 * to indicate default policy.  Otherwise, extract the policy nodemask
 * for 'bind' or 'interleave' policy into the argument nodemask, or
 * initialize the argument nodemask to contain the single node for
 * 'preferred' or 'local' policy and return 'true' to indicate presence
 * of non-default mempolicy.
 *
 * We don't bother with reference counting the mempolicy [mpol_get/put]
 * because the current task is examining its own mempolicy and a task's
 * mempolicy is only ever changed by the task itself.
 *
 * N.B., it is the caller's responsibility to free a returned nodemask.
 */
bool init_nodemask_of_mempolicy(nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	int nid;

	if (!(mask && current->mempolicy))
		return false;

	task_lock(current);
	mempolicy = current->mempolicy;
	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		if (mempolicy->flags & MPOL_F_LOCAL)
			nid = numa_node_id();
		else
			nid = mempolicy->v.preferred_node;
		init_nodemask_of_node(mask, nid);
		break;

	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*mask = mempolicy->v.nodes;
		break;

	default:
		BUG();
	}
	task_unlock(current);

	return true;
}
#endif

/*
 * mempolicy_nodemask_intersects
 *
 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
 * policy.  Otherwise, check for intersection between mask and the policy
 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
 * policy, always return true since it may allocate elsewhere on fallback.
 *
 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
 */
bool mempolicy_nodemask_intersects(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct mempolicy *mempolicy;
	bool ret = true;

	if (!mask)
		return ret;
	task_lock(tsk);
	mempolicy = tsk->mempolicy;
	if (!mempolicy)
		goto out;

	switch (mempolicy->mode) {
	case MPOL_PREFERRED:
		/*
		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
		 * allocate from; they may fall back to other nodes when oom.
		 * Thus, it's possible for tsk to have allocated memory from
		 * nodes in mask.
		 */
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		ret = nodes_intersects(mempolicy->v.nodes, *mask);
		break;
	default:
		BUG();
	}
out:
	task_unlock(tsk);
	return ret;
}

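/*
 * Editor's worked example (not part of the kernel source): for a task
 * bound with MPOL_BIND to nodes {0, 1}, passing a mask of {2, 3} yields
 * false -- the task cannot have allocated there -- while under
 * MPOL_PREFERRED the same query stays true, because preferred
 * allocations may have fallen back to any node.
 */
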
/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
					unsigned nid)
{
	struct page *page;

	page = __alloc_pages(gfp, order, nid);
	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
	if (!static_branch_likely(&vm_numa_stat_key))
		return page;
	if (page && page_to_nid(page) == nid) {
		preempt_disable();
		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
		preempt_enable();
	}
	return page;
}

/**
 * alloc_pages_vma - Allocate a page for a VMA.
 *
 * @gfp:
 *	%GFP_USER    user allocation,
 *	%GFP_KERNEL  kernel allocations,
 *	%GFP_HIGHMEM highmem/user allocations,
 *	%GFP_FS      allocation should not call back into a file system,
 *	%GFP_ATOMIC  don't sleep.
 *
 * @order: Order of the GFP allocation.
 * @vma: Pointer to VMA or NULL if not available.
 * @addr: Virtual Address of the allocation. Must be inside the VMA.
 * @node: Which node to prefer for allocation (modulo policy).
 * @hugepage: for hugepages try only the preferred node if possible
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When VMA is not NULL, the caller must hold down_read on the mmap_sem
 * of the mm_struct of the VMA to prevent it from going away.  Should be
 * used for all allocations for pages that will be mapped into user space.
 * Returns NULL when no page can be allocated.
 */
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, int node, bool hugepage)
{
	struct mempolicy *pol;
	struct page *page;
	int preferred_nid;
	nodemask_t *nmask;

	pol = get_vma_policy(vma, addr);

	if (pol->mode == MPOL_INTERLEAVE) {
		unsigned nid;

		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
		mpol_cond_put(pol);
		page = alloc_page_interleave(gfp, order, nid);
		goto out;
	}

	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
		int hpage_node = node;

		/*
		 * For hugepage allocation and non-interleave policy which
		 * allows the current node (or other explicitly preferred
		 * node) we only try to allocate from the current/preferred
		 * node and don't fall back to other nodes, as the cost of
		 * remote accesses would likely offset THP benefits.
		 *
		 * If the policy is interleave, or does not allow the current
		 * node in its nodemask, we allocate the standard way.
		 */
		if (pol->mode == MPOL_PREFERRED &&
						!(pol->flags & MPOL_F_LOCAL))
			hpage_node = pol->v.preferred_node;

		nmask = policy_nodemask(gfp, pol);
		if (!nmask || node_isset(hpage_node, *nmask)) {
			mpol_cond_put(pol);
			page = __alloc_pages_node(hpage_node,
						gfp | __GFP_THISNODE, order);
			goto out;
		}
	}

	nmask = policy_nodemask(gfp, pol);
	preferred_nid = policy_node(gfp, pol, node);
	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
	mpol_cond_put(pol);
out:
	return page;
}

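/*
 * Editor's sketch (not part of the kernel source): a typical fault-path
 * caller holds mmap_sem for read and allocates with something like:
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */
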
/**
 * alloc_pages_current - Allocate pages.
 *
 * @gfp:
 *	%GFP_USER    user allocation,
 *	%GFP_KERNEL  kernel allocation,
 *	%GFP_HIGHMEM highmem allocation,
 *	%GFP_FS      don't call back into a file system,
 *	%GFP_ATOMIC  don't sleep.
 * @order: Power of two of allocation size in pages. 0 is a single page.
 *
 * Allocate a page from the kernel page pool and, when not in interrupt
 * context, apply the current process' NUMA policy.
 * Returns NULL when no page can be allocated.
 */
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
	struct mempolicy *pol = &default_policy;
	struct page *page;

	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
		pol = get_task_policy(current);

	/*
	 * No reference counting needed for current->mempolicy
	 * nor system default_policy
	 */
	if (pol->mode == MPOL_INTERLEAVE)
		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
	else
		page = __alloc_pages_nodemask(gfp, order,
				policy_node(gfp, pol, numa_node_id()),
				policy_nodemask(gfp, pol));

	return page;
}
EXPORT_SYMBOL(alloc_pages_current);

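/*
 * Editor's note (not part of the kernel source): on CONFIG_NUMA kernels
 * the generic alloc_pages()/alloc_page() helpers in <linux/gfp.h> resolve
 * to alloc_pages_current(), so e.g.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 * allocates a 4-page block under the calling task's mempolicy.
 */
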
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	struct mempolicy *pol = mpol_dup(vma_policy(src));

	if (IS_ERR(pol))
		return PTR_ERR(pol);
	dst->vm_policy = pol;
	return 0;
}

/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by the other task (the task that
 * changes the cpuset's mems), so we needn't do rebind work for the
 * current task.
 */

/* Slow path of a mempolicy duplicate */
struct mempolicy *__mpol_dup(struct mempolicy *old)
{
	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);

	if (!new)
		return ERR_PTR(-ENOMEM);

	/* task's mempolicy is protected by alloc_lock */
	if (old == current->mempolicy) {
		task_lock(current);
		*new = *old;
		task_unlock(current);
	} else
		*new = *old;

	if (current_cpuset_is_being_rebound()) {
		nodemask_t mems = cpuset_mems_allowed(current);
		mpol_rebind_policy(new, &mems);
	}
	atomic_set(&new->refcnt, 1);
	return new;
}

/* Slow path of a mempolicy comparison */
bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (!a || !b)
		return false;
	if (a->mode != b->mode)
		return false;
	if (a->flags != b->flags)
		return false;
	if (mpol_store_user_nodemask(a))
		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
			return false;

	switch (a->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		return !!nodes_equal(a->v.nodes, b->v.nodes);
	case MPOL_PREFERRED:
		return a->v.preferred_node == b->v.preferred_node;
	default:
		BUG();
		return false;
	}
}

/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in a red-black tree linked from the inode.
 * They are protected by the sp->lock rwlock, which should be held
 * for any accesses to the tree.
 */

/*
 * lookup first element intersecting start-end.  Caller holds sp->lock for
 * reading or for writing
 */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
	struct rb_node *n = sp->root.rb_node;

	while (n) {
		struct sp_node *p = rb_entry(n, struct sp_node, nd);

		if (start >= p->end)
			n = n->rb_right;
		else if (end <= p->start)
			n = n->rb_left;
		else
			break;
	}
	if (!n)
		return NULL;
	for (;;) {
		struct sp_node *w = NULL;
		struct rb_node *prev = rb_prev(n);
		if (!prev)
			break;
		w = rb_entry(prev, struct sp_node, nd);
		if (w->end <= start)
			break;
		n = prev;
	}
	return rb_entry(n, struct sp_node, nd);
}

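/*
 * Editor's worked example (not part of the kernel source): with stored
 * ranges [2, 5) and [8, 11), sp_lookup(sp, 4, 9) first lands on either
 * intersecting node via the binary search, then the backward scan
 * returns the leftmost intersecting range, [2, 5).  Callers iterate
 * forward from there with rb_next(), as shared_policy_replace() does.
 */
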
/*
 * Insert a new shared policy into the list.  Caller holds sp->lock for
 * writing.
 */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
	struct rb_node **p = &sp->root.rb_node;
	struct rb_node *parent = NULL;
	struct sp_node *nd;

	while (*p) {
		parent = *p;
		nd = rb_entry(parent, struct sp_node, nd);
		if (new->start < nd->start)
			p = &(*p)->rb_left;
		else if (new->end > nd->end)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new->nd, parent, p);
	rb_insert_color(&new->nd, &sp->root);
	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
		 new->policy ? new->policy->mode : 0);
}

/* Find shared policy intersecting idx */
struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = NULL;
	struct sp_node *sn;

	if (!sp->root.rb_node)
		return NULL;
	read_lock(&sp->lock);
	sn = sp_lookup(sp, idx, idx+1);
	if (sn) {
		mpol_get(sn->policy);
		pol = sn->policy;
	}
	read_unlock(&sp->lock);
	return pol;
}

static void sp_free(struct sp_node *n)
{
	mpol_put(n->policy);
	kmem_cache_free(sn_cache, n);
}

/**
 * mpol_misplaced - check whether current page node is valid in policy
 *
 * @page: page to be checked
 * @vma: vm area where page mapped
 * @addr: virtual address where page mapped
 *
 * Lookup current policy node id for vma,addr and "compare to" page's
 * node id.
 *
 * Returns:
 *	-1	- not misplaced, page is in the right node
 *	node	- node id where the page should be
 *
 * Policy determination "mimics" alloc_page_vma().
 * Called from fault path where we know the vma and faulting address.
 */
int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol;
	struct zoneref *z;
	int curnid = page_to_nid(page);
	unsigned long pgoff;
	int thiscpu = raw_smp_processor_id();
	int thisnid = cpu_to_node(thiscpu);
	int polnid = -1;
	int ret = -1;

	pol = get_vma_policy(vma, addr);
	if (!(pol->flags & MPOL_F_MOF))
		goto out;

	switch (pol->mode) {
	case MPOL_INTERLEAVE:
		pgoff = vma->vm_pgoff;
		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
		polnid = offset_il_node(pol, pgoff);
		break;

	case MPOL_PREFERRED:
		if (pol->flags & MPOL_F_LOCAL)
			polnid = numa_node_id();
		else
			polnid = pol->v.preferred_node;
		break;

	case MPOL_BIND:
		/*
		 * allows binding to multiple nodes.
		 * use current page if in policy nodemask,
		 * else select nearest allowed node, if any.
		 * If no allowed nodes, use current [!misplaced].
		 */
		if (node_isset(curnid, pol->v.nodes))
			goto out;
		z = first_zones_zonelist(
				node_zonelist(numa_node_id(), GFP_HIGHUSER),
				gfp_zone(GFP_HIGHUSER),
				&pol->v.nodes);
		polnid = z->zone->node;
		break;

	default:
		BUG();
	}

	/* Migrate the page towards the node whose CPU is referencing it */
	if (pol->flags & MPOL_F_MORON) {
		polnid = thisnid;

		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
			goto out;
	}

	if (curnid != polnid)
		ret = polnid;
out:
	mpol_cond_put(pol);

	return ret;
}

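/*
 * Editor's sketch (not part of the kernel source): the NUMA hinting
 * fault path (cf. do_numa_page() in mm/memory.c) uses this roughly as:
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *
 *	if (target_nid != -1)
 *		migrate_misplaced_page(page, vma, target_nid);
 *
 * where -1 means the page may stay where it is.
 */
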
/*
 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
 * dropped after task->mempolicy is set to NULL so that any allocation done as
 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
 * policy.
 */
void mpol_put_task_policy(struct task_struct *task)
{
	struct mempolicy *pol;

	task_lock(task);
	pol = task->mempolicy;
	task->mempolicy = NULL;
	task_unlock(task);
	mpol_put(pol);
}

static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
	rb_erase(&n->nd, &sp->root);
	sp_free(n);
}

static void sp_node_init(struct sp_node *node, unsigned long start,
			unsigned long end, struct mempolicy *pol)
{
	node->start = start;
	node->end = end;
	node->policy = pol;
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
				struct mempolicy *pol)
{
	struct sp_node *n;
	struct mempolicy *newpol;

	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n)
		return NULL;

	newpol = mpol_dup(pol);
	if (IS_ERR(newpol)) {
		kmem_cache_free(sn_cache, n);
		return NULL;
	}
	newpol->flags |= MPOL_F_SHARED;
	sp_node_init(n, start, end, newpol);

	return n;
}

/* Replace a policy range. */
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
				 unsigned long end, struct sp_node *new)
{
	struct sp_node *n;
	struct sp_node *n_new = NULL;
	struct mempolicy *mpol_new = NULL;
	int ret = 0;

restart:
	write_lock(&sp->lock);
	n = sp_lookup(sp, start, end);
	/* Take care of old policies in the same range. */
	while (n && n->start < end) {
		struct rb_node *next = rb_next(&n->nd);
		if (n->start >= start) {
			if (n->end <= end)
				sp_delete(sp, n);
			else
				n->start = end;
		} else {
			/* Old policy spanning whole new range. */
			if (n->end > end) {
				if (!n_new)
					goto alloc_new;

				*mpol_new = *n->policy;
				atomic_set(&mpol_new->refcnt, 1);
				sp_node_init(n_new, end, n->end, mpol_new);
				n->end = start;
				sp_insert(sp, n_new);
				n_new = NULL;
				mpol_new = NULL;
				break;
			} else
				n->end = start;
		}
		if (!next)
			break;
		n = rb_entry(next, struct sp_node, nd);
	}
	if (new)
		sp_insert(sp, new);
	write_unlock(&sp->lock);
	ret = 0;

err_out:
	if (mpol_new)
		mpol_put(mpol_new);
	if (n_new)
		kmem_cache_free(sn_cache, n_new);

	return ret;

alloc_new:
	write_unlock(&sp->lock);
	ret = -ENOMEM;
	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
	if (!n_new)
		goto err_out;
	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!mpol_new)
		goto err_out;
	goto restart;
}

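/*
 * Editor's worked example (not part of the kernel source): replacing
 * [4, 6) in a tree that holds one node [0, 10) splits the old range in
 * two.  The preallocated n_new/mpol_new pair becomes [6, 10) with a copy
 * of the old policy, the old node is trimmed to [0, 4), and the new node
 * [4, 6) is inserted between them.  The restart/alloc_new dance exists
 * because the split needs memory that must not be allocated while
 * holding sp->lock.
 */
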
/**
 * mpol_shared_policy_init - initialize shared policy for inode
 * @sp: pointer to inode shared policy
 * @mpol: struct mempolicy to install
 *
 * Install non-NULL @mpol in inode's shared policy rb-tree.
 * On entry, the current task has a reference on a non-NULL @mpol.
 * This must be released on exit.
 * This is called during get_inode() calls and we can use GFP_KERNEL.
 */
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
{
	int ret;

	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
	rwlock_init(&sp->lock);

	if (mpol) {
		struct vm_area_struct pvma;
		struct mempolicy *new;
		NODEMASK_SCRATCH(scratch);

		if (!scratch)
			goto put_mpol;
		/* contextualize the tmpfs mount point mempolicy */
		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
		if (IS_ERR(new))
			goto free_scratch; /* no valid nodemask intersection */

		task_lock(current);
		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
		task_unlock(current);
		if (ret)
			goto put_new;

		/* Create pseudo-vma that contains just the policy */
		memset(&pvma, 0, sizeof(struct vm_area_struct));
		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */

put_new:
		mpol_put(new);			/* drop initial ref */
free_scratch:
		NODEMASK_SCRATCH_FREE(scratch);
put_mpol:
		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
	}
}

int mpol_set_shared_policy(struct shared_policy *info,
			struct vm_area_struct *vma, struct mempolicy *npol)
{
	int err;
	struct sp_node *new = NULL;
	unsigned long sz = vma_pages(vma);

	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol ? npol->mode : -1,
		 npol ? npol->flags : -1,
		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);

	if (npol) {
		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
		if (!new)
			return -ENOMEM;
	}
	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
	if (err && new)
		sp_free(new);
	return err;
}

/* Free a backing policy store on inode delete. */
void mpol_free_shared_policy(struct shared_policy *p)
{
	struct sp_node *n;
	struct rb_node *next;

	if (!p->root.rb_node)
		return;
	write_lock(&p->lock);
	next = rb_first(&p->root);
	while (next) {
		n = rb_entry(next, struct sp_node, nd);
		next = rb_next(&n->nd);
		sp_delete(p, n);
	}
	write_unlock(&p->lock);
}

#ifdef CONFIG_NUMA_BALANCING
static int __initdata numabalancing_override;

static void __init check_numabalancing_enable(void)
{
	bool numabalancing_default = false;

	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
		numabalancing_default = true;

	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
	if (numabalancing_override)
		set_numabalancing_state(numabalancing_override == 1);

	if (num_online_nodes() > 1 && !numabalancing_override) {
		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
			numabalancing_default ? "Enabling" : "Disabling");
		set_numabalancing_state(numabalancing_default);
	}
}

static int __init setup_numabalancing(char *str)
{
	int ret = 0;
	if (!str)
		goto out;

	if (!strcmp(str, "enable")) {
		numabalancing_override = 1;
		ret = 1;
	} else if (!strcmp(str, "disable")) {
		numabalancing_override = -1;
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("Unable to parse numa_balancing=\n");

	return ret;
}
__setup("numa_balancing=", setup_numabalancing);
#else
static inline void __init check_numabalancing_enable(void)
{
}
#endif /* CONFIG_NUMA_BALANCING */

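/*
 * Editor's note (not part of the kernel source): the __setup() hook above
 * makes "numa_balancing=enable" and "numa_balancing=disable" valid kernel
 * command line options; at runtime the same switch is reachable through
 * the kernel.numa_balancing sysctl, e.g.:
 *
 *	sysctl kernel.numa_balancing=1
 */
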
/* assumes fs == KERNEL_DS */
void __init numa_policy_init(void)
{
	nodemask_t interleave_nodes;
	unsigned long largest = 0;
	int nid, prefer = 0;

	policy_cache = kmem_cache_create("numa_policy",
					 sizeof(struct mempolicy),
					 0, SLAB_PANIC, NULL);

	sn_cache = kmem_cache_create("shared_policy_node",
				     sizeof(struct sp_node),
				     0, SLAB_PANIC, NULL);

	for_each_node(nid) {
		preferred_node_policy[nid] = (struct mempolicy) {
			.refcnt = ATOMIC_INIT(1),
			.mode = MPOL_PREFERRED,
			.flags = MPOL_F_MOF | MPOL_F_MORON,
			.v = { .preferred_node = nid, },
		};
	}

	/*
	 * Set interleaving policy for system init. Interleaving is only
	 * enabled across suitably sized nodes (default is >= 16MB), or
	 * fall back to the largest node if they're all smaller.
	 */
	nodes_clear(interleave_nodes);
	for_each_node_state(nid, N_MEMORY) {
		unsigned long total_pages = node_present_pages(nid);

		/* Preserve the largest node */
		if (largest < total_pages) {
			largest = total_pages;
			prefer = nid;
		}

		/* Interleave this node? */
		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
			node_set(nid, interleave_nodes);
	}

	/* All too small, use the largest */
	if (unlikely(nodes_empty(interleave_nodes)))
		node_set(prefer, interleave_nodes);

	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
		pr_err("%s: interleaving failed\n", __func__);

	check_numabalancing_enable();
}

/* Reset policy of current process to default */
void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}

/*
 * Parse and format mempolicy from/to strings
 */

/*
 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
 */
static const char * const policy_modes[] =
{
	[MPOL_DEFAULT]    = "default",
	[MPOL_PREFERRED]  = "prefer",
	[MPOL_BIND]       = "bind",
	[MPOL_INTERLEAVE] = "interleave",
	[MPOL_LOCAL]      = "local",
};

#ifdef CONFIG_TMPFS
/**
 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
 * @str: string containing mempolicy to parse
 * @mpol: pointer to struct mempolicy pointer, returned on success.
 *
 * Format of input:
 *	<mode>[=<flags>][:<nodelist>]
 *
 * On success, returns 0, else 1
 */
int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	struct mempolicy *new = NULL;
	unsigned short mode;
	unsigned short mode_flags;
	nodemask_t nodes;
	char *nodelist = strchr(str, ':');
	char *flags = strchr(str, '=');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate mode or flags string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, nodes))
			goto out;
		if (!nodes_subset(nodes, node_states[N_MEMORY]))
			goto out;
	} else
		nodes_clear(nodes);

	if (flags)
		*flags++ = '\0';	/* terminate mode string */

	for (mode = 0; mode < MPOL_MAX; mode++) {
		if (!strcmp(str, policy_modes[mode])) {
			break;
		}
	}
	if (mode >= MPOL_MAX)
		goto out;

	switch (mode) {
	case MPOL_PREFERRED:
		/*
		 * Insist on a nodelist of one node only
		 */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (*rest)
				goto out;
		}
		break;
	case MPOL_INTERLEAVE:
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			nodes = node_states[N_MEMORY];
		break;
	case MPOL_LOCAL:
		/*
		 * Don't allow a nodelist;  mpol_new() checks flags
		 */
		if (nodelist)
			goto out;
		mode = MPOL_PREFERRED;
		break;
	case MPOL_DEFAULT:
		/*
		 * Insist on an empty nodelist
		 */
		if (!nodelist)
			err = 0;
		goto out;
	case MPOL_BIND:
		/*
		 * Insist on a nodelist
		 */
		if (!nodelist)
			goto out;
	}

	mode_flags = 0;
	if (flags) {
		/*
		 * Currently, we only support two mutually exclusive
		 * mode flags.
		 */
		if (!strcmp(flags, "static"))
			mode_flags |= MPOL_F_STATIC_NODES;
		else if (!strcmp(flags, "relative"))
			mode_flags |= MPOL_F_RELATIVE_NODES;
		else
			goto out;
	}

	new = mpol_new(mode, mode_flags, &nodes);
	if (IS_ERR(new))
		goto out;

	/*
	 * Save nodes for mpol_to_str() to show the tmpfs mount options
	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
	 */
	if (mode != MPOL_PREFERRED)
		new->v.nodes = nodes;
	else if (nodelist)
		new->v.preferred_node = first_node(nodes);
	else
		new->flags |= MPOL_F_LOCAL;

	/*
	 * Save nodes for contextualization: this will be used to "clone"
	 * the mempolicy in a specific context [cpuset] at a later time.
	 */
	new->w.user_nodemask = nodes;

	err = 0;

out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	if (flags)
		*--flags = '=';
	if (!err)
		*mpol = new;
	return err;
}
#endif /* CONFIG_TMPFS */

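/*
 * Editor's worked example (not part of the kernel source): for the tmpfs
 * mount option mpol=interleave=static:0-3, mpol_parse_str() is handed
 * the writable string "interleave=static:0-3" and yields a mempolicy
 * with mode MPOL_INTERLEAVE, flags MPOL_F_STATIC_NODES and nodes 0-3:
 *
 *	struct mempolicy *mpol;
 *	char str[] = "interleave=static:0-3";
 *
 *	if (!mpol_parse_str(str, &mpol)) {
 *		... use mpol, then mpol_put(mpol) ...
 *	}
 *
 * The buffer must be writable: the parser temporarily NUL-terminates the
 * mode and flags substrings and restores them before returning.
 */
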
/**
 * mpol_to_str - format a mempolicy structure for printing
 * @buffer: to contain formatted mempolicy string
 * @maxlen: length of @buffer
 * @pol: pointer to mempolicy to be formatted
 *
 * Convert @pol into a string.  If @buffer is too short, truncate the string.
 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
 * longest flag, "relative", and to display at least a few node ids.
 */
void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
	char *p = buffer;
	nodemask_t nodes = NODE_MASK_NONE;
	unsigned short mode = MPOL_DEFAULT;
	unsigned short flags = 0;

	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
		mode = pol->mode;
		flags = pol->flags;
	}

	switch (mode) {
	case MPOL_DEFAULT:
		break;
	case MPOL_PREFERRED:
		if (flags & MPOL_F_LOCAL)
			mode = MPOL_LOCAL;
		else
			node_set(pol->v.preferred_node, nodes);
		break;
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		nodes = pol->v.nodes;
		break;
	default:
		WARN_ON_ONCE(1);
		snprintf(p, maxlen, "unknown");
		return;
	}

	p += snprintf(p, maxlen, "%s", policy_modes[mode]);

	if (flags & MPOL_MODE_FLAGS) {
		p += snprintf(p, buffer + maxlen - p, "=");

		/*
		 * Currently, the only defined flags are mutually exclusive
		 */
		if (flags & MPOL_F_STATIC_NODES)
			p += snprintf(p, buffer + maxlen - p, "static");
		else if (flags & MPOL_F_RELATIVE_NODES)
			p += snprintf(p, buffer + maxlen - p, "relative");
	}

	if (!nodes_empty(nodes))
		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
			       nodemask_pr_args(&nodes));
}
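
/*
 * Editor's worked examples (not part of the kernel source) of
 * mpol_to_str() output, as seen in tmpfs mount options and
 * /proc/<pid>/numa_maps:
 *
 *	MPOL_INTERLEAVE on nodes 0-3          -> "interleave:0-3"
 *	MPOL_PREFERRED, MPOL_F_STATIC_NODES,
 *	preferred node 2                      -> "prefer=static:2"
 *	MPOL_PREFERRED with MPOL_F_LOCAL      -> "local"
 *	MPOL_DEFAULT                          -> "default"
 */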