/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
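
/*
 * Illustrative userspace sketch (not part of this file): the policies
 * above are normally installed through the set_mempolicy(2) and mbind(2)
 * system calls, e.g. with a nodemask selecting nodes 0 and 1:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *	mbind(buf, len, MPOL_BIND, &nodes, 8 * sizeof(nodes), 0);
 *
 * where buf/len stand for some existing mapping. The first call sets the
 * calling task's process policy; the second applies a VMA policy to the
 * pages backing buf.
 */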

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy, the
	 * write-side task rebinds task->mempolicy in two steps. The first
	 * step sets all the newly allowed nodes, and the second step cleans
	 * all the disallowed nodes. This way we can avoid finding no node
	 * to allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side, we
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
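
/*
 * Illustrative sketch (not part of the original source): with
 * MPOL_F_RELATIVE_NODES the user mask names positions within the allowed
 * set rather than absolute node ids. Assuming an allowed set
 * rel = {4,5,6,7} (weight 4) and a user mask orig = {0,2}, nodes_fold()
 * keeps {0,2} (already below 4) and nodes_onto() maps those positions
 * onto rel, yielding ret = {4,6}.
 */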

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags. But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_semaphore for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node_in(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}
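
/*
 * Illustrative sketch (not part of the original source): rebinding a
 * non-static, non-relative MPOL_INTERLEAVE policy from nodes {0,1} to
 * {2,3} in two steps first grows v.nodes to {0,1,2,3} (MPOL_REBIND_STEP1,
 * via nodes_or) and then shrinks it to {2,3} (MPOL_REBIND_STEP2), so a
 * lockless reader always sees at least one allowed node.
 */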

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task rebinds task->mempolicy in two steps. The first step
 * sets all the newly allowed nodes, and the second step cleans all the
 * disallowed nodes. This way we can avoid finding no node to allocate
 * a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid, ret;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_trans_huge(*pmd)) {
			page = pmd_page(*pmd);
			if (is_huge_zero_page(page)) {
				spin_unlock(ptl);
				split_huge_pmd(vma, pmd, addr);
			} else {
				get_page(page);
				spin_unlock(ptl);
				lock_page(page);
				ret = split_huge_page(page);
				unlock_page(page);
				put_page(page);
				if (ret)
					return 0;
			}
		} else {
			spin_unlock(ptl);
		}
	}

retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;
		if (PageTransCompound(page) && PageAnon(page)) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (!vma_migratable(vma))
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) &&
			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued onto the pagelist
 * passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s,d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			busy += err;
		if (err < 0)
			break;
	}
	up_read(&mm->mmap_sem);
	if (err < 0)
		return err;
	return busy;

}
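
/*
 * Illustrative trace (not part of the original source): migrating from
 * {0,1} to {1,2} first finds the pair <1,2> because node 2 is an "empty
 * slot" (not in the remaining source set), migrates node 1's pages to
 * node 2, and only then migrates node 0's pages to node 1, so a
 * destination is drained before new pages are moved onto it.
 */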

/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	struct vm_area_struct *vma;
	unsigned long uninitialized_var(address);

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		BUG_ON(!vma);
		return alloc_huge_page_noerr(vma, address, 1);
	}
	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct mempolicy *new;
	unsigned long end;
	int err;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (flags & MPOL_MF_LAZY)
		new->flags |= MPOL_F_MOF;

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
		 start, start + len, mode, mode_flags,
		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

		err = migrate_prep();
		if (err)
			goto mpol_out;
	}
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			down_write(&mm->mmap_sem);
			task_lock(current);
			err = mpol_set_nodemask(new, nmask, scratch);
			task_unlock(current);
			if (err)
				up_write(&mm->mmap_sem);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	err = queue_pages_range(mm, start, end, nmask,
			  flags | MPOL_MF_INVERT, &pagelist);
	if (!err)
		err = mbind_range(mm, start, end, new);

	if (!err) {
		int nr_failed = 0;

		if (!list_empty(&pagelist)) {
			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
			nr_failed = migrate_pages(&pagelist, new_page, NULL,
				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
			if (nr_failed)
				putback_movable_pages(&pagelist);
		}

		if (nr_failed && (flags & MPOL_MF_STRICT))
			err = -EIO;
	} else
		putback_movable_pages(&pagelist);

	up_write(&mm->mmap_sem);
 mpol_out:
	mpol_put(new);
	return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	unsigned long k;
	unsigned long nlongs;
	unsigned long endmask;

	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* When the user specified more nodes than supported just check
	   if the non supported part is all zero. */
	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
		if (nlongs > PAGE_SIZE/sizeof(long))
			return -EINVAL;
		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
			unsigned long t;
			if (get_user(t, nmask + k))
				return -EFAULT;
			if (k == nlongs - 1) {
				if (t & endmask)
					return -EINVAL;
			} else if (t)
				return -EINVAL;
		}
		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
		endmask = ~0UL;
	}

	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
		return -EFAULT;
	nodes_addr(*nodes)[nlongs-1] &= endmask;
	return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
	}
	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned, flags)
{
	nodemask_t nodes;
	int err;
	unsigned short mode_flags;

	mode_flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if (mode >= MPOL_MAX)
		return -EINVAL;
	if ((mode_flags & MPOL_F_STATIC_NODES) &&
	    (mode_flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	int err;
	nodemask_t nodes;
	unsigned short flags;

	flags = mode & MPOL_MODE_FLAGS;
	mode &= ~MPOL_MODE_FLAGS;
	if ((unsigned int)mode >= MPOL_MAX)
		return -EINVAL;
	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;
	return do_set_mempolicy(mode, flags, &nodes);
}

SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
		const unsigned long __user *, old_nodes,
		const unsigned long __user *, new_nodes)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm = NULL;
	struct task_struct *task;
	nodemask_t task_nodes;
	int err;
	nodemask_t *old;
	nodemask_t *new;
	NODEMASK_SCRATCH(scratch);

	if (!scratch)
		return -ENOMEM;

	old = &scratch->mask1;
	new = &scratch->mask2;

	err = get_nodes(old, old_nodes, maxnode);
	if (err)
		goto out;

	err = get_nodes(new, new_nodes, maxnode);
	if (err)
		goto out;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		err = -ESRCH;
		goto out;
	}
	get_task_struct(task);

	err = -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * userid as the target process.
	 */
	tcred = __task_cred(task);
	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out_put;
	}
	rcu_read_unlock();

	task_nodes = cpuset_mems_allowed(task);
	/* Is the user allowed to access the target nodes? */
	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
		err = -EPERM;
		goto out_put;
	}

	if (!nodes_subset(*new, node_states[N_MEMORY])) {
		err = -EINVAL;
		goto out_put;
	}

	err = security_task_movememory(task);
	if (err)
		goto out_put;

	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		err = -EINVAL;
		goto out;
	}

	err = do_migrate_pages(mm, old, new,
		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);

	mmput(mm);
out:
	NODEMASK_SCRATCH_FREE(scratch);

	return err;

out_put:
	put_task_struct(task);
	goto out;

}


/* Retrieve NUMA policy */
SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		unsigned long __user *, nmask, unsigned long, maxnode,
		unsigned long, addr, unsigned long, flags)
{
	int err;
	int uninitialized_var(pval);
	nodemask_t nodes;

	if (nmask != NULL && maxnode < MAX_NUMNODES)
		return -EINVAL;

	err = do_get_mempolicy(&pval, &nodes, addr, flags);

	if (err)
		return err;

	if (policy && put_user(pval, policy))
		return -EFAULT;

	if (nmask)
		err = copy_nodes_to_user(nmask, maxnode, &nodes);

	return err;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
		       compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode,
		       compat_ulong_t, addr, compat_ulong_t, flags)
{
	long err;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask)
		nm = compat_alloc_user_space(alloc_size);

	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

	if (!err && nmask) {
		unsigned long copy_size;
		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
		err = copy_from_user(bm, nm, copy_size);
		/* ensure entire bitmap is zeroed */
		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
		err |= compat_put_bitmap(nmask, bm, nr_bits);
	}

	return err;
}

COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	DECLARE_BITMAP(bm, MAX_NUMNODES);

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(bm, nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_set_mempolicy(mode, nm, nr_bits+1);
}

COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
		       compat_ulong_t, maxnode, compat_ulong_t, flags)
{
	long err = 0;
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	nodemask_t bm;

	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
	}

	if (err)
		return -EFAULT;

	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = NULL;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy) {
			pol = vma->vm_ops->get_policy(vma, addr);
		} else if (vma->vm_policy) {
			pol = vma->vm_policy;

			/*
			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
			 * count on these policies which will be dropped by
			 * mpol_cond_put() later
			 */
			if (mpol_needs_cond_ref(pol))
				mpol_get(pol);
		}
	}

	return pol;
}

/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
						unsigned long addr)
{
	struct mempolicy *pol = __get_vma_policy(vma, addr);

	if (!pol)
		pol = get_task_policy(current);

	return pol;
}

bool vma_policy_mof(struct vm_area_struct *vma)
{
	struct mempolicy *pol;

	if (vma->vm_ops && vma->vm_ops->get_policy) {
		bool ret = false;

		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
		if (pol && (pol->flags & MPOL_F_MOF))
			ret = true;
		mpol_cond_put(pol);

		return ret;
	}

	pol = vma->vm_policy;
	if (!pol)
		pol = get_task_policy(current);

	return pol->flags & MPOL_F_MOF;
}

static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
	enum zone_type dynamic_policy_zone = policy_zone;

	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);

	/*
	 * if policy->v.nodes has movable memory only,
	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
	 *
	 * policy->v.nodes is intersected with node_states[N_MEMORY],
	 * so if the following test fails, it implies
	 * policy->v.nodes has movable memory only.
	 */
	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
		dynamic_policy_zone = ZONE_MOVABLE;

	return zone >= dynamic_policy_zone;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	/* Lower zones don't get a nodemask applied for MPOL_BIND */
	if (unlikely(policy->mode == MPOL_BIND) &&
	    apply_policy_zone(policy, gfp_zone(gfp)) &&
	    cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
		return &policy->v.nodes;

	return NULL;
}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
	int nd)
{
	switch (policy->mode) {
	case MPOL_PREFERRED:
		if (!(policy->flags & MPOL_F_LOCAL))
			nd = policy->v.preferred_node;
		break;
	case MPOL_BIND:
		/*
		 * Normally, MPOL_BIND allocations are node-local within the
		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
		 * current node isn't part of the mask, we use the zonelist for
		 * the first node in the mask instead.
		 */
		if (unlikely(gfp & __GFP_THISNODE) &&
				unlikely(!node_isset(nd, policy->v.nodes)))
			nd = first_node(policy->v.nodes);
		break;
	default:
		BUG();
	}
	return node_zonelist(nd, gfp);
}
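
/*
 * Illustrative sketch (not part of the original source): with an
 * MPOL_BIND policy over nodes {2,3}, a __GFP_THISNODE allocation issued
 * from node 0 cannot honour node 0, so the zonelist for node 2 (the
 * first node in the mask) is returned; without __GFP_THISNODE the caller
 * keeps node 0's zonelist and relies on policy_nodemask() to filter the
 * allowed nodes.
 */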

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned nid, next;
	struct task_struct *me = current;

	nid = me->il_next;
	next = next_node_in(nid, policy->v.nodes);
	if (next < MAX_NUMNODES)
		me->il_next = next;
	return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 */
unsigned int mempolicy_slab_node(void)
{
	struct mempolicy *policy;
	int node = numa_mem_id();

	if (in_interrupt())
		return node;

	policy = current->mempolicy;
	if (!policy || policy->flags & MPOL_F_LOCAL)
		return node;

	switch (policy->mode) {
	case MPOL_PREFERRED:
		/*
		 * handled MPOL_F_LOCAL above
		 */
		return policy->v.preferred_node;

	case MPOL_INTERLEAVE:
		return interleave_nodes(policy);

	case MPOL_BIND: {
		/*
		 * Follow bind policy behavior and start allocation at the
		 * first node.
		 */
		struct zonelist *zonelist;
		struct zone *zone;
		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
		zonelist = &NODE_DATA(node)->node_zonelists[0];
		(void)first_zones_zonelist(zonelist, highest_zoneidx,
							&policy->v.nodes,
							&zone);
		return zone ? zone->node : node;
	}

	default:
		BUG();
	}
}

/*
 * Do static interleaving for a VMA with known offset @n.  Returns the n'th
 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
 * number of present nodes.
 */
static unsigned offset_il_node(struct mempolicy *pol,
			       struct vm_area_struct *vma, unsigned long n)
{
	unsigned nnodes = nodes_weight(pol->v.nodes);
	unsigned target;
	int i;
	int nid;

	if (!nnodes)
		return numa_node_id();
	target = (unsigned int)n % nnodes;
	nid = first_node(pol->v.nodes);
	for (i = 0; i < target; i++)
		nid = next_node(nid, pol->v.nodes);
	return nid;
}
1782
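/*
 * Worked example: with pol->v.nodes = {1,3,5} and n = 7, nnodes is 3 and
 * target = 7 % 3 = 1, so the walk starts at node 1 and advances once,
 * returning node 3. Offsets 0,1,2,3,... therefore map to nodes 1,3,5,1,...
 * independent of any per-task state.
 */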
5da7ca86
CL
1783/* Determine a node number for interleave */
1784static inline unsigned interleave_nid(struct mempolicy *pol,
1785 struct vm_area_struct *vma, unsigned long addr, int shift)
1786{
1787 if (vma) {
1788 unsigned long off;
1789
3b98b087
NA
1790 /*
1792 * For small pages, there is no difference between
1793 * shift and PAGE_SHIFT, so the bit-shift is safe.
1794 * For huge pages, since vm_pgoff is in units of small
1795 * pages, we need to shift off the always-zero low bits to get
1796 * a useful offset.
1796 */
1797 BUG_ON(shift < PAGE_SHIFT);
1798 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86
CL
1799 off += (addr - vma->vm_start) >> shift;
1800 return offset_il_node(pol, vma, off);
1801 } else
1802 return interleave_nodes(pol);
1803}
1804
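/*
 * Worked example (assuming 4KB base pages, PAGE_SHIFT == 12): for a 2MB
 * huge page fault, shift == 21, so a vma with vm_pgoff == 1024 (4MB into
 * the backing object) contributes 1024 >> 9 == 2 huge-page units, and each
 * further 2MB past vm_start adds one more, before the result is handed to
 * offset_il_node() above.
 */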
00ac59ad 1805#ifdef CONFIG_HUGETLBFS
480eccf9
LS
1806/*
1807 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
b46e14ac
FF
1808 * @vma: virtual memory area whose policy is sought
1809 * @addr: address in @vma for shared policy lookup and interleave policy
1810 * @gfp_flags: for requested zone
1811 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1812 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
480eccf9 1813 *
52cd3b07
LS
1814 * Returns a zonelist suitable for a huge page allocation and a pointer
1815 * to the struct mempolicy for conditional unref after allocation.
1816 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1817 * @nodemask for filtering the zonelist.
c0ff7453 1818 *
d26914d1 1819 * Must be protected by read_mems_allowed_begin()
480eccf9 1820 */
396faf03 1821struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
19770b32
MG
1822 gfp_t gfp_flags, struct mempolicy **mpol,
1823 nodemask_t **nodemask)
5da7ca86 1824{
480eccf9 1825 struct zonelist *zl;
5da7ca86 1826
dd6eecb9 1827 *mpol = get_vma_policy(vma, addr);
19770b32 1828 *nodemask = NULL; /* assume !MPOL_BIND */
5da7ca86 1829
52cd3b07
LS
1830 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1831 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
a5516438 1832 huge_page_shift(hstate_vma(vma))), gfp_flags);
52cd3b07 1833 } else {
2f5f9486 1834 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
52cd3b07
LS
1835 if ((*mpol)->mode == MPOL_BIND)
1836 *nodemask = &(*mpol)->v.nodes;
480eccf9
LS
1837 }
1838 return zl;
5da7ca86 1839}
06808b08
LS
1840
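/*
 * Condensed caller pattern, loosely modelled on the hugetlb fault path
 * (identifiers are illustrative): take the cpuset seqcount, fetch the
 * zonelist and optional nodemask, and drop the conditional policy ref when
 * done, retrying if the allowed mems changed underneath us.
 */
static struct page *huge_fault_alloc_sketch(struct vm_area_struct *vma,
					    unsigned long addr, gfp_t gfp)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zl;
	struct page *page;
	unsigned int cookie;

	do {
		page = NULL;
		cookie = read_mems_allowed_begin();
		zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
		/* ... walk @zl restricted by @nodemask and set @page ... */
		mpol_cond_put(mpol);	/* drop ref taken via get_vma_policy() */
	} while (!page && read_mems_allowed_retry(cookie));

	return page;
}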
1841/*
1842 * init_nodemask_of_mempolicy
1843 *
1844 * If the current task's mempolicy is "default" [NULL], return 'false'
1845 * to indicate default policy. Otherwise, extract the policy nodemask
1846 * for 'bind' or 'interleave' policy into the argument nodemask, or
1847 * initialize the argument nodemask to contain the single node for
1848 * 'preferred' or 'local' policy and return 'true' to indicate presence
1849 * of non-default mempolicy.
1850 *
1851 * We don't bother with reference counting the mempolicy [mpol_get/put]
1852 * because the current task is examining its own mempolicy and a task's
1853 * mempolicy is only ever changed by the task itself.
1854 *
1855 * N.B., it is the caller's responsibility to free a returned nodemask.
1856 */
1857bool init_nodemask_of_mempolicy(nodemask_t *mask)
1858{
1859 struct mempolicy *mempolicy;
1860 int nid;
1861
1862 if (!(mask && current->mempolicy))
1863 return false;
1864
c0ff7453 1865 task_lock(current);
06808b08
LS
1866 mempolicy = current->mempolicy;
1867 switch (mempolicy->mode) {
1868 case MPOL_PREFERRED:
1869 if (mempolicy->flags & MPOL_F_LOCAL)
1870 nid = numa_node_id();
1871 else
1872 nid = mempolicy->v.preferred_node;
1873 init_nodemask_of_node(mask, nid);
1874 break;
1875
1876 case MPOL_BIND:
1877 /* Fall through */
1878 case MPOL_INTERLEAVE:
1879 *mask = mempolicy->v.nodes;
1880 break;
1881
1882 default:
1883 BUG();
1884 }
c0ff7453 1885 task_unlock(current);
06808b08
LS
1886
1887 return true;
1888}
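/*
 * Illustrative use (a simplified version of how hugetlb sizes per-node pool
 * requests; the helper name is made up for this sketch): count how many
 * nodes the current task's policy actually spans.
 */
static inline int policy_node_count_sketch(void)
{
	nodemask_t mask;

	if (!init_nodemask_of_mempolicy(&mask))
		mask = node_states[N_MEMORY];	/* default policy: all memory nodes */

	return nodes_weight(mask);
}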
00ac59ad 1889#endif
5da7ca86 1890
6f48d0eb
DR
1891/*
1892 * mempolicy_nodemask_intersects
1893 *
1894 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1895 * policy. Otherwise, check for intersection between mask and the policy
1896 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1897 * policy, always return true since it may allocate elsewhere on fallback.
1898 *
1899 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1900 */
1901bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1902 const nodemask_t *mask)
1903{
1904 struct mempolicy *mempolicy;
1905 bool ret = true;
1906
1907 if (!mask)
1908 return ret;
1909 task_lock(tsk);
1910 mempolicy = tsk->mempolicy;
1911 if (!mempolicy)
1912 goto out;
1913
1914 switch (mempolicy->mode) {
1915 case MPOL_PREFERRED:
1916 /*
1917 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
1918 * allocate from; the task may fall back to other nodes when OOM.
1919 * Thus, it's possible for tsk to have allocated memory from
1920 * nodes in mask.
1921 */
1922 break;
1923 case MPOL_BIND:
1924 case MPOL_INTERLEAVE:
1925 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1926 break;
1927 default:
1928 BUG();
1929 }
1930out:
1931 task_unlock(tsk);
1932 return ret;
1933}
1934
1da177e4
LT
1935/* Allocate a page in interleaved policy.
1936 Own path because it needs to do special accounting. */
662f3a0b
AK
1937static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1938 unsigned nid)
1da177e4
LT
1939{
1940 struct zonelist *zl;
1941 struct page *page;
1942
0e88460d 1943 zl = node_zonelist(nid, gfp);
1da177e4 1944 page = __alloc_pages(gfp, order, zl);
dd1a239f 1945 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
ca889e6c 1946 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1da177e4
LT
1947 return page;
1948}
1949
1950/**
0bbbc0b3 1951 * alloc_pages_vma - Allocate a page for a VMA.
1da177e4
LT
1952 *
1953 * @gfp:
1954 * %GFP_USER user allocation.
1955 * %GFP_KERNEL kernel allocations,
1956 * %GFP_HIGHMEM highmem/user allocations,
1957 * %GFP_FS allocation should not call back into a file system.
1958 * %GFP_ATOMIC don't sleep.
1959 *
0bbbc0b3 1960 * @order: Order of the GFP allocation.
1da177e4
LT
1961 * @vma: Pointer to VMA or NULL if not available.
1962 * @addr: Virtual Address of the allocation. Must be inside the VMA.
be97a41b
VB
1963 * @node: Which node to prefer for allocation (modulo policy).
1964 * @hugepage: for hugepages try only the preferred node if possible
1da177e4
LT
1965 *
1966 * This function allocates a page from the kernel page pool and applies
1967 * a NUMA policy associated with the VMA or the current process.
1968 * When VMA is not NULL, the caller must hold down_read on the mmap_sem of the
1969 * mm_struct of the VMA to prevent it from going away. Should be used for
be97a41b
VB
1970 * all allocations for pages that will be mapped into user space. Returns
1971 * NULL when no page can be allocated.
1da177e4
LT
1972 */
1973struct page *
0bbbc0b3 1974alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
be97a41b 1975 unsigned long addr, int node, bool hugepage)
1da177e4 1976{
cc9a6c87 1977 struct mempolicy *pol;
c0ff7453 1978 struct page *page;
cc9a6c87 1979 unsigned int cpuset_mems_cookie;
be97a41b
VB
1980 struct zonelist *zl;
1981 nodemask_t *nmask;
cc9a6c87
MG
1982
1983retry_cpuset:
dd6eecb9 1984 pol = get_vma_policy(vma, addr);
d26914d1 1985 cpuset_mems_cookie = read_mems_allowed_begin();
1da177e4 1986
0867a57c
VB
1987 if (pol->mode == MPOL_INTERLEAVE) {
1988 unsigned nid;
1989
1990 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1991 mpol_cond_put(pol);
1992 page = alloc_page_interleave(gfp, order, nid);
1993 goto out;
1994 }
1995
1996 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
1997 int hpage_node = node;
1998
be97a41b
VB
1999 /*
2000 * For hugepage allocation and non-interleave policy which
0867a57c
VB
2001 * allows the current node (or other explicitly preferred
2002 * node) we only try to allocate from the current/preferred
2003 * node and don't fall back to other nodes, as the cost of
2004 * remote accesses would likely offset THP benefits.
be97a41b
VB
2005 *
2006 * If the policy is interleave, or does not allow the current
2007 * node in its nodemask, we allocate the standard way.
2008 */
0867a57c
VB
2009 if (pol->mode == MPOL_PREFERRED &&
2010 !(pol->flags & MPOL_F_LOCAL))
2011 hpage_node = pol->v.preferred_node;
2012
be97a41b 2013 nmask = policy_nodemask(gfp, pol);
0867a57c 2014 if (!nmask || node_isset(hpage_node, *nmask)) {
be97a41b 2015 mpol_cond_put(pol);
96db800f 2016 page = __alloc_pages_node(hpage_node,
5265047a 2017 gfp | __GFP_THISNODE, order);
be97a41b
VB
2018 goto out;
2019 }
2020 }
2021
be97a41b
VB
2022 nmask = policy_nodemask(gfp, pol);
2023 zl = policy_zonelist(gfp, pol, node);
2386740d 2024 mpol_cond_put(pol);
be97a41b
VB
2025 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2026out:
d26914d1 2027 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
cc9a6c87 2028 goto retry_cpuset;
c0ff7453 2029 return page;
1da177e4
LT
2030}
2031
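/*
 * The common fault-path entry point alloc_page_vma() in <linux/gfp.h>
 * expands to roughly the call below; shown here only to make the argument
 * meaning concrete (the helper name is illustrative).
 */
static inline struct page *fault_alloc_sketch(struct vm_area_struct *vma,
					      unsigned long addr)
{
	/* order-0, prefer the local node, not a THP allocation */
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id(), false);
}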
2032/**
2033 * alloc_pages_current - Allocate pages.
2034 *
2035 * @gfp:
2036 * %GFP_USER user allocation,
2037 * %GFP_KERNEL kernel allocation,
2038 * %GFP_HIGHMEM highmem allocation,
2039 * %GFP_FS don't call back into a file system.
2040 * %GFP_ATOMIC don't sleep.
2041 * @order: Power of two of allocation size in pages. 0 is a single page.
2042 *
2043 * Allocate a page from the kernel page pool. When not in
2044 * interrupt context, apply the current process NUMA policy.
2045 * Returns NULL when no page can be allocated.
2046 *
cf2a473c 2047 * Don't call cpuset_update_task_memory_state() unless
1da177e4
LT
2048 * 1) it's ok to take cpuset_sem (can WAIT), and
2049 * 2) allocating for current task (not interrupt).
2050 */
dd0fc66f 2051struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1da177e4 2052{
8d90274b 2053 struct mempolicy *pol = &default_policy;
c0ff7453 2054 struct page *page;
cc9a6c87 2055 unsigned int cpuset_mems_cookie;
1da177e4 2056
8d90274b
ON
2057 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2058 pol = get_task_policy(current);
52cd3b07 2059
cc9a6c87 2060retry_cpuset:
d26914d1 2061 cpuset_mems_cookie = read_mems_allowed_begin();
cc9a6c87 2062
52cd3b07
LS
2063 /*
2064 * No reference counting needed for current->mempolicy
2065 * nor system default_policy
2066 */
45c4745a 2067 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453
MX
2068 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2069 else
2070 page = __alloc_pages_nodemask(gfp, order,
5c4b4be3
AK
2071 policy_zonelist(gfp, pol, numa_node_id()),
2072 policy_nodemask(gfp, pol));
cc9a6c87 2073
d26914d1 2074 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
cc9a6c87
MG
2075 goto retry_cpuset;
2076
c0ff7453 2077 return page;
1da177e4
LT
2078}
2079EXPORT_SYMBOL(alloc_pages_current);
2080
ef0855d3
ON
2081int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2082{
2083 struct mempolicy *pol = mpol_dup(vma_policy(src));
2084
2085 if (IS_ERR(pol))
2086 return PTR_ERR(pol);
2087 dst->vm_policy = pol;
2088 return 0;
2089}
2090
4225399a 2091/*
846a16bf 2092 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
2093 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2094 * with the mems_allowed returned by cpuset_mems_allowed(). This
2095 * keeps mempolicies cpuset relative after its cpuset moves. See
2096 * further kernel/cpuset.c update_nodemask().
708c1bbc
MX
2097 *
2098 * current's mempolicy may be rebound by another task (the task that changes
2099 * the cpuset's mems), so we needn't do rebind work for the current task.
4225399a 2100 */
4225399a 2101
846a16bf
LS
2102/* Slow path of a mempolicy duplicate */
2103struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2104{
2105 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2106
2107 if (!new)
2108 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2109
2110 /* task's mempolicy is protected by alloc_lock */
2111 if (old == current->mempolicy) {
2112 task_lock(current);
2113 *new = *old;
2114 task_unlock(current);
2115 } else
2116 *new = *old;
2117
4225399a
PJ
2118 if (current_cpuset_is_being_rebound()) {
2119 nodemask_t mems = cpuset_mems_allowed(current);
708c1bbc
MX
2120 if (new->flags & MPOL_F_REBINDING)
2121 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2122 else
2123 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
4225399a 2124 }
1da177e4 2125 atomic_set(&new->refcnt, 1);
1da177e4
LT
2126 return new;
2127}
2128
2129/* Slow path of a mempolicy comparison */
fcfb4dcc 2130bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2131{
2132 if (!a || !b)
fcfb4dcc 2133 return false;
45c4745a 2134 if (a->mode != b->mode)
fcfb4dcc 2135 return false;
19800502 2136 if (a->flags != b->flags)
fcfb4dcc 2137 return false;
19800502
BL
2138 if (mpol_store_user_nodemask(a))
2139 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2140 return false;
19800502 2141
45c4745a 2142 switch (a->mode) {
19770b32
MG
2143 case MPOL_BIND:
2144 /* Fall through */
1da177e4 2145 case MPOL_INTERLEAVE:
fcfb4dcc 2146 return !!nodes_equal(a->v.nodes, b->v.nodes);
1da177e4 2147 case MPOL_PREFERRED:
75719661 2148 return a->v.preferred_node == b->v.preferred_node;
1da177e4
LT
2149 default:
2150 BUG();
fcfb4dcc 2151 return false;
1da177e4
LT
2152 }
2153}
2154
1da177e4
LT
2155/*
2156 * Shared memory backing store policy support.
2157 *
2158 * Remember policies even when nobody has shared memory mapped.
2159 * The policies are kept in Red-Black tree linked from the inode.
4a8c7bb5 2160 * They are protected by the sp->lock rwlock, which should be held
1da177e4
LT
2161 * for any accesses to the tree.
2162 */
2163
4a8c7bb5
NZ
2164/*
2165 * lookup first element intersecting start-end. Caller holds sp->lock for
2166 * reading or for writing
2167 */
1da177e4
LT
2168static struct sp_node *
2169sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2170{
2171 struct rb_node *n = sp->root.rb_node;
2172
2173 while (n) {
2174 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2175
2176 if (start >= p->end)
2177 n = n->rb_right;
2178 else if (end <= p->start)
2179 n = n->rb_left;
2180 else
2181 break;
2182 }
2183 if (!n)
2184 return NULL;
2185 for (;;) {
2186 struct sp_node *w = NULL;
2187 struct rb_node *prev = rb_prev(n);
2188 if (!prev)
2189 break;
2190 w = rb_entry(prev, struct sp_node, nd);
2191 if (w->end <= start)
2192 break;
2193 n = prev;
2194 }
2195 return rb_entry(n, struct sp_node, nd);
2196}
2197
4a8c7bb5
NZ
2198/*
2199 * Insert a new shared policy into the list. Caller holds sp->lock for
2200 * writing.
2201 */
1da177e4
LT
2202static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2203{
2204 struct rb_node **p = &sp->root.rb_node;
2205 struct rb_node *parent = NULL;
2206 struct sp_node *nd;
2207
2208 while (*p) {
2209 parent = *p;
2210 nd = rb_entry(parent, struct sp_node, nd);
2211 if (new->start < nd->start)
2212 p = &(*p)->rb_left;
2213 else if (new->end > nd->end)
2214 p = &(*p)->rb_right;
2215 else
2216 BUG();
2217 }
2218 rb_link_node(&new->nd, parent, p);
2219 rb_insert_color(&new->nd, &sp->root);
140d5a49 2220 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2221 new->policy ? new->policy->mode : 0);
1da177e4
LT
2222}
2223
2224/* Find shared policy intersecting idx */
2225struct mempolicy *
2226mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2227{
2228 struct mempolicy *pol = NULL;
2229 struct sp_node *sn;
2230
2231 if (!sp->root.rb_node)
2232 return NULL;
4a8c7bb5 2233 read_lock(&sp->lock);
1da177e4
LT
2234 sn = sp_lookup(sp, idx, idx+1);
2235 if (sn) {
2236 mpol_get(sn->policy);
2237 pol = sn->policy;
2238 }
4a8c7bb5 2239 read_unlock(&sp->lock);
1da177e4
LT
2240 return pol;
2241}
2242
63f74ca2
KM
2243static void sp_free(struct sp_node *n)
2244{
2245 mpol_put(n->policy);
2246 kmem_cache_free(sn_cache, n);
2247}
2248
771fb4d8
LS
2249/**
2250 * mpol_misplaced - check whether current page node is valid in policy
2251 *
b46e14ac
FF
2252 * @page: page to be checked
2253 * @vma: vm area where page mapped
2254 * @addr: virtual address where page mapped
771fb4d8
LS
2255 *
2256 * Look up the current policy node id for vma,addr and compare it to the page's
2257 * node id.
2258 *
2259 * Returns:
2260 * -1 - not misplaced, page is in the right node
2261 * node - node id where the page should be
2262 *
2263 * Policy determination "mimics" alloc_page_vma().
2264 * Called from fault path where we know the vma and faulting address.
2265 */
2266int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2267{
2268 struct mempolicy *pol;
2269 struct zone *zone;
2270 int curnid = page_to_nid(page);
2271 unsigned long pgoff;
90572890
PZ
2272 int thiscpu = raw_smp_processor_id();
2273 int thisnid = cpu_to_node(thiscpu);
771fb4d8
LS
2274 int polnid = -1;
2275 int ret = -1;
2276
2277 BUG_ON(!vma);
2278
dd6eecb9 2279 pol = get_vma_policy(vma, addr);
771fb4d8
LS
2280 if (!(pol->flags & MPOL_F_MOF))
2281 goto out;
2282
2283 switch (pol->mode) {
2284 case MPOL_INTERLEAVE:
2285 BUG_ON(addr >= vma->vm_end);
2286 BUG_ON(addr < vma->vm_start);
2287
2288 pgoff = vma->vm_pgoff;
2289 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2290 polnid = offset_il_node(pol, vma, pgoff);
2291 break;
2292
2293 case MPOL_PREFERRED:
2294 if (pol->flags & MPOL_F_LOCAL)
2295 polnid = numa_node_id();
2296 else
2297 polnid = pol->v.preferred_node;
2298 break;
2299
2300 case MPOL_BIND:
2301 /*
2302 * MPOL_BIND allows binding to multiple nodes.
2303 * Use the current page's node if it is in the policy nodemask,
2304 * else select the nearest allowed node, if any.
2305 * If there are no allowed nodes, use the current node [not misplaced].
2306 */
2307 if (node_isset(curnid, pol->v.nodes))
2308 goto out;
2309 (void)first_zones_zonelist(
2310 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2311 gfp_zone(GFP_HIGHUSER),
2312 &pol->v.nodes, &zone);
2313 polnid = zone->node;
2314 break;
2315
2316 default:
2317 BUG();
2318 }
5606e387
MG
2319
2320 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2321 if (pol->flags & MPOL_F_MORON) {
90572890 2322 polnid = thisnid;
5606e387 2323
10f39042 2324 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
de1c9ce6 2325 goto out;
e42c8ff2
MG
2326 }
2327
771fb4d8
LS
2328 if (curnid != polnid)
2329 ret = polnid;
2330out:
2331 mpol_cond_put(pol);
2332
2333 return ret;
2334}
2335
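/*
 * Condensed caller pattern (loosely modelled on the NUMA hinting fault
 * handler; names are illustrative): -1 means the page already sits on an
 * acceptable node, anything else is the node to migrate towards.
 */
static inline bool page_misplaced_sketch(struct page *page,
					 struct vm_area_struct *vma,
					 unsigned long addr, int *target_nid)
{
	*target_nid = mpol_misplaced(page, vma, addr);

	return *target_nid != -1;
}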
1da177e4
LT
2336static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2337{
140d5a49 2338 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2339 rb_erase(&n->nd, &sp->root);
63f74ca2 2340 sp_free(n);
1da177e4
LT
2341}
2342
42288fe3
MG
2343static void sp_node_init(struct sp_node *node, unsigned long start,
2344 unsigned long end, struct mempolicy *pol)
2345{
2346 node->start = start;
2347 node->end = end;
2348 node->policy = pol;
2349}
2350
dbcb0f19
AB
2351static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2352 struct mempolicy *pol)
1da177e4 2353{
869833f2
KM
2354 struct sp_node *n;
2355 struct mempolicy *newpol;
1da177e4 2356
869833f2 2357 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2358 if (!n)
2359 return NULL;
869833f2
KM
2360
2361 newpol = mpol_dup(pol);
2362 if (IS_ERR(newpol)) {
2363 kmem_cache_free(sn_cache, n);
2364 return NULL;
2365 }
2366 newpol->flags |= MPOL_F_SHARED;
42288fe3 2367 sp_node_init(n, start, end, newpol);
869833f2 2368
1da177e4
LT
2369 return n;
2370}
2371
2372/* Replace a policy range. */
2373static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2374 unsigned long end, struct sp_node *new)
2375{
b22d127a 2376 struct sp_node *n;
42288fe3
MG
2377 struct sp_node *n_new = NULL;
2378 struct mempolicy *mpol_new = NULL;
b22d127a 2379 int ret = 0;
1da177e4 2380
42288fe3 2381restart:
4a8c7bb5 2382 write_lock(&sp->lock);
1da177e4
LT
2383 n = sp_lookup(sp, start, end);
2384 /* Take care of old policies in the same range. */
2385 while (n && n->start < end) {
2386 struct rb_node *next = rb_next(&n->nd);
2387 if (n->start >= start) {
2388 if (n->end <= end)
2389 sp_delete(sp, n);
2390 else
2391 n->start = end;
2392 } else {
2393 /* Old policy spanning whole new range. */
2394 if (n->end > end) {
42288fe3
MG
2395 if (!n_new)
2396 goto alloc_new;
2397
2398 *mpol_new = *n->policy;
2399 atomic_set(&mpol_new->refcnt, 1);
7880639c 2400 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2401 n->end = start;
5ca39575 2402 sp_insert(sp, n_new);
42288fe3
MG
2403 n_new = NULL;
2404 mpol_new = NULL;
1da177e4
LT
2405 break;
2406 } else
2407 n->end = start;
2408 }
2409 if (!next)
2410 break;
2411 n = rb_entry(next, struct sp_node, nd);
2412 }
2413 if (new)
2414 sp_insert(sp, new);
4a8c7bb5 2415 write_unlock(&sp->lock);
42288fe3
MG
2416 ret = 0;
2417
2418err_out:
2419 if (mpol_new)
2420 mpol_put(mpol_new);
2421 if (n_new)
2422 kmem_cache_free(sn_cache, n_new);
2423
b22d127a 2424 return ret;
42288fe3
MG
2425
2426alloc_new:
4a8c7bb5 2427 write_unlock(&sp->lock);
42288fe3
MG
2428 ret = -ENOMEM;
2429 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2430 if (!n_new)
2431 goto err_out;
2432 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2433 if (!mpol_new)
2434 goto err_out;
2435 goto restart;
1da177e4
LT
2436}
2437
71fe804b
LS
2438/**
2439 * mpol_shared_policy_init - initialize shared policy for inode
2440 * @sp: pointer to inode shared policy
2441 * @mpol: struct mempolicy to install
2442 *
2443 * Install non-NULL @mpol in inode's shared policy rb-tree.
2444 * On entry, the current task has a reference on a non-NULL @mpol.
2445 * This must be released on exit.
4bfc4495 2446 * This is called at get_inode() calls and we can use GFP_KERNEL.
71fe804b
LS
2447 */
2448void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2449{
58568d2a
MX
2450 int ret;
2451
71fe804b 2452 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2453 rwlock_init(&sp->lock);
71fe804b
LS
2454
2455 if (mpol) {
2456 struct vm_area_struct pvma;
2457 struct mempolicy *new;
4bfc4495 2458 NODEMASK_SCRATCH(scratch);
71fe804b 2459
4bfc4495 2460 if (!scratch)
5c0c1654 2461 goto put_mpol;
71fe804b
LS
2462 /* contextualize the tmpfs mount point mempolicy */
2463 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2464 if (IS_ERR(new))
0cae3457 2465 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2466
2467 task_lock(current);
4bfc4495 2468 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2469 task_unlock(current);
15d77835 2470 if (ret)
5c0c1654 2471 goto put_new;
71fe804b
LS
2472
2473 /* Create pseudo-vma that contains just the policy */
2474 memset(&pvma, 0, sizeof(struct vm_area_struct));
2475 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2476 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2477
5c0c1654 2478put_new:
71fe804b 2479 mpol_put(new); /* drop initial ref */
0cae3457 2480free_scratch:
4bfc4495 2481 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2482put_mpol:
2483 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2484 }
2485}
2486
1da177e4
LT
2487int mpol_set_shared_policy(struct shared_policy *info,
2488 struct vm_area_struct *vma, struct mempolicy *npol)
2489{
2490 int err;
2491 struct sp_node *new = NULL;
2492 unsigned long sz = vma_pages(vma);
2493
028fec41 2494 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2495 vma->vm_pgoff,
45c4745a 2496 sz, npol ? npol->mode : -1,
028fec41 2497 npol ? npol->flags : -1,
00ef2d2f 2498 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2499
2500 if (npol) {
2501 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2502 if (!new)
2503 return -ENOMEM;
2504 }
2505 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2506 if (err && new)
63f74ca2 2507 sp_free(new);
1da177e4
LT
2508 return err;
2509}
2510
2511/* Free a backing policy store on inode delete. */
2512void mpol_free_shared_policy(struct shared_policy *p)
2513{
2514 struct sp_node *n;
2515 struct rb_node *next;
2516
2517 if (!p->root.rb_node)
2518 return;
4a8c7bb5 2519 write_lock(&p->lock);
1da177e4
LT
2520 next = rb_first(&p->root);
2521 while (next) {
2522 n = rb_entry(next, struct sp_node, nd);
2523 next = rb_next(&n->nd);
63f74ca2 2524 sp_delete(p, n);
1da177e4 2525 }
4a8c7bb5 2526 write_unlock(&p->lock);
1da177e4
LT
2527}
2528
1a687c2e 2529#ifdef CONFIG_NUMA_BALANCING
c297663c 2530static int __initdata numabalancing_override;
1a687c2e
MG
2531
2532static void __init check_numabalancing_enable(void)
2533{
2534 bool numabalancing_default = false;
2535
2536 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2537 numabalancing_default = true;
2538
c297663c
MG
2539 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2540 if (numabalancing_override)
2541 set_numabalancing_state(numabalancing_override == 1);
2542
b0dc2b9b 2543 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 2544 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 2545 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
2546 set_numabalancing_state(numabalancing_default);
2547 }
2548}
2549
2550static int __init setup_numabalancing(char *str)
2551{
2552 int ret = 0;
2553 if (!str)
2554 goto out;
1a687c2e
MG
2555
2556 if (!strcmp(str, "enable")) {
c297663c 2557 numabalancing_override = 1;
1a687c2e
MG
2558 ret = 1;
2559 } else if (!strcmp(str, "disable")) {
c297663c 2560 numabalancing_override = -1;
1a687c2e
MG
2561 ret = 1;
2562 }
2563out:
2564 if (!ret)
4a404bea 2565 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
2566
2567 return ret;
2568}
2569__setup("numa_balancing=", setup_numabalancing);
2570#else
2571static inline void __init check_numabalancing_enable(void)
2572{
2573}
2574#endif /* CONFIG_NUMA_BALANCING */
2575
1da177e4
LT
2576/* assumes fs == KERNEL_DS */
2577void __init numa_policy_init(void)
2578{
b71636e2
PM
2579 nodemask_t interleave_nodes;
2580 unsigned long largest = 0;
2581 int nid, prefer = 0;
2582
1da177e4
LT
2583 policy_cache = kmem_cache_create("numa_policy",
2584 sizeof(struct mempolicy),
20c2df83 2585 0, SLAB_PANIC, NULL);
1da177e4
LT
2586
2587 sn_cache = kmem_cache_create("shared_policy_node",
2588 sizeof(struct sp_node),
20c2df83 2589 0, SLAB_PANIC, NULL);
1da177e4 2590
5606e387
MG
2591 for_each_node(nid) {
2592 preferred_node_policy[nid] = (struct mempolicy) {
2593 .refcnt = ATOMIC_INIT(1),
2594 .mode = MPOL_PREFERRED,
2595 .flags = MPOL_F_MOF | MPOL_F_MORON,
2596 .v = { .preferred_node = nid, },
2597 };
2598 }
2599
b71636e2
PM
2600 /*
2601 * Set interleaving policy for system init. Interleaving is only
2602 * enabled across suitably sized nodes (default is >= 16MB), or
2603 * fall back to the largest node if they're all smaller.
2604 */
2605 nodes_clear(interleave_nodes);
01f13bd6 2606 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2607 unsigned long total_pages = node_present_pages(nid);
2608
2609 /* Preserve the largest node */
2610 if (largest < total_pages) {
2611 largest = total_pages;
2612 prefer = nid;
2613 }
2614
2615 /* Interleave this node? */
2616 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2617 node_set(nid, interleave_nodes);
2618 }
2619
2620 /* All too small, use the largest */
2621 if (unlikely(nodes_empty(interleave_nodes)))
2622 node_set(prefer, interleave_nodes);
1da177e4 2623
028fec41 2624 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 2625 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
2626
2627 check_numabalancing_enable();
1da177e4
LT
2628}
2629
8bccd85f 2630/* Reset policy of current process to default */
1da177e4
LT
2631void numa_default_policy(void)
2632{
028fec41 2633 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2634}
68860ec1 2635
095f1fc4
LS
2636/*
2637 * Parse and format mempolicy from/to strings
2638 */
2639
1a75a6c8 2640/*
f2a07f40 2641 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
1a75a6c8 2642 */
345ace9c
LS
2643static const char * const policy_modes[] =
2644{
2645 [MPOL_DEFAULT] = "default",
2646 [MPOL_PREFERRED] = "prefer",
2647 [MPOL_BIND] = "bind",
2648 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2649 [MPOL_LOCAL] = "local",
345ace9c 2650};
1a75a6c8 2651
095f1fc4
LS
2652
2653#ifdef CONFIG_TMPFS
2654/**
f2a07f40 2655 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2656 * @str: string containing mempolicy to parse
71fe804b 2657 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
2658 *
2659 * Format of input:
2660 * <mode>[=<flags>][:<nodelist>]
2661 *
71fe804b 2662 * On success, returns 0, else 1
095f1fc4 2663 */
a7a88b23 2664int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2665{
71fe804b 2666 struct mempolicy *new = NULL;
b4652e84 2667 unsigned short mode;
f2a07f40 2668 unsigned short mode_flags;
71fe804b 2669 nodemask_t nodes;
095f1fc4
LS
2670 char *nodelist = strchr(str, ':');
2671 char *flags = strchr(str, '=');
095f1fc4
LS
2672 int err = 1;
2673
2674 if (nodelist) {
2675 /* NUL-terminate mode or flags string */
2676 *nodelist++ = '\0';
71fe804b 2677 if (nodelist_parse(nodelist, nodes))
095f1fc4 2678 goto out;
01f13bd6 2679 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 2680 goto out;
71fe804b
LS
2681 } else
2682 nodes_clear(nodes);
2683
095f1fc4
LS
2684 if (flags)
2685 *flags++ = '\0'; /* terminate mode string */
2686
479e2802 2687 for (mode = 0; mode < MPOL_MAX; mode++) {
345ace9c 2688 if (!strcmp(str, policy_modes[mode])) {
095f1fc4
LS
2689 break;
2690 }
2691 }
a720094d 2692 if (mode >= MPOL_MAX)
095f1fc4
LS
2693 goto out;
2694
71fe804b 2695 switch (mode) {
095f1fc4 2696 case MPOL_PREFERRED:
71fe804b
LS
2697 /*
2698 * Insist on a nodelist of one node only
2699 */
095f1fc4
LS
2700 if (nodelist) {
2701 char *rest = nodelist;
2702 while (isdigit(*rest))
2703 rest++;
926f2ae0
KM
2704 if (*rest)
2705 goto out;
095f1fc4
LS
2706 }
2707 break;
095f1fc4
LS
2708 case MPOL_INTERLEAVE:
2709 /*
2710 * Default to online nodes with memory if no nodelist
2711 */
2712 if (!nodelist)
01f13bd6 2713 nodes = node_states[N_MEMORY];
3f226aa1 2714 break;
71fe804b 2715 case MPOL_LOCAL:
3f226aa1 2716 /*
71fe804b 2717 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 2718 */
71fe804b 2719 if (nodelist)
3f226aa1 2720 goto out;
71fe804b 2721 mode = MPOL_PREFERRED;
3f226aa1 2722 break;
413b43de
RT
2723 case MPOL_DEFAULT:
2724 /*
2725 * Insist on an empty nodelist
2726 */
2727 if (!nodelist)
2728 err = 0;
2729 goto out;
d69b2e63
KM
2730 case MPOL_BIND:
2731 /*
2732 * Insist on a nodelist
2733 */
2734 if (!nodelist)
2735 goto out;
095f1fc4
LS
2736 }
2737
71fe804b 2738 mode_flags = 0;
095f1fc4
LS
2739 if (flags) {
2740 /*
2741 * Currently, we only support two mutually exclusive
2742 * mode flags.
2743 */
2744 if (!strcmp(flags, "static"))
71fe804b 2745 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 2746 else if (!strcmp(flags, "relative"))
71fe804b 2747 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 2748 else
926f2ae0 2749 goto out;
095f1fc4 2750 }
71fe804b
LS
2751
2752 new = mpol_new(mode, mode_flags, &nodes);
2753 if (IS_ERR(new))
926f2ae0
KM
2754 goto out;
2755
f2a07f40
HD
2756 /*
2757 * Save nodes for mpol_to_str() to show the tmpfs mount options
2758 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2759 */
2760 if (mode != MPOL_PREFERRED)
2761 new->v.nodes = nodes;
2762 else if (nodelist)
2763 new->v.preferred_node = first_node(nodes);
2764 else
2765 new->flags |= MPOL_F_LOCAL;
2766
2767 /*
2768 * Save nodes for contextualization: this will be used to "clone"
2769 * the mempolicy in a specific context [cpuset] at a later time.
2770 */
2771 new->w.user_nodemask = nodes;
2772
926f2ae0 2773 err = 0;
71fe804b 2774
095f1fc4
LS
2775out:
2776 /* Restore string for error message */
2777 if (nodelist)
2778 *--nodelist = ':';
2779 if (flags)
2780 *--flags = '=';
71fe804b
LS
2781 if (!err)
2782 *mpol = new;
095f1fc4
LS
2783 return err;
2784}
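/*
 * Example inputs, as they appear in tmpfs "mpol=" mount options; the
 * mapping follows directly from the parsing above:
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED | MPOL_F_STATIC_NODES, node 1
 *	"bind:0,2"		MPOL_BIND over nodes 0 and 2
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL set
 *	"default"		no nodelist allowed; *mpol is left NULL
 */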
2785#endif /* CONFIG_TMPFS */
2786
71fe804b
LS
2787/**
2788 * mpol_to_str - format a mempolicy structure for printing
2789 * @buffer: to contain formatted mempolicy string
2790 * @maxlen: length of @buffer
2791 * @pol: pointer to mempolicy to be formatted
71fe804b 2792 *
948927ee
DR
2793 * Convert @pol into a string. If @buffer is too short, truncate the string.
2794 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2795 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 2796 */
948927ee 2797void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
2798{
2799 char *p = buffer;
948927ee
DR
2800 nodemask_t nodes = NODE_MASK_NONE;
2801 unsigned short mode = MPOL_DEFAULT;
2802 unsigned short flags = 0;
2291990a 2803
8790c71a 2804 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 2805 mode = pol->mode;
948927ee
DR
2806 flags = pol->flags;
2807 }
bea904d5 2808
1a75a6c8
CL
2809 switch (mode) {
2810 case MPOL_DEFAULT:
1a75a6c8 2811 break;
1a75a6c8 2812 case MPOL_PREFERRED:
fc36b8d3 2813 if (flags & MPOL_F_LOCAL)
f2a07f40 2814 mode = MPOL_LOCAL;
53f2556b 2815 else
fc36b8d3 2816 node_set(pol->v.preferred_node, nodes);
1a75a6c8 2817 break;
1a75a6c8 2818 case MPOL_BIND:
1a75a6c8 2819 case MPOL_INTERLEAVE:
f2a07f40 2820 nodes = pol->v.nodes;
1a75a6c8 2821 break;
1a75a6c8 2822 default:
948927ee
DR
2823 WARN_ON_ONCE(1);
2824 snprintf(p, maxlen, "unknown");
2825 return;
1a75a6c8
CL
2826 }
2827
b7a9f420 2828 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 2829
fc36b8d3 2830 if (flags & MPOL_MODE_FLAGS) {
948927ee 2831 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 2832
2291990a
LS
2833 /*
2834 * Currently, the only defined flags are mutually exclusive
2835 */
f5b087b5 2836 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
2837 p += snprintf(p, buffer + maxlen - p, "static");
2838 else if (flags & MPOL_F_RELATIVE_NODES)
2839 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
2840 }
2841
9e763e0f
TH
2842 if (!nodes_empty(nodes))
2843 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2844 nodemask_pr_args(&nodes));
1a75a6c8 2845}
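/*
 * Typical use (illustrative): format a policy into a small stack buffer for
 * a /proc style display. The output mirrors the parse format above, e.g.
 * "interleave:0-3", "prefer=static:1", "bind:0,2", "local" or "default".
 *
 *	char buf[64];
 *
 *	mpol_to_str(buf, sizeof(buf), pol);
 *	seq_printf(m, "mempolicy %s\n", buf);
 */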