// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support several policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * preferred many	Try a set of nodes first before normal fallback. This is
 *		similar to preferred without the special case.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
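
/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * libnuma <numaif.h> syscall wrappers are available, and that addr/len
 * refer to some existing mapping): how the modes described above are
 * typically requested.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = 0x3;	// nodemask with nodes 0 and 1 set
 *
 *	// Process policy: interleave future allocations over nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 *	// VMA policy: bind an existing mapping to node 0 only.
 *	unsigned long node0 = 0x1;
 *	mbind(addr, len, MPOL_BIND, &node0, 8 * sizeof(node0), MPOL_MF_MOVE);
 *
 * The maxnode argument tells the kernel how many bits of the mask to
 * look at.
 */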

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 *
 * Return: this @node if it is online, otherwise the closest node by distance
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);

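/*
 * Illustrative caller sketch (not from this file): a driver that needs a
 * usable node for a device whose firmware-reported node may be offline or
 * NUMA_NO_NODE might do something like
 *
 *	int nid = numa_map_to_online_node(dev_to_node(dev));
 *	struct page *page = __alloc_pages_node(nid, GFP_KERNEL, 0);
 *
 * Either the original node comes back (if online) or the nearest online
 * node by node_distance() is substituted.
 */
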
struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

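/*
 * Worked example (illustrative only): with MPOL_F_RELATIVE_NODES, a user
 * mask of {0,2} and a currently allowed set of {4,5,6} is first folded
 * modulo nodes_weight(rel) = 3 (still {0,2}) and then mapped onto the set
 * bits of rel, giving {4,6} - i.e. the 1st and 3rd nodes of whatever the
 * task is allowed to use at the moment.
 */
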
static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy. mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy. May also be called holding the mmap_lock for write.
 */
4bfc4495
KH
224static int mpol_set_nodemask(struct mempolicy *pol,
225 const nodemask_t *nodes, struct nodemask_scratch *nsc)
58568d2a 226{
58568d2a
MX
227 int ret;
228
7858d7bc
FT
229 /*
230 * Default (pol==NULL) resp. local memory policies are not a
231 * subject of any remapping. They also do not need any special
232 * constructor.
233 */
234 if (!pol || pol->mode == MPOL_LOCAL)
58568d2a 235 return 0;
7858d7bc 236
01f13bd6 237 /* Check N_MEMORY */
4bfc4495 238 nodes_and(nsc->mask1,
01f13bd6 239 cpuset_current_mems_allowed, node_states[N_MEMORY]);
58568d2a
MX
240
241 VM_BUG_ON(!nodes);
4bfc4495 242
7858d7bc
FT
243 if (pol->flags & MPOL_F_RELATIVE_NODES)
244 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
245 else
246 nodes_and(nsc->mask2, *nodes, nsc->mask1);
58568d2a 247
7858d7bc
FT
248 if (mpol_store_user_nodemask(pol))
249 pol->w.user_nodemask = *nodes;
4bfc4495 250 else
7858d7bc
FT
251 pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
252
253 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
58568d2a
MX
254 return ret;
255}
256
257/*
258 * This function just creates a new policy, does some check and simple
259 * initialization. You must invoke mpol_set_nodemask() to set nodes.
260 */
028fec41
DR
261static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
262 nodemask_t *nodes)
1da177e4
LT
263{
264 struct mempolicy *policy;
265
028fec41 266 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
00ef2d2f 267 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
140d5a49 268
3e1f0645
DR
269 if (mode == MPOL_DEFAULT) {
270 if (nodes && !nodes_empty(*nodes))
37012946 271 return ERR_PTR(-EINVAL);
d3a71033 272 return NULL;
37012946 273 }
3e1f0645
DR
274 VM_BUG_ON(!nodes);
275
276 /*
277 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
278 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
279 * All other modes require a valid pointer to a non-empty nodemask.
280 */
281 if (mode == MPOL_PREFERRED) {
282 if (nodes_empty(*nodes)) {
283 if (((flags & MPOL_F_STATIC_NODES) ||
284 (flags & MPOL_F_RELATIVE_NODES)))
285 return ERR_PTR(-EINVAL);
7858d7bc
FT
286
287 mode = MPOL_LOCAL;
3e1f0645 288 }
479e2802 289 } else if (mode == MPOL_LOCAL) {
8d303e44
PK
290 if (!nodes_empty(*nodes) ||
291 (flags & MPOL_F_STATIC_NODES) ||
292 (flags & MPOL_F_RELATIVE_NODES))
479e2802 293 return ERR_PTR(-EINVAL);
3e1f0645
DR
294 } else if (nodes_empty(*nodes))
295 return ERR_PTR(-EINVAL);
1da177e4
LT
296 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
297 if (!policy)
298 return ERR_PTR(-ENOMEM);
299 atomic_set(&policy->refcnt, 1);
45c4745a 300 policy->mode = mode;
3e1f0645 301 policy->flags = flags;
c6018b4b 302 policy->home_node = NUMA_NO_NODE;
37012946 303
1da177e4 304 return policy;
37012946
DR
305}
306
52cd3b07
LS
307/* Slow path of a mpol destructor. */
308void __mpol_put(struct mempolicy *p)
309{
310 if (!atomic_dec_and_test(&p->refcnt))
311 return;
52cd3b07
LS
312 kmem_cache_free(policy_cache, p);
313}
314
213980c0 315static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
37012946
DR
316{
317}
318
213980c0 319static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
37012946
DR
320{
321 nodemask_t tmp;
322
323 if (pol->flags & MPOL_F_STATIC_NODES)
324 nodes_and(tmp, pol->w.user_nodemask, *nodes);
325 else if (pol->flags & MPOL_F_RELATIVE_NODES)
326 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
327 else {
269fbe72 328 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
213980c0 329 *nodes);
29b190fa 330 pol->w.cpuset_mems_allowed = *nodes;
37012946 331 }
f5b087b5 332
708c1bbc
MX
333 if (nodes_empty(tmp))
334 tmp = *nodes;
335
269fbe72 336 pol->nodes = tmp;
37012946
DR
337}
338
339static void mpol_rebind_preferred(struct mempolicy *pol,
213980c0 340 const nodemask_t *nodes)
37012946 341{
7858d7bc 342 pol->w.cpuset_mems_allowed = *nodes;
1da177e4
LT
343}
344
/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
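/*
 * Behavioural example (illustrative only): a task interleaving over {0,1}
 * with MPOL_F_STATIC_NODES keeps requesting {0,1} (intersected with the new
 * cpuset) after a cpuset change, while a policy without the STATIC/RELATIVE
 * flags has its nodes remapped onto the new cpuset by the rebind callbacks
 * above.
 */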
213980c0 352static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
1d0d2680 353{
018160ad 354 if (!pol || pol->mode == MPOL_LOCAL)
1d0d2680 355 return;
7858d7bc 356 if (!mpol_store_user_nodemask(pol) &&
1d0d2680
DR
357 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
358 return;
708c1bbc 359
213980c0 360 mpol_ops[pol->mode].rebind(pol, newmask);
1d0d2680
DR
361}
362
/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
369
213980c0 370void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
1d0d2680 371{
213980c0 372 mpol_rebind_policy(tsk->mempolicy, new);
1d0d2680
DR
373}
374
/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm. Takes mm->mmap_lock during call.
 */
380
381void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
382{
383 struct vm_area_struct *vma;
66850be5 384 VMA_ITERATOR(vmi, mm, 0);
1d0d2680 385
d8ed45c5 386 mmap_write_lock(mm);
6c21e066
JH
387 for_each_vma(vmi, vma) {
388 vma_start_write(vma);
213980c0 389 mpol_rebind_policy(vma->vm_policy, new);
6c21e066 390 }
d8ed45c5 391 mmap_write_unlock(mm);
1d0d2680
DR
392}
393
37012946
DR
394static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
395 [MPOL_DEFAULT] = {
396 .rebind = mpol_rebind_default,
397 },
398 [MPOL_INTERLEAVE] = {
be897d48 399 .create = mpol_new_nodemask,
37012946
DR
400 .rebind = mpol_rebind_nodemask,
401 },
402 [MPOL_PREFERRED] = {
403 .create = mpol_new_preferred,
404 .rebind = mpol_rebind_preferred,
405 },
406 [MPOL_BIND] = {
be897d48 407 .create = mpol_new_nodemask,
37012946
DR
408 .rebind = mpol_rebind_nodemask,
409 },
7858d7bc
FT
410 [MPOL_LOCAL] = {
411 .rebind = mpol_rebind_default,
412 },
b27abacc 413 [MPOL_PREFERRED_MANY] = {
be897d48 414 .create = mpol_new_nodemask,
b27abacc
DH
415 .rebind = mpol_rebind_preferred,
416 },
37012946
DR
417};
418
4a64981d 419static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
fc301289 420 unsigned long flags);
1a75a6c8 421
6f4576e3
NH
422struct queue_pages {
423 struct list_head *pagelist;
424 unsigned long flags;
425 nodemask_t *nmask;
f18da660
LX
426 unsigned long start;
427 unsigned long end;
428 struct vm_area_struct *first;
6f4576e3
NH
429};
430
/*
 * Check if the folio's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_folio_required(struct folio *folio,
					struct queue_pages *qp)
{
	int nid = folio_nid(folio);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}
445
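/*
 * Example of the flag's effect (illustrative only): with qp->nmask = {0,1}
 * and MPOL_MF_INVERT set, a folio resident on node 2 is "required" (i.e.
 * queued for migration) while one already on node 0 is not; without the
 * flag it is the usual node_isset() test.
 */
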
/*
 * queue_folios_pmd() has three possible return values:
 * 0 - folios are placed on the right node or queued successfully, or
 *     special page is met, i.e. huge zero page.
 * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
 *        existing folio was already on a node that does not follow the
 *        policy.
 */
de1f5055 456static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
c8633798 457 unsigned long end, struct mm_walk *walk)
959a7e13 458 __releases(ptl)
c8633798
NH
459{
460 int ret = 0;
de1f5055 461 struct folio *folio;
c8633798
NH
462 struct queue_pages *qp = walk->private;
463 unsigned long flags;
464
465 if (unlikely(is_pmd_migration_entry(*pmd))) {
a7f40cfe 466 ret = -EIO;
c8633798
NH
467 goto unlock;
468 }
de1f5055
VMO
469 folio = pfn_folio(pmd_pfn(*pmd));
470 if (is_huge_zero_page(&folio->page)) {
e5947d23 471 walk->action = ACTION_CONTINUE;
6d97cf88 472 goto unlock;
c8633798 473 }
d451b89d 474 if (!queue_folio_required(folio, qp))
c8633798 475 goto unlock;
c8633798 476
c8633798 477 flags = qp->flags;
de1f5055 478 /* go to folio migration */
a7f40cfe 479 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
a53190a4 480 if (!vma_migratable(walk->vma) ||
4a64981d 481 migrate_folio_add(folio, qp->pagelist, flags)) {
d8835445 482 ret = 1;
a7f40cfe
YS
483 goto unlock;
484 }
a7f40cfe
YS
485 } else
486 ret = -EIO;
c8633798
NH
487unlock:
488 spin_unlock(ptl);
c8633798
NH
489 return ret;
490}
491
/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_folios_pte_range() has three possible return values:
 * 0 - folios are placed on the right node or queued successfully, or
 *     special page is met, i.e. zero page.
 * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
 *        on a node that does not follow the policy.
 */
3dae02bb 504static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
6f4576e3 505 unsigned long end, struct mm_walk *walk)
1da177e4 506{
6f4576e3 507 struct vm_area_struct *vma = walk->vma;
3dae02bb 508 struct folio *folio;
6f4576e3
NH
509 struct queue_pages *qp = walk->private;
510 unsigned long flags = qp->flags;
d8835445 511 bool has_unmovable = false;
3f088420 512 pte_t *pte, *mapped_pte;
c33c7948 513 pte_t ptent;
705e87c0 514 spinlock_t *ptl;
941150a3 515
c8633798 516 ptl = pmd_trans_huge_lock(pmd, vma);
bc78b5ed 517 if (ptl)
de1f5055 518 return queue_folios_pmd(pmd, ptl, addr, end, walk);
91612e0d 519
3f088420 520 mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
7780d040
HD
521 if (!pte) {
522 walk->action = ACTION_AGAIN;
523 return 0;
524 }
6f4576e3 525 for (; addr != end; pte++, addr += PAGE_SIZE) {
c33c7948
RR
526 ptent = ptep_get(pte);
527 if (!pte_present(ptent))
1da177e4 528 continue;
c33c7948 529 folio = vm_normal_folio(vma, addr, ptent);
3dae02bb 530 if (!folio || folio_is_zone_device(folio))
1da177e4 531 continue;
053837fc 532 /*
3dae02bb
VMO
533 * vm_normal_folio() filters out zero pages, but there might
534 * still be reserved folios to skip, perhaps in a VDSO.
053837fc 535 */
3dae02bb 536 if (folio_test_reserved(folio))
f4598c8b 537 continue;
d451b89d 538 if (!queue_folio_required(folio, qp))
38e35860 539 continue;
a7f40cfe 540 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
d8835445
YS
541 /* MPOL_MF_STRICT must be specified if we get here */
542 if (!vma_migratable(vma)) {
543 has_unmovable = true;
a7f40cfe 544 break;
d8835445 545 }
a53190a4
YS
546
547 /*
548 * Do not abort immediately since there may be
549 * temporary off LRU pages in the range. Still
550 * need migrate other LRU pages.
551 */
4a64981d 552 if (migrate_folio_add(folio, qp->pagelist, flags))
a53190a4 553 has_unmovable = true;
a7f40cfe
YS
554 } else
555 break;
6f4576e3 556 }
3f088420 557 pte_unmap_unlock(mapped_pte, ptl);
6f4576e3 558 cond_resched();
d8835445
YS
559
560 if (has_unmovable)
561 return 1;
562
a7f40cfe 563 return addr != end ? -EIO : 0;
91612e0d
HD
564}
565
0a2c1e81 566static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
6f4576e3
NH
567 unsigned long addr, unsigned long end,
568 struct mm_walk *walk)
e2d8cf40 569{
dcf17635 570 int ret = 0;
e2d8cf40 571#ifdef CONFIG_HUGETLB_PAGE
6f4576e3 572 struct queue_pages *qp = walk->private;
dcf17635 573 unsigned long flags = (qp->flags & MPOL_MF_VALID);
0a2c1e81 574 struct folio *folio;
cb900f41 575 spinlock_t *ptl;
d4c54919 576 pte_t entry;
e2d8cf40 577
6f4576e3
NH
578 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
579 entry = huge_ptep_get(pte);
d4c54919
NH
580 if (!pte_present(entry))
581 goto unlock;
0a2c1e81 582 folio = pfn_folio(pte_pfn(entry));
d451b89d 583 if (!queue_folio_required(folio, qp))
e2d8cf40 584 goto unlock;
dcf17635
LX
585
586 if (flags == MPOL_MF_STRICT) {
587 /*
0a2c1e81 588 * STRICT alone means only detecting misplaced folio and no
dcf17635
LX
589 * need to further check other vma.
590 */
591 ret = -EIO;
592 goto unlock;
593 }
594
595 if (!vma_migratable(walk->vma)) {
596 /*
597 * Must be STRICT with MOVE*, otherwise .test_walk() have
598 * stopped walking current vma.
0a2c1e81 599 * Detecting misplaced folio but allow migrating folios which
dcf17635
LX
600 * have been queued.
601 */
602 ret = 1;
603 goto unlock;
604 }
605
0a2c1e81
VMO
606 /*
607 * With MPOL_MF_MOVE, we try to migrate only unshared folios. If it
608 * is shared it is likely not worth migrating.
609 *
610 * To check if the folio is shared, ideally we want to make sure
611 * every page is mapped to the same process. Doing that is very
612 * expensive, so check the estimated mapcount of the folio instead.
613 */
e2d8cf40 614 if (flags & (MPOL_MF_MOVE_ALL) ||
0a2c1e81 615 (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
73bdf65e 616 !hugetlb_pmd_shared(pte))) {
9747b9e9 617 if (!isolate_hugetlb(folio, qp->pagelist) &&
dcf17635
LX
618 (flags & MPOL_MF_STRICT))
619 /*
0a2c1e81 620 * Failed to isolate folio but allow migrating pages
dcf17635
LX
621 * which have been queued.
622 */
623 ret = 1;
624 }
e2d8cf40 625unlock:
cb900f41 626 spin_unlock(ptl);
e2d8cf40
NH
627#else
628 BUG();
629#endif
dcf17635 630 return ret;
1da177e4
LT
631}
632
#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
4b10e7d5
MG
643unsigned long change_prot_numa(struct vm_area_struct *vma,
644 unsigned long addr, unsigned long end)
b24f53a0 645{
4a18419f 646 struct mmu_gather tlb;
a79390f5 647 long nr_updated;
b24f53a0 648
4a18419f
NA
649 tlb_gather_mmu(&tlb, vma->vm_mm);
650
1ef488ed 651 nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
d1751118 652 if (nr_updated > 0)
03c5a6e1 653 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
b24f53a0 654
4a18419f
NA
655 tlb_finish_mmu(&tlb);
656
4b10e7d5 657 return nr_updated;
b24f53a0
LS
658}
659#else
660static unsigned long change_prot_numa(struct vm_area_struct *vma,
661 unsigned long addr, unsigned long end)
662{
663 return 0;
664}
5877231f 665#endif /* CONFIG_NUMA_BALANCING */
b24f53a0 666
6f4576e3
NH
667static int queue_pages_test_walk(unsigned long start, unsigned long end,
668 struct mm_walk *walk)
669{
66850be5 670 struct vm_area_struct *next, *vma = walk->vma;
6f4576e3
NH
671 struct queue_pages *qp = walk->private;
672 unsigned long endvma = vma->vm_end;
673 unsigned long flags = qp->flags;
674
a18b3ac2 675 /* range check first */
ce33135c 676 VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
f18da660
LX
677
678 if (!qp->first) {
679 qp->first = vma;
680 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
681 (qp->start < vma->vm_start))
682 /* hole at head side of range */
a18b3ac2
LX
683 return -EFAULT;
684 }
66850be5 685 next = find_vma(vma->vm_mm, vma->vm_end);
f18da660
LX
686 if (!(flags & MPOL_MF_DISCONTIG_OK) &&
687 ((vma->vm_end < qp->end) &&
66850be5 688 (!next || vma->vm_end < next->vm_start)))
f18da660
LX
689 /* hole at middle or tail of range */
690 return -EFAULT;
a18b3ac2 691
a7f40cfe
YS
692 /*
693 * Need check MPOL_MF_STRICT to return -EIO if possible
694 * regardless of vma_migratable
695 */
696 if (!vma_migratable(vma) &&
697 !(flags & MPOL_MF_STRICT))
48684a65
NH
698 return 1;
699
6f4576e3
NH
700 if (endvma > end)
701 endvma = end;
6f4576e3 702
6f4576e3
NH
703 if (flags & MPOL_MF_LAZY) {
704 /* Similar to task_numa_work, skip inaccessible VMAs */
3122e80e 705 if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
4355c018 706 !(vma->vm_flags & VM_MIXEDMAP))
6f4576e3
NH
707 change_prot_numa(vma, start, endvma);
708 return 1;
709 }
710
77bf45e7 711 /* queue pages from current vma */
a7f40cfe 712 if (flags & MPOL_MF_VALID)
6f4576e3
NH
713 return 0;
714 return 1;
715}
716
7b86ac33 717static const struct mm_walk_ops queue_pages_walk_ops = {
0a2c1e81 718 .hugetlb_entry = queue_folios_hugetlb,
3dae02bb 719 .pmd_entry = queue_folios_pte_range,
7b86ac33
CH
720 .test_walk = queue_pages_test_walk,
721};
722
/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which is
 * passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - queue pages successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 *         memory range specified by nodemask and maxnode points outside
 *         your accessible address space (-EFAULT)
 */
d05f0cdc 738static int
98094945 739queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6f4576e3
NH
740 nodemask_t *nodes, unsigned long flags,
741 struct list_head *pagelist)
1da177e4 742{
f18da660 743 int err;
6f4576e3
NH
744 struct queue_pages qp = {
745 .pagelist = pagelist,
746 .flags = flags,
747 .nmask = nodes,
f18da660
LX
748 .start = start,
749 .end = end,
750 .first = NULL,
6f4576e3 751 };
6f4576e3 752
f18da660
LX
753 err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
754
755 if (!qp.first)
756 /* whole range in hole */
757 err = -EFAULT;
758
759 return err;
1da177e4
LT
760}
761
/*
 * Apply policy to a single VMA.
 * This must be called with the mmap_lock held for writing.
 */
766static int vma_replace_policy(struct vm_area_struct *vma,
767 struct mempolicy *pol)
8d34694c 768{
869833f2
KM
769 int err;
770 struct mempolicy *old;
771 struct mempolicy *new;
8d34694c 772
6c21e066
JH
773 vma_assert_write_locked(vma);
774
8d34694c
KM
775 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
776 vma->vm_start, vma->vm_end, vma->vm_pgoff,
777 vma->vm_ops, vma->vm_file,
778 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
779
869833f2
KM
780 new = mpol_dup(pol);
781 if (IS_ERR(new))
782 return PTR_ERR(new);
783
784 if (vma->vm_ops && vma->vm_ops->set_policy) {
8d34694c 785 err = vma->vm_ops->set_policy(vma, new);
869833f2
KM
786 if (err)
787 goto err_out;
8d34694c 788 }
869833f2
KM
789
790 old = vma->vm_policy;
c1e8d7c6 791 vma->vm_policy = new; /* protected by mmap_lock */
869833f2
KM
792 mpol_put(old);
793
794 return 0;
795 err_out:
796 mpol_put(new);
8d34694c
KM
797 return err;
798}
799
f4e9e0e6
LH
800/* Split or merge the VMA (if required) and apply the new policy */
801static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
802 struct vm_area_struct **prev, unsigned long start,
803 unsigned long end, struct mempolicy *new_pol)
1da177e4 804{
f4e9e0e6
LH
805 struct vm_area_struct *merged;
806 unsigned long vmstart, vmend;
e26a5114 807 pgoff_t pgoff;
f4e9e0e6 808 int err;
9d8cebd4 809
f4e9e0e6
LH
810 vmend = min(end, vma->vm_end);
811 if (start > vma->vm_start) {
812 *prev = vma;
813 vmstart = start;
814 } else {
815 vmstart = vma->vm_start;
816 }
817
00ca0f2e
LS
818 if (mpol_equal(vma_policy(vma), new_pol)) {
819 *prev = vma;
7329e3eb 820 return 0;
00ca0f2e 821 }
7329e3eb 822
f4e9e0e6
LH
823 pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
824 merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
825 vma->anon_vma, vma->vm_file, pgoff, new_pol,
826 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
827 if (merged) {
828 *prev = merged;
829 return vma_replace_policy(merged, new_pol);
830 }
831
832 if (vma->vm_start != vmstart) {
833 err = split_vma(vmi, vma, vmstart, 1);
8d34694c 834 if (err)
f4e9e0e6
LH
835 return err;
836 }
9d8cebd4 837
f4e9e0e6
LH
838 if (vma->vm_end != vmend) {
839 err = split_vma(vmi, vma, vmend, 0);
840 if (err)
841 return err;
842 }
843
844 *prev = vma;
845 return vma_replace_policy(vma, new_pol);
1da177e4
LT
846}
847
1da177e4 848/* Set the process memory policy */
028fec41
DR
849static long do_set_mempolicy(unsigned short mode, unsigned short flags,
850 nodemask_t *nodes)
1da177e4 851{
58568d2a 852 struct mempolicy *new, *old;
4bfc4495 853 NODEMASK_SCRATCH(scratch);
58568d2a 854 int ret;
1da177e4 855
4bfc4495
KH
856 if (!scratch)
857 return -ENOMEM;
f4e53d91 858
4bfc4495
KH
859 new = mpol_new(mode, flags, nodes);
860 if (IS_ERR(new)) {
861 ret = PTR_ERR(new);
862 goto out;
863 }
2c7c3a7d 864
12c1dc8e 865 task_lock(current);
4bfc4495 866 ret = mpol_set_nodemask(new, nodes, scratch);
58568d2a 867 if (ret) {
12c1dc8e 868 task_unlock(current);
58568d2a 869 mpol_put(new);
4bfc4495 870 goto out;
58568d2a 871 }
12c1dc8e 872
58568d2a 873 old = current->mempolicy;
1da177e4 874 current->mempolicy = new;
45816682
VB
875 if (new && new->mode == MPOL_INTERLEAVE)
876 current->il_prev = MAX_NUMNODES-1;
58568d2a 877 task_unlock(current);
58568d2a 878 mpol_put(old);
4bfc4495
KH
879 ret = 0;
880out:
881 NODEMASK_SCRATCH_FREE(scratch);
882 return ret;
1da177e4
LT
883}
884
/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
890static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
1da177e4 891{
dfcd3c0d 892 nodes_clear(*nodes);
bea904d5
LS
893 if (p == &default_policy)
894 return;
895
45c4745a 896 switch (p->mode) {
19770b32 897 case MPOL_BIND:
1da177e4 898 case MPOL_INTERLEAVE:
269fbe72 899 case MPOL_PREFERRED:
b27abacc 900 case MPOL_PREFERRED_MANY:
269fbe72 901 *nodes = p->nodes;
1da177e4 902 break;
7858d7bc
FT
903 case MPOL_LOCAL:
904 /* return empty node mask for local allocation */
905 break;
1da177e4
LT
906 default:
907 BUG();
908 }
909}
910
3b9aadf7 911static int lookup_node(struct mm_struct *mm, unsigned long addr)
1da177e4 912{
ba841078 913 struct page *p = NULL;
f728b9c4 914 int ret;
1da177e4 915
f728b9c4
JH
916 ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
917 if (ret > 0) {
918 ret = page_to_nid(p);
1da177e4
LT
919 put_page(p);
920 }
f728b9c4 921 return ret;
1da177e4
LT
922}
923
1da177e4 924/* Retrieve NUMA policy */
dbcb0f19
AB
925static long do_get_mempolicy(int *policy, nodemask_t *nmask,
926 unsigned long addr, unsigned long flags)
1da177e4 927{
8bccd85f 928 int err;
1da177e4
LT
929 struct mm_struct *mm = current->mm;
930 struct vm_area_struct *vma = NULL;
3b9aadf7 931 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
1da177e4 932
754af6f5
LS
933 if (flags &
934 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
1da177e4 935 return -EINVAL;
754af6f5
LS
936
937 if (flags & MPOL_F_MEMS_ALLOWED) {
938 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
939 return -EINVAL;
940 *policy = 0; /* just so it's initialized */
58568d2a 941 task_lock(current);
754af6f5 942 *nmask = cpuset_current_mems_allowed;
58568d2a 943 task_unlock(current);
754af6f5
LS
944 return 0;
945 }
946
1da177e4 947 if (flags & MPOL_F_ADDR) {
bea904d5
LS
948 /*
949 * Do NOT fall back to task policy if the
950 * vma/shared policy at addr is NULL. We
951 * want to return MPOL_DEFAULT in this case.
952 */
d8ed45c5 953 mmap_read_lock(mm);
33e3575c 954 vma = vma_lookup(mm, addr);
1da177e4 955 if (!vma) {
d8ed45c5 956 mmap_read_unlock(mm);
1da177e4
LT
957 return -EFAULT;
958 }
959 if (vma->vm_ops && vma->vm_ops->get_policy)
960 pol = vma->vm_ops->get_policy(vma, addr);
961 else
962 pol = vma->vm_policy;
963 } else if (addr)
964 return -EINVAL;
965
966 if (!pol)
bea904d5 967 pol = &default_policy; /* indicates default behavior */
1da177e4
LT
968
969 if (flags & MPOL_F_NODE) {
970 if (flags & MPOL_F_ADDR) {
3b9aadf7 971 /*
f728b9c4
JH
972 * Take a refcount on the mpol, because we are about to
973 * drop the mmap_lock, after which only "pol" remains
974 * valid, "vma" is stale.
3b9aadf7
AA
975 */
976 pol_refcount = pol;
977 vma = NULL;
978 mpol_get(pol);
f728b9c4 979 mmap_read_unlock(mm);
3b9aadf7 980 err = lookup_node(mm, addr);
1da177e4
LT
981 if (err < 0)
982 goto out;
8bccd85f 983 *policy = err;
1da177e4 984 } else if (pol == current->mempolicy &&
45c4745a 985 pol->mode == MPOL_INTERLEAVE) {
269fbe72 986 *policy = next_node_in(current->il_prev, pol->nodes);
1da177e4
LT
987 } else {
988 err = -EINVAL;
989 goto out;
990 }
bea904d5
LS
991 } else {
992 *policy = pol == &default_policy ? MPOL_DEFAULT :
993 pol->mode;
d79df630
DR
994 /*
995 * Internal mempolicy flags must be masked off before exposing
996 * the policy to userspace.
997 */
998 *policy |= (pol->flags & MPOL_MODE_FLAGS);
bea904d5 999 }
1da177e4 1000
1da177e4 1001 err = 0;
58568d2a 1002 if (nmask) {
c6b6ef8b
LS
1003 if (mpol_store_user_nodemask(pol)) {
1004 *nmask = pol->w.user_nodemask;
1005 } else {
1006 task_lock(current);
1007 get_policy_nodemask(pol, nmask);
1008 task_unlock(current);
1009 }
58568d2a 1010 }
1da177e4
LT
1011
1012 out:
52cd3b07 1013 mpol_cond_put(pol);
1da177e4 1014 if (vma)
d8ed45c5 1015 mmap_read_unlock(mm);
3b9aadf7
AA
1016 if (pol_refcount)
1017 mpol_put(pol_refcount);
1da177e4
LT
1018 return err;
1019}
1020
b20a3503 1021#ifdef CONFIG_MIGRATION
4a64981d 1022static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
fc301289 1023 unsigned long flags)
6ce3c4c0
CL
1024{
1025 /*
4a64981d
VMO
1026 * We try to migrate only unshared folios. If it is shared it
1027 * is likely not worth migrating.
1028 *
1029 * To check if the folio is shared, ideally we want to make sure
1030 * every page is mapped to the same process. Doing that is very
1031 * expensive, so check the estimated mapcount of the folio instead.
6ce3c4c0 1032 */
4a64981d 1033 if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
be2d5756 1034 if (folio_isolate_lru(folio)) {
4a64981d
VMO
1035 list_add_tail(&folio->lru, foliolist);
1036 node_stat_mod_folio(folio,
1037 NR_ISOLATED_ANON + folio_is_file_lru(folio),
1038 folio_nr_pages(folio));
a53190a4
YS
1039 } else if (flags & MPOL_MF_STRICT) {
1040 /*
4a64981d
VMO
1041 * Non-movable folio may reach here. And, there may be
1042 * temporary off LRU folios or non-LRU movable folios.
1043 * Treat them as unmovable folios since they can't be
a53190a4
YS
1044 * isolated, so they can't be moved at the moment. It
1045 * should return -EIO for this case too.
1046 */
1047 return -EIO;
62695a84
NP
1048 }
1049 }
a53190a4
YS
1050
1051 return 0;
7e2ab150 1052}
6ce3c4c0 1053
7e2ab150
CL
1054/*
1055 * Migrate pages from one node to a target node.
1056 * Returns error or the number of pages not migrated.
1057 */
dbcb0f19
AB
1058static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1059 int flags)
7e2ab150
CL
1060{
1061 nodemask_t nmask;
66850be5 1062 struct vm_area_struct *vma;
7e2ab150
CL
1063 LIST_HEAD(pagelist);
1064 int err = 0;
a0976311
JK
1065 struct migration_target_control mtc = {
1066 .nid = dest,
1067 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1068 };
7e2ab150
CL
1069
1070 nodes_clear(nmask);
1071 node_set(source, nmask);
6ce3c4c0 1072
08270807
MK
1073 /*
1074 * This does not "check" the range but isolates all pages that
1075 * need migration. Between passing in the full user address
1076 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1077 */
66850be5 1078 vma = find_vma(mm, 0);
08270807 1079 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
66850be5 1080 queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
7e2ab150
CL
1081 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1082
cf608ac1 1083 if (!list_empty(&pagelist)) {
a0976311 1084 err = migrate_pages(&pagelist, alloc_migration_target, NULL,
5ac95884 1085 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
cf608ac1 1086 if (err)
e2d8cf40 1087 putback_movable_pages(&pagelist);
cf608ac1 1088 }
95a402c3 1089
7e2ab150 1090 return err;
6ce3c4c0
CL
1091}
1092
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
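/*
 * Illustrative caller sketch (not from this file; uses the libnuma
 * migrate_pages(2) wrapper from <numaif.h>): asking that everything the
 * target task currently has on node 0 be moved to node 2.
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 2;
 *	migrate_pages(pid, 8 * sizeof(from), &from, &to);
 *
 * The kernel path for that request ends up in do_migrate_pages() below,
 * which tries to preserve the relative layout across the two masks.
 */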
0ce72d4f
AM
1099int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1100 const nodemask_t *to, int flags)
39743889 1101{
7e2ab150 1102 int busy = 0;
f555befd 1103 int err = 0;
7e2ab150 1104 nodemask_t tmp;
39743889 1105
361a2a22 1106 lru_cache_disable();
0aedadf9 1107
d8ed45c5 1108 mmap_read_lock(mm);
39743889 1109
	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same. If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient. As we go, we remember the
	 * most recent <s, d> pair that moved (s != d). If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved. If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
d4984711 1140
0ce72d4f 1141 tmp = *from;
7e2ab150 1142 while (!nodes_empty(tmp)) {
68d68ff6 1143 int s, d;
b76ac7e7 1144 int source = NUMA_NO_NODE;
7e2ab150
CL
1145 int dest = 0;
1146
1147 for_each_node_mask(s, tmp) {
4a5b18cc
LW
1148
1149 /*
1150 * do_migrate_pages() tries to maintain the relative
1151 * node relationship of the pages established between
1152 * threads and memory areas.
1153 *
1154 * However if the number of source nodes is not equal to
1155 * the number of destination nodes we can not preserve
1156 * this node relative relationship. In that case, skip
1157 * copying memory from a node that is in the destination
1158 * mask.
1159 *
1160 * Example: [2,3,4] -> [3,4,5] moves everything.
1161 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1162 */
1163
0ce72d4f
AM
1164 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1165 (node_isset(s, *to)))
4a5b18cc
LW
1166 continue;
1167
0ce72d4f 1168 d = node_remap(s, *from, *to);
7e2ab150
CL
1169 if (s == d)
1170 continue;
1171
1172 source = s; /* Node moved. Memorize */
1173 dest = d;
1174
1175 /* dest not in remaining from nodes? */
1176 if (!node_isset(dest, tmp))
1177 break;
1178 }
b76ac7e7 1179 if (source == NUMA_NO_NODE)
7e2ab150
CL
1180 break;
1181
1182 node_clear(source, tmp);
1183 err = migrate_to_node(mm, source, dest, flags);
1184 if (err > 0)
1185 busy += err;
1186 if (err < 0)
1187 break;
39743889 1188 }
d8ed45c5 1189 mmap_read_unlock(mm);
d479960e 1190
361a2a22 1191 lru_cache_enable();
7e2ab150
CL
1192 if (err < 0)
1193 return err;
1194 return busy;
b20a3503
CL
1195
1196}
1197
/*
 * Allocate a new page for page migration based on vma policy.
 * Start by assuming the page is mapped by the same vma as contains @start.
 * Search forward from there, if not. N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
4e096ae1 1205static struct folio *new_folio(struct folio *src, unsigned long start)
95a402c3 1206{
d05f0cdc 1207 struct vm_area_struct *vma;
3f649ab7 1208 unsigned long address;
66850be5 1209 VMA_ITERATOR(vmi, current->mm, start);
ec4858e0 1210 gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
95a402c3 1211
66850be5 1212 for_each_vma(vmi, vma) {
4e096ae1 1213 address = page_address_in_vma(&src->page, vma);
3ad33b24
LS
1214 if (address != -EFAULT)
1215 break;
3ad33b24 1216 }
11c731e8 1217
d0ce0e47 1218 if (folio_test_hugetlb(src)) {
4e096ae1 1219 return alloc_hugetlb_folio_vma(folio_hstate(src),
389c8178 1220 vma, address);
d0ce0e47 1221 }
ec4858e0
MWO
1222
1223 if (folio_test_large(src))
1224 gfp = GFP_TRANSHUGE;
1225
0bf598d8 1226 /*
ec4858e0 1227 * if !vma, vma_alloc_folio() will use task or system default policy
0bf598d8 1228 */
4e096ae1 1229 return vma_alloc_folio(gfp, folio_order(src), vma, address,
ec4858e0 1230 folio_test_large(src));
95a402c3 1231}
b20a3503
CL
1232#else
1233
4a64981d 1234static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
b20a3503
CL
1235 unsigned long flags)
1236{
a53190a4 1237 return -EIO;
39743889
CL
1238}
1239
0ce72d4f
AM
1240int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1241 const nodemask_t *to, int flags)
b20a3503
CL
1242{
1243 return -ENOSYS;
1244}
95a402c3 1245
4e096ae1 1246static struct folio *new_folio(struct folio *src, unsigned long start)
95a402c3
CL
1247{
1248 return NULL;
1249}
b20a3503
CL
1250#endif
1251
dbcb0f19 1252static long do_mbind(unsigned long start, unsigned long len,
028fec41
DR
1253 unsigned short mode, unsigned short mode_flags,
1254 nodemask_t *nmask, unsigned long flags)
6ce3c4c0 1255{
6ce3c4c0 1256 struct mm_struct *mm = current->mm;
f4e9e0e6
LH
1257 struct vm_area_struct *vma, *prev;
1258 struct vma_iterator vmi;
6ce3c4c0
CL
1259 struct mempolicy *new;
1260 unsigned long end;
1261 int err;
d8835445 1262 int ret;
6ce3c4c0
CL
1263 LIST_HEAD(pagelist);
1264
b24f53a0 1265 if (flags & ~(unsigned long)MPOL_MF_VALID)
6ce3c4c0 1266 return -EINVAL;
74c00241 1267 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
6ce3c4c0
CL
1268 return -EPERM;
1269
1270 if (start & ~PAGE_MASK)
1271 return -EINVAL;
1272
1273 if (mode == MPOL_DEFAULT)
1274 flags &= ~MPOL_MF_STRICT;
1275
aaa31e05 1276 len = PAGE_ALIGN(len);
6ce3c4c0
CL
1277 end = start + len;
1278
1279 if (end < start)
1280 return -EINVAL;
1281 if (end == start)
1282 return 0;
1283
028fec41 1284 new = mpol_new(mode, mode_flags, nmask);
6ce3c4c0
CL
1285 if (IS_ERR(new))
1286 return PTR_ERR(new);
1287
b24f53a0
LS
1288 if (flags & MPOL_MF_LAZY)
1289 new->flags |= MPOL_F_MOF;
1290
6ce3c4c0
CL
1291 /*
1292 * If we are using the default policy then operation
1293 * on discontinuous address spaces is okay after all
1294 */
1295 if (!new)
1296 flags |= MPOL_MF_DISCONTIG_OK;
1297
028fec41
DR
1298 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1299 start, start + len, mode, mode_flags,
00ef2d2f 1300 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
6ce3c4c0 1301
0aedadf9
CL
1302 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1303
361a2a22 1304 lru_cache_disable();
0aedadf9 1305 }
4bfc4495
KH
1306 {
1307 NODEMASK_SCRATCH(scratch);
1308 if (scratch) {
d8ed45c5 1309 mmap_write_lock(mm);
4bfc4495 1310 err = mpol_set_nodemask(new, nmask, scratch);
4bfc4495 1311 if (err)
d8ed45c5 1312 mmap_write_unlock(mm);
4bfc4495
KH
1313 } else
1314 err = -ENOMEM;
1315 NODEMASK_SCRATCH_FREE(scratch);
1316 }
b05ca738
KM
1317 if (err)
1318 goto mpol_out;
1319
6c21e066
JH
1320 /*
1321 * Lock the VMAs before scanning for pages to migrate, to ensure we don't
1322 * miss a concurrently inserted page.
1323 */
1324 vma_iter_init(&vmi, mm, start);
1325 for_each_vma_range(vmi, vma, end)
1326 vma_start_write(vma);
1327
d8835445 1328 ret = queue_pages_range(mm, start, end, nmask,
6ce3c4c0 1329 flags | MPOL_MF_INVERT, &pagelist);
d8835445
YS
1330
1331 if (ret < 0) {
a85dfc30 1332 err = ret;
d8835445
YS
1333 goto up_out;
1334 }
1335
f4e9e0e6
LH
1336 vma_iter_init(&vmi, mm, start);
1337 prev = vma_prev(&vmi);
1338 for_each_vma_range(vmi, vma, end) {
1339 err = mbind_range(&vmi, vma, &prev, start, end, new);
1340 if (err)
1341 break;
1342 }
7e2ab150 1343
b24f53a0
LS
1344 if (!err) {
1345 int nr_failed = 0;
1346
cf608ac1 1347 if (!list_empty(&pagelist)) {
b24f53a0 1348 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
4e096ae1 1349 nr_failed = migrate_pages(&pagelist, new_folio, NULL,
5ac95884 1350 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
cf608ac1 1351 if (nr_failed)
74060e4d 1352 putback_movable_pages(&pagelist);
cf608ac1 1353 }
6ce3c4c0 1354
d8835445 1355 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
6ce3c4c0 1356 err = -EIO;
a85dfc30 1357 } else {
d8835445 1358up_out:
a85dfc30
YS
1359 if (!list_empty(&pagelist))
1360 putback_movable_pages(&pagelist);
1361 }
1362
d8ed45c5 1363 mmap_write_unlock(mm);
d8835445 1364mpol_out:
f0be3d32 1365 mpol_put(new);
d479960e 1366 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
361a2a22 1367 lru_cache_enable();
6ce3c4c0
CL
1368 return err;
1369}
1370
/*
 * User space interface with variable sized bitmaps for nodelists.
 */
e130242d
AB
1374static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1375 unsigned long maxnode)
1376{
1377 unsigned long nlongs = BITS_TO_LONGS(maxnode);
1378 int ret;
1379
1380 if (in_compat_syscall())
1381 ret = compat_get_bitmap(mask,
1382 (const compat_ulong_t __user *)nmask,
1383 maxnode);
1384 else
1385 ret = copy_from_user(mask, nmask,
1386 nlongs * sizeof(unsigned long));
1387
1388 if (ret)
1389 return -EFAULT;
1390
1391 if (maxnode % BITS_PER_LONG)
1392 mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1393
1394 return 0;
1395}
8bccd85f
CL
1396
1397/* Copy a node mask from user space. */
39743889 1398static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
8bccd85f
CL
1399 unsigned long maxnode)
1400{
8bccd85f
CL
1401 --maxnode;
1402 nodes_clear(*nodes);
1403 if (maxnode == 0 || !nmask)
1404 return 0;
a9c930ba 1405 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
636f13c1 1406 return -EINVAL;
8bccd85f 1407
	/*
	 * When the user specified more nodes than supported just check
	 * if the non supported part is all zero, one word at a time,
	 * starting at the end.
	 */
e130242d
AB
1413 while (maxnode > MAX_NUMNODES) {
1414 unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1415 unsigned long t;
8bccd85f 1416
000eca5d 1417 if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
56521e7a 1418 return -EFAULT;
e130242d
AB
1419
1420 if (maxnode - bits >= MAX_NUMNODES) {
1421 maxnode -= bits;
1422 } else {
1423 maxnode = MAX_NUMNODES;
1424 t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1425 }
1426 if (t)
56521e7a
YX
1427 return -EINVAL;
1428 }
1429
e130242d 1430 return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
8bccd85f
CL
1431}
1432
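/*
 * Illustrative example (not from this file) of what get_nodes() accepts: a
 * caller on a MAX_NUMNODES=64 kernel may pass a much wider mask, e.g.
 *
 *	unsigned long mask[16] = { 0x3 };	// nodes 0 and 1
 *	set_mempolicy(MPOL_BIND, mask, 1024);
 *
 * as long as every bit at or above MAX_NUMNODES is clear; any set bit in
 * the unsupported tail makes get_nodes() return -EINVAL.
 */
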
1433/* Copy a kernel node mask to user space */
1434static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1435 nodemask_t *nodes)
1436{
1437 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
050c17f2 1438 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
e130242d
AB
1439 bool compat = in_compat_syscall();
1440
1441 if (compat)
1442 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
8bccd85f
CL
1443
1444 if (copy > nbytes) {
1445 if (copy > PAGE_SIZE)
1446 return -EINVAL;
1447 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1448 return -EFAULT;
1449 copy = nbytes;
e130242d 1450 maxnode = nr_node_ids;
8bccd85f 1451 }
e130242d
AB
1452
1453 if (compat)
1454 return compat_put_bitmap((compat_ulong_t __user *)mask,
1455 nodes_addr(*nodes), maxnode);
1456
8bccd85f
CL
1457 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1458}
1459
95837924
FT
1460/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1461static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1462{
1463 *flags = *mode & MPOL_MODE_FLAGS;
1464 *mode &= ~MPOL_MODE_FLAGS;
b27abacc 1465
a38a59fd 1466 if ((unsigned int)(*mode) >= MPOL_MAX)
95837924
FT
1467 return -EINVAL;
1468 if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1469 return -EINVAL;
6d2aec9e
ED
1470 if (*flags & MPOL_F_NUMA_BALANCING) {
1471 if (*mode != MPOL_BIND)
1472 return -EINVAL;
1473 *flags |= (MPOL_F_MOF | MPOL_F_MORON);
1474 }
95837924
FT
1475 return 0;
1476}
1477
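/*
 * Illustrative example (not from this file): user space packs optional mode
 * flags into the same word as the mode, e.g.
 *
 *	set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES, &mask, maxnode);
 *
 * sanitize_mpol_flags() splits this into *mode = MPOL_INTERLEAVE and
 * *flags = MPOL_F_STATIC_NODES, and rejects invalid combinations such as
 * MPOL_F_STATIC_NODES together with MPOL_F_RELATIVE_NODES.
 */
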
e7dc9ad6
DB
1478static long kernel_mbind(unsigned long start, unsigned long len,
1479 unsigned long mode, const unsigned long __user *nmask,
1480 unsigned long maxnode, unsigned int flags)
8bccd85f 1481{
95837924 1482 unsigned short mode_flags;
8bccd85f 1483 nodemask_t nodes;
95837924 1484 int lmode = mode;
8bccd85f
CL
1485 int err;
1486
057d3389 1487 start = untagged_addr(start);
95837924
FT
1488 err = sanitize_mpol_flags(&lmode, &mode_flags);
1489 if (err)
1490 return err;
1491
8bccd85f
CL
1492 err = get_nodes(&nodes, nmask, maxnode);
1493 if (err)
1494 return err;
95837924
FT
1495
1496 return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
8bccd85f
CL
1497}
1498
c6018b4b
AK
1499SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1500 unsigned long, home_node, unsigned long, flags)
1501{
1502 struct mm_struct *mm = current->mm;
f4e9e0e6 1503 struct vm_area_struct *vma, *prev;
e976936c 1504 struct mempolicy *new, *old;
c6018b4b
AK
1505 unsigned long end;
1506 int err = -ENOENT;
66850be5 1507 VMA_ITERATOR(vmi, mm, start);
c6018b4b
AK
1508
1509 start = untagged_addr(start);
1510 if (start & ~PAGE_MASK)
1511 return -EINVAL;
1512 /*
1513 * flags is used for future extension if any.
1514 */
1515 if (flags != 0)
1516 return -EINVAL;
1517
1518 /*
1519 * Check home_node is online to avoid accessing uninitialized
1520 * NODE_DATA.
1521 */
1522 if (home_node >= MAX_NUMNODES || !node_online(home_node))
1523 return -EINVAL;
1524
aaa31e05 1525 len = PAGE_ALIGN(len);
c6018b4b
AK
1526 end = start + len;
1527
1528 if (end < start)
1529 return -EINVAL;
1530 if (end == start)
1531 return 0;
1532 mmap_write_lock(mm);
f4e9e0e6 1533 prev = vma_prev(&vmi);
66850be5 1534 for_each_vma_range(vmi, vma, end) {
c6018b4b
AK
1535 /*
1536 * If any vma in the range got policy other than MPOL_BIND
1537 * or MPOL_PREFERRED_MANY we return error. We don't reset
1538 * the home node for vmas we already updated before.
1539 */
e976936c
MH
1540 old = vma_policy(vma);
1541 if (!old)
1542 continue;
1543 if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
c6018b4b
AK
1544 err = -EOPNOTSUPP;
1545 break;
1546 }
e976936c
MH
1547 new = mpol_dup(old);
1548 if (IS_ERR(new)) {
1549 err = PTR_ERR(new);
1550 break;
1551 }
c6018b4b 1552
6c21e066 1553 vma_start_write(vma);
c6018b4b 1554 new->home_node = home_node;
f4e9e0e6 1555 err = mbind_range(&vmi, vma, &prev, start, end, new);
c6018b4b
AK
1556 mpol_put(new);
1557 if (err)
1558 break;
1559 }
1560 mmap_write_unlock(mm);
1561 return err;
1562}
1563
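/*
 * Illustrative caller sketch (not from this file; there may be no libc
 * wrapper, so a raw syscall is shown): pin the home node used for first
 * allocation attempts of an MPOL_BIND/MPOL_PREFERRED_MANY range to node 1.
 *
 *	syscall(__NR_set_mempolicy_home_node, addr, len, 1, 0);
 *
 * Ranges whose VMAs carry any other policy get -EOPNOTSUPP from the
 * implementation above.
 */
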
e7dc9ad6
DB
1564SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1565 unsigned long, mode, const unsigned long __user *, nmask,
1566 unsigned long, maxnode, unsigned int, flags)
1567{
1568 return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1569}
1570
8bccd85f 1571/* Set the process memory policy */
af03c4ac
DB
1572static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1573 unsigned long maxnode)
8bccd85f 1574{
95837924 1575 unsigned short mode_flags;
8bccd85f 1576 nodemask_t nodes;
95837924
FT
1577 int lmode = mode;
1578 int err;
1579
1580 err = sanitize_mpol_flags(&lmode, &mode_flags);
1581 if (err)
1582 return err;
8bccd85f 1583
8bccd85f
CL
1584 err = get_nodes(&nodes, nmask, maxnode);
1585 if (err)
1586 return err;
95837924
FT
1587
1588 return do_set_mempolicy(lmode, mode_flags, &nodes);
8bccd85f
CL
1589}
1590
af03c4ac
DB
1591SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1592 unsigned long, maxnode)
1593{
1594 return kernel_set_mempolicy(mode, nmask, maxnode);
1595}
1596
b6e9b0ba
DB
1597static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1598 const unsigned long __user *old_nodes,
1599 const unsigned long __user *new_nodes)
39743889 1600{
596d7cfa 1601 struct mm_struct *mm = NULL;
39743889 1602 struct task_struct *task;
39743889
CL
1603 nodemask_t task_nodes;
1604 int err;
596d7cfa
KM
1605 nodemask_t *old;
1606 nodemask_t *new;
1607 NODEMASK_SCRATCH(scratch);
1608
1609 if (!scratch)
1610 return -ENOMEM;
39743889 1611
596d7cfa
KM
1612 old = &scratch->mask1;
1613 new = &scratch->mask2;
1614
1615 err = get_nodes(old, old_nodes, maxnode);
39743889 1616 if (err)
596d7cfa 1617 goto out;
39743889 1618
596d7cfa 1619 err = get_nodes(new, new_nodes, maxnode);
39743889 1620 if (err)
596d7cfa 1621 goto out;
39743889
CL
1622
1623 /* Find the mm_struct */
55cfaa3c 1624 rcu_read_lock();
228ebcbe 1625 task = pid ? find_task_by_vpid(pid) : current;
39743889 1626 if (!task) {
55cfaa3c 1627 rcu_read_unlock();
596d7cfa
KM
1628 err = -ESRCH;
1629 goto out;
39743889 1630 }
3268c63e 1631 get_task_struct(task);
39743889 1632
596d7cfa 1633 err = -EINVAL;
39743889
CL
1634
1635 /*
31367466
OE
1636 * Check if this process has the right to modify the specified process.
1637 * Use the regular "ptrace_may_access()" checks.
39743889 1638 */
31367466 1639 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 1640 rcu_read_unlock();
39743889 1641 err = -EPERM;
3268c63e 1642 goto out_put;
39743889 1643 }
c69e8d9c 1644 rcu_read_unlock();
39743889
CL
1645
1646 task_nodes = cpuset_mems_allowed(task);
1647 /* Is the user allowed to access the target nodes? */
596d7cfa 1648 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889 1649 err = -EPERM;
3268c63e 1650 goto out_put;
39743889
CL
1651 }
1652
0486a38b
YX
1653 task_nodes = cpuset_mems_allowed(current);
1654 nodes_and(*new, *new, task_nodes);
1655 if (nodes_empty(*new))
1656 goto out_put;
1657
86c3a764
DQ
1658 err = security_task_movememory(task);
1659 if (err)
3268c63e 1660 goto out_put;
86c3a764 1661
3268c63e
CL
1662 mm = get_task_mm(task);
1663 put_task_struct(task);
f2a9ef88
SL
1664
1665 if (!mm) {
3268c63e 1666 err = -EINVAL;
f2a9ef88
SL
1667 goto out;
1668 }
1669
1670 err = do_migrate_pages(mm, old, new,
1671 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
3268c63e
CL
1672
1673 mmput(mm);
1674out:
596d7cfa
KM
1675 NODEMASK_SCRATCH_FREE(scratch);
1676
39743889 1677 return err;
3268c63e
CL
1678
1679out_put:
1680 put_task_struct(task);
1681 goto out;
1682
39743889
CL
1683}
1684
b6e9b0ba
DB
1685SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1686 const unsigned long __user *, old_nodes,
1687 const unsigned long __user *, new_nodes)
1688{
1689 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1690}
1691
39743889 1692
8bccd85f 1693/* Retrieve NUMA policy */
af03c4ac
DB
1694static int kernel_get_mempolicy(int __user *policy,
1695 unsigned long __user *nmask,
1696 unsigned long maxnode,
1697 unsigned long addr,
1698 unsigned long flags)
8bccd85f 1699{
dbcb0f19 1700 int err;
3f649ab7 1701 int pval;
8bccd85f
CL
1702 nodemask_t nodes;
1703
050c17f2 1704 if (nmask != NULL && maxnode < nr_node_ids)
8bccd85f
CL
1705 return -EINVAL;
1706
4605f057
WH
1707 addr = untagged_addr(addr);
1708
8bccd85f
CL
1709 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1710
1711 if (err)
1712 return err;
1713
1714 if (policy && put_user(pval, policy))
1715 return -EFAULT;
1716
1717 if (nmask)
1718 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1719
1720 return err;
1721}
1722
af03c4ac
DB
1723SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1724 unsigned long __user *, nmask, unsigned long, maxnode,
1725 unsigned long, addr, unsigned long, flags)
1726{
1727 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1728}
1729
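/*
 * Editor's illustrative sketch, not part of mm/mempolicy.c: querying the
 * policy that applies to a mapped address with get_mempolicy(2).  Assumes
 * libnuma's <numaif.h> wrapper and a single-word node mask.
 */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int mode;
        unsigned long nmask = 0;
        void *buf = malloc(4096);

        if (!buf)
                return 1;
        /* MPOL_F_ADDR: report the VMA policy covering 'buf' */
        if (get_mempolicy(&mode, &nmask, 8 * sizeof(nmask), buf,
                          MPOL_F_ADDR) == 0)
                printf("mode=%d nodemask=%#lx\n", mode, nmask);
        else
                perror("get_mempolicy");
        free(buf);
        return 0;
}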
20ca87f2
LX
1730bool vma_migratable(struct vm_area_struct *vma)
1731{
1732 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1733 return false;
1734
1735 /*
1736 * DAX device mappings require predictable access latency, so avoid
1737 * incurring periodic faults.
1738 */
1739 if (vma_is_dax(vma))
1740 return false;
1741
1742 if (is_vm_hugetlb_page(vma) &&
1743 !hugepage_migration_supported(hstate_vma(vma)))
1744 return false;
1745
1746 /*
1747 * Migration allocates pages in the highest zone. If we cannot
1748 * do so then migration (at least from node to node) is not
1749 * possible.
1750 */
1751 if (vma->vm_file &&
1752 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1753 < policy_zone)
1754 return false;
1755 return true;
1756}
1757
74d2c3a0
ON
1758struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1759 unsigned long addr)
1da177e4 1760{
8d90274b 1761 struct mempolicy *pol = NULL;
1da177e4
LT
1762
1763 if (vma) {
480eccf9 1764 if (vma->vm_ops && vma->vm_ops->get_policy) {
8d90274b 1765 pol = vma->vm_ops->get_policy(vma, addr);
00442ad0 1766 } else if (vma->vm_policy) {
1da177e4 1767 pol = vma->vm_policy;
00442ad0
MG
1768
1769 /*
1770 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1771 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1772 * count on these policies which will be dropped by
1773 * mpol_cond_put() later
1774 */
1775 if (mpol_needs_cond_ref(pol))
1776 mpol_get(pol);
1777 }
1da177e4 1778 }
f15ca78e 1779
74d2c3a0
ON
1780 return pol;
1781}
1782
1783/*
dd6eecb9 1784 * get_vma_policy(@vma, @addr)
1785 * @vma: virtual memory area whose policy is sought
1786 * @addr: address in @vma for shared policy lookup
1787 *
1788 * Returns effective policy for a VMA at specified address.
dd6eecb9 1789 * Falls back to current->mempolicy or system default policy, as necessary.
1790 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1791 * count--added by the get_policy() vm_op, as appropriate--to protect against
1792 * freeing by another task. It is the caller's responsibility to free the
1793 * extra reference for shared policies.
1794 */
ac79f78d 1795static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
dd6eecb9 1796 unsigned long addr)
74d2c3a0
ON
1797{
1798 struct mempolicy *pol = __get_vma_policy(vma, addr);
1799
8d90274b 1800 if (!pol)
dd6eecb9 1801 pol = get_task_policy(current);
8d90274b 1802
1da177e4
LT
1803 return pol;
1804}
1805
6b6482bb 1806bool vma_policy_mof(struct vm_area_struct *vma)
fc314724 1807{
6b6482bb 1808 struct mempolicy *pol;
fc314724 1809
6b6482bb
ON
1810 if (vma->vm_ops && vma->vm_ops->get_policy) {
1811 bool ret = false;
fc314724 1812
6b6482bb
ON
1813 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1814 if (pol && (pol->flags & MPOL_F_MOF))
1815 ret = true;
1816 mpol_cond_put(pol);
8d90274b 1817
6b6482bb 1818 return ret;
fc314724
MG
1819 }
1820
6b6482bb 1821 pol = vma->vm_policy;
8d90274b 1822 if (!pol)
6b6482bb 1823 pol = get_task_policy(current);
8d90274b 1824
fc314724
MG
1825 return pol->flags & MPOL_F_MOF;
1826}
1827
d2226ebd 1828bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
d3eb1570
LJ
1829{
1830 enum zone_type dynamic_policy_zone = policy_zone;
1831
1832 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1833
1834 /*
269fbe72 1835 * if policy->nodes has movable memory only,
1836 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1837 *
269fbe72 1838 * policy->nodes intersects with node_states[N_MEMORY],
f0953a1b 1839 * so if the following test fails, it implies that
269fbe72 1840 * policy->nodes has movable memory only.
d3eb1570 1841 */
269fbe72 1842 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
d3eb1570
LJ
1843 dynamic_policy_zone = ZONE_MOVABLE;
1844
1845 return zone >= dynamic_policy_zone;
1846}
1847
52cd3b07
LS
1848/*
1849 * Return a nodemask representing a mempolicy for filtering nodes for
1850 * page allocation
1851 */
8ca39e68 1852nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
19770b32 1853{
b27abacc
DH
1854 int mode = policy->mode;
1855
19770b32 1856 /* Lower zones don't get a nodemask applied for MPOL_BIND */
b27abacc
DH
1857 if (unlikely(mode == MPOL_BIND) &&
1858 apply_policy_zone(policy, gfp_zone(gfp)) &&
1859 cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1860 return &policy->nodes;
1861
1862 if (mode == MPOL_PREFERRED_MANY)
269fbe72 1863 return &policy->nodes;
19770b32
MG
1864
1865 return NULL;
1866}
1867
b27abacc
DH
1868/*
1869 * Return the preferred node id for 'prefer' mempolicy, and return
1870 * the given id for all other policies.
1871 *
1872 * policy_node() is always coupled with policy_nodemask(), which
1873 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1874 */
f8fd5253 1875static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1da177e4 1876{
7858d7bc 1877 if (policy->mode == MPOL_PREFERRED) {
269fbe72 1878 nd = first_node(policy->nodes);
7858d7bc 1879 } else {
19770b32 1880 /*
1881 * __GFP_THISNODE shouldn't even be used with the bind policy
1882 * because we might easily break the expectation to stay on the
1883 * requested node and not break the policy.
19770b32 1884 */
6d840958 1885 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1da177e4 1886 }
6d840958 1887
c6018b4b
AK
1888 if ((policy->mode == MPOL_BIND ||
1889 policy->mode == MPOL_PREFERRED_MANY) &&
1890 policy->home_node != NUMA_NO_NODE)
1891 return policy->home_node;
1892
04ec6264 1893 return nd;
1da177e4
LT
1894}
1895
1896/* Do dynamic interleaving for a process */
1897static unsigned interleave_nodes(struct mempolicy *policy)
1898{
45816682 1899 unsigned next;
1da177e4
LT
1900 struct task_struct *me = current;
1901
269fbe72 1902 next = next_node_in(me->il_prev, policy->nodes);
f5b087b5 1903 if (next < MAX_NUMNODES)
45816682
VB
1904 me->il_prev = next;
1905 return next;
1da177e4
LT
1906}
1907
dc85da15
CL
1908/*
1909 * Depending on the memory policy provide a node from which to allocate the
1910 * next slab entry.
1911 */
2a389610 1912unsigned int mempolicy_slab_node(void)
dc85da15 1913{
e7b691b0 1914 struct mempolicy *policy;
2a389610 1915 int node = numa_mem_id();
e7b691b0 1916
38b031dd 1917 if (!in_task())
2a389610 1918 return node;
e7b691b0
AK
1919
1920 policy = current->mempolicy;
7858d7bc 1921 if (!policy)
2a389610 1922 return node;
bea904d5
LS
1923
1924 switch (policy->mode) {
1925 case MPOL_PREFERRED:
269fbe72 1926 return first_node(policy->nodes);
765c4507 1927
dc85da15
CL
1928 case MPOL_INTERLEAVE:
1929 return interleave_nodes(policy);
1930
b27abacc
DH
1931 case MPOL_BIND:
1932 case MPOL_PREFERRED_MANY:
1933 {
c33d6c06
MG
1934 struct zoneref *z;
1935
dc85da15
CL
1936 /*
1937 * Follow bind policy behavior and start allocation at the
1938 * first node.
1939 */
19770b32 1940 struct zonelist *zonelist;
19770b32 1941 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
c9634cf0 1942 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
c33d6c06 1943 z = first_zones_zonelist(zonelist, highest_zoneidx,
269fbe72 1944 &policy->nodes);
c1093b74 1945 return z->zone ? zone_to_nid(z->zone) : node;
dd1a239f 1946 }
7858d7bc
FT
1947 case MPOL_LOCAL:
1948 return node;
dc85da15 1949
dc85da15 1950 default:
bea904d5 1951 BUG();
dc85da15
CL
1952 }
1953}
1954
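/*
 * Editor's sketch, not part of mm/mempolicy.c: how a slab-style caller
 * might consult mempolicy_slab_node() to pick the node for its next
 * per-node allocation.  example_alloc_slab_page() is a hypothetical name.
 */
static struct page *example_alloc_slab_page(gfp_t gfp, unsigned int order)
{
        int nid = mempolicy_slab_node();        /* honours current->mempolicy */

        return alloc_pages_node(nid, gfp, order);
}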
fee83b3a
AM
1955/*
1956 * Do static interleaving for a VMA with known offset @n. Returns the n'th
269fbe72 1957 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1958 * number of present nodes.
1959 */
98c70baa 1960static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1da177e4 1961{
276aeee1 1962 nodemask_t nodemask = pol->nodes;
1963 unsigned int target, nnodes;
fee83b3a
AM
1964 int i;
1965 int nid;
276aeee1 1966 /*
1967 * The barrier will stabilize the nodemask in a register or on
1968 * the stack so that it will stop changing under the code.
1969 *
1970 * Between first_node() and next_node(), pol->nodes could be changed
 1971 * by other threads, so we copy pol->nodes into a local variable first.
1972 */
1973 barrier();
1da177e4 1974
276aeee1 1975 nnodes = nodes_weight(nodemask);
f5b087b5
DR
1976 if (!nnodes)
1977 return numa_node_id();
fee83b3a 1978 target = (unsigned int)n % nnodes;
276aeee1 1979 nid = first_node(nodemask);
fee83b3a 1980 for (i = 0; i < target; i++)
276aeee1 1981 nid = next_node(nid, nodemask);
1da177e4
LT
1982 return nid;
1983}
1984
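/*
 * Editor's illustrative sketch, not part of mm/mempolicy.c: a userspace
 * model of the offset_il_node() arithmetic above, for a single-word node
 * mask.  With nodes {0,2,5} and offset n = 7, 7 % 3 == 1 selects the
 * second set bit, i.e. node 2.
 */
#include <stdio.h>

static int model_offset_il_node(unsigned long mask, unsigned long n)
{
        int weight = __builtin_popcountl(mask);
        int target, i;

        if (!weight)
                return -1;
        target = (int)(n % weight);
        for (i = 0; i < target; i++)
                mask &= mask - 1;               /* drop the lowest set bit */
        return __builtin_ctzl(mask);            /* index of the chosen node */
}

int main(void)
{
        printf("%d\n", model_offset_il_node(0x25UL, 7));        /* prints 2 */
        return 0;
}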
5da7ca86
CL
1985/* Determine a node number for interleave */
1986static inline unsigned interleave_nid(struct mempolicy *pol,
1987 struct vm_area_struct *vma, unsigned long addr, int shift)
1988{
1989 if (vma) {
1990 unsigned long off;
1991
3b98b087
NA
1992 /*
1993 * for small pages, there is no difference between
1994 * shift and PAGE_SHIFT, so the bit-shift is safe.
1995 * for huge pages, since vm_pgoff is in units of small
1996 * pages, we need to shift off the always 0 bits to get
1997 * a useful offset.
1998 */
1999 BUG_ON(shift < PAGE_SHIFT);
2000 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
5da7ca86 2001 off += (addr - vma->vm_start) >> shift;
98c70baa 2002 return offset_il_node(pol, off);
5da7ca86
CL
2003 } else
2004 return interleave_nodes(pol);
2005}
2006
00ac59ad 2007#ifdef CONFIG_HUGETLBFS
480eccf9 2008/*
04ec6264 2009 * huge_node(@vma, @addr, @gfp_flags, @mpol)
2010 * @vma: virtual memory area whose policy is sought
2011 * @addr: address in @vma for shared policy lookup and interleave policy
2012 * @gfp_flags: for requested zone
2013 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
b27abacc 2014 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
480eccf9 2015 *
04ec6264 2016 * Returns a nid suitable for a huge page allocation and a pointer
52cd3b07 2017 * to the struct mempolicy for conditional unref after allocation.
2018 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2019 * to the mempolicy's @nodemask for filtering the zonelist.
c0ff7453 2020 *
d26914d1 2021 * Must be protected by read_mems_allowed_begin()
480eccf9 2022 */
04ec6264
VB
2023int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2024 struct mempolicy **mpol, nodemask_t **nodemask)
5da7ca86 2025{
04ec6264 2026 int nid;
b27abacc 2027 int mode;
5da7ca86 2028
dd6eecb9 2029 *mpol = get_vma_policy(vma, addr);
b27abacc
DH
2030 *nodemask = NULL;
2031 mode = (*mpol)->mode;
5da7ca86 2032
b27abacc 2033 if (unlikely(mode == MPOL_INTERLEAVE)) {
04ec6264
VB
2034 nid = interleave_nid(*mpol, vma, addr,
2035 huge_page_shift(hstate_vma(vma)));
52cd3b07 2036 } else {
04ec6264 2037 nid = policy_node(gfp_flags, *mpol, numa_node_id());
b27abacc 2038 if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
269fbe72 2039 *nodemask = &(*mpol)->nodes;
480eccf9 2040 }
04ec6264 2041 return nid;
5da7ca86 2042}
06808b08
LS
2043
2044/*
2045 * init_nodemask_of_mempolicy
2046 *
2047 * If the current task's mempolicy is "default" [NULL], return 'false'
2048 * to indicate default policy. Otherwise, extract the policy nodemask
2049 * for 'bind' or 'interleave' policy into the argument nodemask, or
2050 * initialize the argument nodemask to contain the single node for
2051 * 'preferred' or 'local' policy and return 'true' to indicate presence
2052 * of non-default mempolicy.
2053 *
2054 * We don't bother with reference counting the mempolicy [mpol_get/put]
 2055 * because the current task is examining its own mempolicy and a task's
2056 * mempolicy is only ever changed by the task itself.
2057 *
2058 * N.B., it is the caller's responsibility to free a returned nodemask.
2059 */
2060bool init_nodemask_of_mempolicy(nodemask_t *mask)
2061{
2062 struct mempolicy *mempolicy;
06808b08
LS
2063
2064 if (!(mask && current->mempolicy))
2065 return false;
2066
c0ff7453 2067 task_lock(current);
06808b08
LS
2068 mempolicy = current->mempolicy;
2069 switch (mempolicy->mode) {
2070 case MPOL_PREFERRED:
b27abacc 2071 case MPOL_PREFERRED_MANY:
06808b08 2072 case MPOL_BIND:
06808b08 2073 case MPOL_INTERLEAVE:
269fbe72 2074 *mask = mempolicy->nodes;
7858d7bc
FT
2075 break;
2076
2077 case MPOL_LOCAL:
269fbe72 2078 init_nodemask_of_node(mask, numa_node_id());
06808b08
LS
2079 break;
2080
2081 default:
2082 BUG();
2083 }
c0ff7453 2084 task_unlock(current);
06808b08
LS
2085
2086 return true;
2087}
00ac59ad 2088#endif
5da7ca86 2089
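/*
 * Editor's sketch, not part of mm/mempolicy.c (only meaningful with
 * CONFIG_HUGETLBFS, where the helper above is built): the hugetlbfs-style
 * pattern of constraining an operation to the current task's mempolicy
 * nodes.  example_nodes_from_policy() is a hypothetical name.
 */
static void example_nodes_from_policy(void)
{
        NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);

        if (mask && init_nodemask_of_mempolicy(mask)) {
                /* non-default policy: operate only on the nodes in *mask */
        } else {
                /* default policy: treat all memory nodes as allowed */
        }
        NODEMASK_FREE(mask);
}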
6f48d0eb 2090/*
b26e517a 2091 * mempolicy_in_oom_domain
6f48d0eb 2092 *
2093 * If tsk's mempolicy is "bind", check for intersection between mask and
2094 * the policy nodemask. Otherwise, return true for all other policies
2095 * including "interleave", as a tsk with "interleave" policy may have
2096 * memory allocated from all nodes in system.
2097 *
2098 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2099 */
b26e517a 2100bool mempolicy_in_oom_domain(struct task_struct *tsk,
6f48d0eb
DR
2101 const nodemask_t *mask)
2102{
2103 struct mempolicy *mempolicy;
2104 bool ret = true;
2105
2106 if (!mask)
2107 return ret;
b26e517a 2108
6f48d0eb
DR
2109 task_lock(tsk);
2110 mempolicy = tsk->mempolicy;
b26e517a 2111 if (mempolicy && mempolicy->mode == MPOL_BIND)
269fbe72 2112 ret = nodes_intersects(mempolicy->nodes, *mask);
6f48d0eb 2113 task_unlock(tsk);
b26e517a 2114
6f48d0eb
DR
2115 return ret;
2116}
2117
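/*
 * Editor's sketch, not part of mm/mempolicy.c: the OOM-killer-style use of
 * mempolicy_in_oom_domain() to skip tasks whose 'bind' policy cannot
 * intersect the nodes the failed allocation was constrained to.
 * example_oom_skip() is a hypothetical name.
 */
static bool example_oom_skip(struct task_struct *tsk, const nodemask_t *mask)
{
        /* true from the helper means the task may hold memory of interest */
        return !mempolicy_in_oom_domain(tsk, mask);
}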
1da177e4
LT
 2118/* Allocate a page under the interleave policy.
 2119 Separate path because it needs to do special accounting. */
662f3a0b
AK
2120static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2121 unsigned nid)
1da177e4 2122{
1da177e4
LT
2123 struct page *page;
2124
84172f4b 2125 page = __alloc_pages(gfp, order, nid, NULL);
4518085e
KW
2126 /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2127 if (!static_branch_likely(&vm_numa_stat_key))
2128 return page;
de55c8b2
AR
2129 if (page && page_to_nid(page) == nid) {
2130 preempt_disable();
f19298b9 2131 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
de55c8b2
AR
2132 preempt_enable();
2133 }
1da177e4
LT
2134 return page;
2135}
2136
4c54d949
FT
2137static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2138 int nid, struct mempolicy *pol)
2139{
2140 struct page *page;
2141 gfp_t preferred_gfp;
2142
2143 /*
2144 * This is a two pass approach. The first pass will only try the
2145 * preferred nodes but skip the direct reclaim and allow the
2146 * allocation to fail, while the second pass will try all the
2147 * nodes in system.
2148 */
2149 preferred_gfp = gfp | __GFP_NOWARN;
2150 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2151 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2152 if (!page)
c0455116 2153 page = __alloc_pages(gfp, order, nid, NULL);
4c54d949
FT
2154
2155 return page;
2156}
2157
1da177e4 2158/**
adf88aa8 2159 * vma_alloc_folio - Allocate a folio for a VMA.
eb350739 2160 * @gfp: GFP flags.
adf88aa8 2161 * @order: Order of the folio.
2162 * @vma: Pointer to VMA or NULL if not available.
2163 * @addr: Virtual address of the allocation. Must be inside @vma.
eb350739 2164 * @hugepage: For hugepages try only the preferred node if possible.
1da177e4 2165 *
adf88aa8 2166 * Allocate a folio for a specific address in @vma, using the appropriate
2167 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock
2168 * of the mm_struct of the VMA to prevent it from going away. Should be
adf88aa8 2169 * used for all allocations for folios that will be mapped into user space.
1da177e4 2170 *
adf88aa8 2171 * Return: The folio on success or NULL if allocation fails.
1da177e4 2172 */
adf88aa8 2173struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
be1a13eb 2174 unsigned long addr, bool hugepage)
1da177e4 2175{
cc9a6c87 2176 struct mempolicy *pol;
be1a13eb 2177 int node = numa_node_id();
adf88aa8 2178 struct folio *folio;
04ec6264 2179 int preferred_nid;
be97a41b 2180 nodemask_t *nmask;
cc9a6c87 2181
dd6eecb9 2182 pol = get_vma_policy(vma, addr);
1da177e4 2183
0867a57c 2184 if (pol->mode == MPOL_INTERLEAVE) {
adf88aa8 2185 struct page *page;
0867a57c
VB
2186 unsigned nid;
2187
2188 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2189 mpol_cond_put(pol);
adf88aa8 2190 gfp |= __GFP_COMP;
0867a57c 2191 page = alloc_page_interleave(gfp, order, nid);
adf88aa8
MWO
2192 if (page && order > 1)
2193 prep_transhuge_page(page);
2194 folio = (struct folio *)page;
0867a57c 2195 goto out;
19deb769
DR
2196 }
2197
4c54d949 2198 if (pol->mode == MPOL_PREFERRED_MANY) {
adf88aa8
MWO
2199 struct page *page;
2200
c0455116 2201 node = policy_node(gfp, pol, node);
adf88aa8 2202 gfp |= __GFP_COMP;
4c54d949
FT
2203 page = alloc_pages_preferred_many(gfp, order, node, pol);
2204 mpol_cond_put(pol);
adf88aa8
MWO
2205 if (page && order > 1)
2206 prep_transhuge_page(page);
2207 folio = (struct folio *)page;
4c54d949
FT
2208 goto out;
2209 }
2210
19deb769
DR
2211 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2212 int hpage_node = node;
2213
2214 /*
2215 * For hugepage allocation and non-interleave policy which
2216 * allows the current node (or other explicitly preferred
2217 * node) we only try to allocate from the current/preferred
2218 * node and don't fall back to other nodes, as the cost of
2219 * remote accesses would likely offset THP benefits.
2220 *
b27abacc 2221 * If the policy is interleave or does not allow the current
2222 * node in its nodemask, we allocate the standard way.
2223 */
7858d7bc 2224 if (pol->mode == MPOL_PREFERRED)
269fbe72 2225 hpage_node = first_node(pol->nodes);
19deb769
DR
2226
2227 nmask = policy_nodemask(gfp, pol);
2228 if (!nmask || node_isset(hpage_node, *nmask)) {
2229 mpol_cond_put(pol);
cc638f32
VB
2230 /*
2231 * First, try to allocate THP only on local node, but
2232 * don't reclaim unnecessarily, just compact.
2233 */
adf88aa8
MWO
2234 folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2235 __GFP_NORETRY, order, hpage_node);
76e654cc
DR
2236
2237 /*
2238 * If hugepage allocations are configured to always
2239 * synchronous compact or the vma has been madvised
2240 * to prefer hugepage backing, retry allowing remote
cc638f32 2241 * memory with both reclaim and compact as well.
76e654cc 2242 */
adf88aa8
MWO
2243 if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2244 folio = __folio_alloc(gfp, order, hpage_node,
2245 nmask);
76e654cc 2246
19deb769
DR
2247 goto out;
2248 }
356ff8a9
DR
2249 }
2250
be97a41b 2251 nmask = policy_nodemask(gfp, pol);
04ec6264 2252 preferred_nid = policy_node(gfp, pol, node);
adf88aa8 2253 folio = __folio_alloc(gfp, order, preferred_nid, nmask);
d51e9894 2254 mpol_cond_put(pol);
be97a41b 2255out:
f584b680
MWO
2256 return folio;
2257}
adf88aa8 2258EXPORT_SYMBOL(vma_alloc_folio);
f584b680 2259
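/*
 * Editor's sketch, not part of mm/mempolicy.c: how a fault path might ask
 * for one order-0 folio at the faulting address while honouring the VMA's
 * mempolicy.  The caller is assumed to hold mmap_lock for vma->vm_mm, as
 * required above.  example_fault_folio() is a hypothetical name.
 */
static struct folio *example_fault_folio(struct vm_area_struct *vma,
                                         unsigned long addr)
{
        /* order 0, hugepage == false: no THP-specific node preference */
        return vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
}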
1da177e4 2260/**
2261 * alloc_pages - Allocate pages.
2262 * @gfp: GFP flags.
2263 * @order: Power of two of number of pages to allocate.
1da177e4 2264 *
2265 * Allocate 1 << @order contiguous pages. The physical address of the
2266 * first page is naturally aligned (eg an order-3 allocation will be aligned
2267 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2268 * process is honoured when in process context.
1da177e4 2269 *
2270 * Context: Can be called from any context, providing the appropriate GFP
2271 * flags are used.
2272 * Return: The page on success or NULL if allocation fails.
1da177e4 2273 */
d7f946d0 2274struct page *alloc_pages(gfp_t gfp, unsigned order)
1da177e4 2275{
8d90274b 2276 struct mempolicy *pol = &default_policy;
c0ff7453 2277 struct page *page;
1da177e4 2278
8d90274b
ON
2279 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2280 pol = get_task_policy(current);
52cd3b07
LS
2281
2282 /*
2283 * No reference counting needed for current->mempolicy
2284 * nor system default_policy
2285 */
45c4745a 2286 if (pol->mode == MPOL_INTERLEAVE)
c0ff7453 2287 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
4c54d949
FT
2288 else if (pol->mode == MPOL_PREFERRED_MANY)
2289 page = alloc_pages_preferred_many(gfp, order,
c0455116 2290 policy_node(gfp, pol, numa_node_id()), pol);
c0ff7453 2291 else
84172f4b 2292 page = __alloc_pages(gfp, order,
04ec6264 2293 policy_node(gfp, pol, numa_node_id()),
5c4b4be3 2294 policy_nodemask(gfp, pol));
cc9a6c87 2295
c0ff7453 2296 return page;
1da177e4 2297}
d7f946d0 2298EXPORT_SYMBOL(alloc_pages);
1da177e4 2299
cc09cb13
MWO
2300struct folio *folio_alloc(gfp_t gfp, unsigned order)
2301{
2302 struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2303
2304 if (page && order > 1)
2305 prep_transhuge_page(page);
2306 return (struct folio *)page;
2307}
2308EXPORT_SYMBOL(folio_alloc);
2309
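/*
 * Editor's sketch, not part of mm/mempolicy.c: a driver-style order-2
 * allocation through alloc_pages() with the matching release.  The
 * example_* names are hypothetical.
 */
static void *example_alloc_buffer(void)
{
        struct page *page = alloc_pages(GFP_KERNEL, 2); /* 4 contiguous pages */

        return page ? page_address(page) : NULL;
}

static void example_free_buffer(void *buf)
{
        if (buf)
                __free_pages(virt_to_page(buf), 2);
}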
c00b6b96
CW
2310static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2311 struct mempolicy *pol, unsigned long nr_pages,
2312 struct page **page_array)
2313{
2314 int nodes;
2315 unsigned long nr_pages_per_node;
2316 int delta;
2317 int i;
2318 unsigned long nr_allocated;
2319 unsigned long total_allocated = 0;
2320
2321 nodes = nodes_weight(pol->nodes);
2322 nr_pages_per_node = nr_pages / nodes;
2323 delta = nr_pages - nodes * nr_pages_per_node;
2324
2325 for (i = 0; i < nodes; i++) {
2326 if (delta) {
2327 nr_allocated = __alloc_pages_bulk(gfp,
2328 interleave_nodes(pol), NULL,
2329 nr_pages_per_node + 1, NULL,
2330 page_array);
2331 delta--;
2332 } else {
2333 nr_allocated = __alloc_pages_bulk(gfp,
2334 interleave_nodes(pol), NULL,
2335 nr_pages_per_node, NULL, page_array);
2336 }
2337
2338 page_array += nr_allocated;
2339 total_allocated += nr_allocated;
2340 }
2341
2342 return total_allocated;
2343}
2344
2345static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2346 struct mempolicy *pol, unsigned long nr_pages,
2347 struct page **page_array)
2348{
2349 gfp_t preferred_gfp;
2350 unsigned long nr_allocated = 0;
2351
2352 preferred_gfp = gfp | __GFP_NOWARN;
2353 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2354
2355 nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2356 nr_pages, NULL, page_array);
2357
2358 if (nr_allocated < nr_pages)
2359 nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2360 nr_pages - nr_allocated, NULL,
2361 page_array + nr_allocated);
2362 return nr_allocated;
2363}
2364
 2365/* Bulk page allocation and the mempolicy should be considered at the
 2366 * same time in some situations, such as vmalloc.
 2367 *
 2368 * This can accelerate memory allocation, especially for interleaved
 2369 * allocations.
 2370 */
2371unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2372 unsigned long nr_pages, struct page **page_array)
2373{
2374 struct mempolicy *pol = &default_policy;
2375
2376 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2377 pol = get_task_policy(current);
2378
2379 if (pol->mode == MPOL_INTERLEAVE)
2380 return alloc_pages_bulk_array_interleave(gfp, pol,
2381 nr_pages, page_array);
2382
2383 if (pol->mode == MPOL_PREFERRED_MANY)
2384 return alloc_pages_bulk_array_preferred_many(gfp,
2385 numa_node_id(), pol, nr_pages, page_array);
2386
2387 return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2388 policy_nodemask(gfp, pol), nr_pages, NULL,
2389 page_array);
2390}
2391
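/*
 * Editor's sketch, not part of mm/mempolicy.c: a vmalloc-style caller
 * filling an array of pages in one shot, letting the bulk allocator apply
 * the current task's mempolicy (e.g. interleaving) across the whole batch.
 * example_fill_pages() is a hypothetical name.
 */
static unsigned long example_fill_pages(struct page **pages, unsigned long nr)
{
        /* returns how many leading entries of pages[] were populated */
        return alloc_pages_bulk_array_mempolicy(GFP_KERNEL, nr, pages);
}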
ef0855d3
ON
2392int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2393{
2394 struct mempolicy *pol = mpol_dup(vma_policy(src));
2395
2396 if (IS_ERR(pol))
2397 return PTR_ERR(pol);
2398 dst->vm_policy = pol;
2399 return 0;
2400}
2401
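/*
 * Editor's sketch, not part of mm/mempolicy.c: the pattern used when a VMA
 * is split or copied and the new VMA must carry its own reference to the
 * same mempolicy.  example_copy_vma_policy() is a hypothetical name.
 */
static int example_copy_vma_policy(struct vm_area_struct *old,
                                   struct vm_area_struct *new)
{
        int err = vma_dup_policy(old, new);

        if (err)                        /* -ENOMEM from mpol_dup() */
                return err;
        /* ... continue setting up 'new' ... */
        return 0;
}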
4225399a 2402/*
846a16bf 2403 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
 2404 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 2405 * with the mems_allowed returned by cpuset_mems_allowed(). This
 2406 * keeps mempolicies cpuset relative after its cpuset moves. See
 2407 * further kernel/cpuset.c update_nodemask().
 2408 *
 2409 * current's mempolicy may be rebound by another task (the task that changes
 2410 * the cpuset's mems), so we need not do the rebind work for the current task.
4225399a 2411 */
4225399a 2412
846a16bf
LS
2413/* Slow path of a mempolicy duplicate */
2414struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2415{
2416 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2417
2418 if (!new)
2419 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2420
2421 /* task's mempolicy is protected by alloc_lock */
2422 if (old == current->mempolicy) {
2423 task_lock(current);
2424 *new = *old;
2425 task_unlock(current);
2426 } else
2427 *new = *old;
2428
4225399a
PJ
2429 if (current_cpuset_is_being_rebound()) {
2430 nodemask_t mems = cpuset_mems_allowed(current);
213980c0 2431 mpol_rebind_policy(new, &mems);
4225399a 2432 }
1da177e4 2433 atomic_set(&new->refcnt, 1);
1da177e4
LT
2434 return new;
2435}
2436
2437/* Slow path of a mempolicy comparison */
fcfb4dcc 2438bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2439{
2440 if (!a || !b)
fcfb4dcc 2441 return false;
45c4745a 2442 if (a->mode != b->mode)
fcfb4dcc 2443 return false;
19800502 2444 if (a->flags != b->flags)
fcfb4dcc 2445 return false;
c6018b4b
AK
2446 if (a->home_node != b->home_node)
2447 return false;
19800502
BL
2448 if (mpol_store_user_nodemask(a))
2449 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2450 return false;
19800502 2451
45c4745a 2452 switch (a->mode) {
19770b32 2453 case MPOL_BIND:
1da177e4 2454 case MPOL_INTERLEAVE:
1da177e4 2455 case MPOL_PREFERRED:
b27abacc 2456 case MPOL_PREFERRED_MANY:
269fbe72 2457 return !!nodes_equal(a->nodes, b->nodes);
7858d7bc
FT
2458 case MPOL_LOCAL:
2459 return true;
1da177e4
LT
2460 default:
2461 BUG();
fcfb4dcc 2462 return false;
1da177e4
LT
2463 }
2464}
2465
1da177e4
LT
2466/*
2467 * Shared memory backing store policy support.
2468 *
2469 * Remember policies even when nobody has shared memory mapped.
2470 * The policies are kept in Red-Black tree linked from the inode.
4a8c7bb5 2471 * They are protected by the sp->lock rwlock, which should be held
2472 * for any accesses to the tree.
2473 */
2474
4a8c7bb5
NZ
2475/*
2476 * lookup first element intersecting start-end. Caller holds sp->lock for
2477 * reading or for writing
2478 */
1da177e4
LT
2479static struct sp_node *
2480sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2481{
2482 struct rb_node *n = sp->root.rb_node;
2483
2484 while (n) {
2485 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2486
2487 if (start >= p->end)
2488 n = n->rb_right;
2489 else if (end <= p->start)
2490 n = n->rb_left;
2491 else
2492 break;
2493 }
2494 if (!n)
2495 return NULL;
2496 for (;;) {
2497 struct sp_node *w = NULL;
2498 struct rb_node *prev = rb_prev(n);
2499 if (!prev)
2500 break;
2501 w = rb_entry(prev, struct sp_node, nd);
2502 if (w->end <= start)
2503 break;
2504 n = prev;
2505 }
2506 return rb_entry(n, struct sp_node, nd);
2507}
2508
4a8c7bb5
NZ
2509/*
2510 * Insert a new shared policy into the list. Caller holds sp->lock for
2511 * writing.
2512 */
1da177e4
LT
2513static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2514{
2515 struct rb_node **p = &sp->root.rb_node;
2516 struct rb_node *parent = NULL;
2517 struct sp_node *nd;
2518
2519 while (*p) {
2520 parent = *p;
2521 nd = rb_entry(parent, struct sp_node, nd);
2522 if (new->start < nd->start)
2523 p = &(*p)->rb_left;
2524 else if (new->end > nd->end)
2525 p = &(*p)->rb_right;
2526 else
2527 BUG();
2528 }
2529 rb_link_node(&new->nd, parent, p);
2530 rb_insert_color(&new->nd, &sp->root);
140d5a49 2531 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
45c4745a 2532 new->policy ? new->policy->mode : 0);
1da177e4
LT
2533}
2534
2535/* Find shared policy intersecting idx */
2536struct mempolicy *
2537mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2538{
2539 struct mempolicy *pol = NULL;
2540 struct sp_node *sn;
2541
2542 if (!sp->root.rb_node)
2543 return NULL;
4a8c7bb5 2544 read_lock(&sp->lock);
1da177e4
LT
2545 sn = sp_lookup(sp, idx, idx+1);
2546 if (sn) {
2547 mpol_get(sn->policy);
2548 pol = sn->policy;
2549 }
4a8c7bb5 2550 read_unlock(&sp->lock);
1da177e4
LT
2551 return pol;
2552}
2553
63f74ca2
KM
2554static void sp_free(struct sp_node *n)
2555{
2556 mpol_put(n->policy);
2557 kmem_cache_free(sn_cache, n);
2558}
2559
771fb4d8
LS
2560/**
2561 * mpol_misplaced - check whether current page node is valid in policy
2562 *
2563 * @page: page to be checked
2564 * @vma: vm area where page mapped
2565 * @addr: virtual address where page mapped
2566 *
2567 * Lookup current policy node id for vma,addr and "compare to" page's
5f076944 2568 * node id. Policy determination "mimics" alloc_page_vma().
771fb4d8 2569 * Called from fault path where we know the vma and faulting address.
5f076944 2570 *
2571 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2572 * policy, or a suitable node ID to allocate a replacement page from.
771fb4d8
LS
2573 */
2574int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2575{
2576 struct mempolicy *pol;
c33d6c06 2577 struct zoneref *z;
771fb4d8
LS
2578 int curnid = page_to_nid(page);
2579 unsigned long pgoff;
90572890
PZ
2580 int thiscpu = raw_smp_processor_id();
2581 int thisnid = cpu_to_node(thiscpu);
98fa15f3 2582 int polnid = NUMA_NO_NODE;
062db293 2583 int ret = NUMA_NO_NODE;
771fb4d8 2584
dd6eecb9 2585 pol = get_vma_policy(vma, addr);
771fb4d8
LS
2586 if (!(pol->flags & MPOL_F_MOF))
2587 goto out;
2588
2589 switch (pol->mode) {
2590 case MPOL_INTERLEAVE:
771fb4d8
LS
2591 pgoff = vma->vm_pgoff;
2592 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
98c70baa 2593 polnid = offset_il_node(pol, pgoff);
771fb4d8
LS
2594 break;
2595
2596 case MPOL_PREFERRED:
b27abacc
DH
2597 if (node_isset(curnid, pol->nodes))
2598 goto out;
269fbe72 2599 polnid = first_node(pol->nodes);
7858d7bc
FT
2600 break;
2601
2602 case MPOL_LOCAL:
2603 polnid = numa_node_id();
771fb4d8
LS
2604 break;
2605
2606 case MPOL_BIND:
bda420b9
HY
2607 /* Optimize placement among multiple nodes via NUMA balancing */
2608 if (pol->flags & MPOL_F_MORON) {
269fbe72 2609 if (node_isset(thisnid, pol->nodes))
bda420b9
HY
2610 break;
2611 goto out;
2612 }
b27abacc 2613 fallthrough;
c33d6c06 2614
b27abacc 2615 case MPOL_PREFERRED_MANY:
771fb4d8 2616 /*
771fb4d8
LS
2617 * use current page if in policy nodemask,
2618 * else select nearest allowed node, if any.
2619 * If no allowed nodes, use current [!misplaced].
2620 */
269fbe72 2621 if (node_isset(curnid, pol->nodes))
771fb4d8 2622 goto out;
c33d6c06 2623 z = first_zones_zonelist(
771fb4d8
LS
2624 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2625 gfp_zone(GFP_HIGHUSER),
269fbe72 2626 &pol->nodes);
c1093b74 2627 polnid = zone_to_nid(z->zone);
771fb4d8
LS
2628 break;
2629
2630 default:
2631 BUG();
2632 }
5606e387
MG
2633
2634 /* Migrate the page towards the node whose CPU is referencing it */
e42c8ff2 2635 if (pol->flags & MPOL_F_MORON) {
90572890 2636 polnid = thisnid;
5606e387 2637
10f39042 2638 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
de1c9ce6 2639 goto out;
e42c8ff2
MG
2640 }
2641
771fb4d8
LS
2642 if (curnid != polnid)
2643 ret = polnid;
2644out:
2645 mpol_cond_put(pol);
2646
2647 return ret;
2648}
2649
c11600e4
DR
2650/*
2651 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2652 * dropped after task->mempolicy is set to NULL so that any allocation done as
2653 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2654 * policy.
2655 */
2656void mpol_put_task_policy(struct task_struct *task)
2657{
2658 struct mempolicy *pol;
2659
2660 task_lock(task);
2661 pol = task->mempolicy;
2662 task->mempolicy = NULL;
2663 task_unlock(task);
2664 mpol_put(pol);
2665}
2666
1da177e4
LT
2667static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2668{
140d5a49 2669 pr_debug("deleting %lx-%lx\n", n->start, n->end);
1da177e4 2670 rb_erase(&n->nd, &sp->root);
63f74ca2 2671 sp_free(n);
1da177e4
LT
2672}
2673
42288fe3
MG
2674static void sp_node_init(struct sp_node *node, unsigned long start,
2675 unsigned long end, struct mempolicy *pol)
2676{
2677 node->start = start;
2678 node->end = end;
2679 node->policy = pol;
2680}
2681
dbcb0f19
AB
2682static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2683 struct mempolicy *pol)
1da177e4 2684{
869833f2
KM
2685 struct sp_node *n;
2686 struct mempolicy *newpol;
1da177e4 2687
869833f2 2688 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2689 if (!n)
2690 return NULL;
869833f2
KM
2691
2692 newpol = mpol_dup(pol);
2693 if (IS_ERR(newpol)) {
2694 kmem_cache_free(sn_cache, n);
2695 return NULL;
2696 }
2697 newpol->flags |= MPOL_F_SHARED;
42288fe3 2698 sp_node_init(n, start, end, newpol);
869833f2 2699
1da177e4
LT
2700 return n;
2701}
2702
2703/* Replace a policy range. */
2704static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2705 unsigned long end, struct sp_node *new)
2706{
b22d127a 2707 struct sp_node *n;
42288fe3
MG
2708 struct sp_node *n_new = NULL;
2709 struct mempolicy *mpol_new = NULL;
b22d127a 2710 int ret = 0;
1da177e4 2711
42288fe3 2712restart:
4a8c7bb5 2713 write_lock(&sp->lock);
1da177e4
LT
2714 n = sp_lookup(sp, start, end);
2715 /* Take care of old policies in the same range. */
2716 while (n && n->start < end) {
2717 struct rb_node *next = rb_next(&n->nd);
2718 if (n->start >= start) {
2719 if (n->end <= end)
2720 sp_delete(sp, n);
2721 else
2722 n->start = end;
2723 } else {
2724 /* Old policy spanning whole new range. */
2725 if (n->end > end) {
42288fe3
MG
2726 if (!n_new)
2727 goto alloc_new;
2728
2729 *mpol_new = *n->policy;
2730 atomic_set(&mpol_new->refcnt, 1);
7880639c 2731 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2732 n->end = start;
5ca39575 2733 sp_insert(sp, n_new);
42288fe3
MG
2734 n_new = NULL;
2735 mpol_new = NULL;
1da177e4
LT
2736 break;
2737 } else
2738 n->end = start;
2739 }
2740 if (!next)
2741 break;
2742 n = rb_entry(next, struct sp_node, nd);
2743 }
2744 if (new)
2745 sp_insert(sp, new);
4a8c7bb5 2746 write_unlock(&sp->lock);
42288fe3
MG
2747 ret = 0;
2748
2749err_out:
2750 if (mpol_new)
2751 mpol_put(mpol_new);
2752 if (n_new)
2753 kmem_cache_free(sn_cache, n_new);
2754
b22d127a 2755 return ret;
42288fe3
MG
2756
2757alloc_new:
4a8c7bb5 2758 write_unlock(&sp->lock);
42288fe3
MG
2759 ret = -ENOMEM;
2760 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2761 if (!n_new)
2762 goto err_out;
2763 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2764 if (!mpol_new)
2765 goto err_out;
4ad09955 2766 atomic_set(&mpol_new->refcnt, 1);
42288fe3 2767 goto restart;
1da177e4
LT
2768}
2769
71fe804b
LS
2770/**
2771 * mpol_shared_policy_init - initialize shared policy for inode
2772 * @sp: pointer to inode shared policy
2773 * @mpol: struct mempolicy to install
2774 *
2775 * Install non-NULL @mpol in inode's shared policy rb-tree.
2776 * On entry, the current task has a reference on a non-NULL @mpol.
2777 * This must be released on exit.
4bfc4495 2778 * This is called from get_inode(), so we can use GFP_KERNEL.
2779 */
2780void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2781{
58568d2a
MX
2782 int ret;
2783
71fe804b 2784 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2785 rwlock_init(&sp->lock);
71fe804b
LS
2786
2787 if (mpol) {
2788 struct vm_area_struct pvma;
2789 struct mempolicy *new;
4bfc4495 2790 NODEMASK_SCRATCH(scratch);
71fe804b 2791
4bfc4495 2792 if (!scratch)
5c0c1654 2793 goto put_mpol;
71fe804b
LS
2794 /* contextualize the tmpfs mount point mempolicy */
2795 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
15d77835 2796 if (IS_ERR(new))
0cae3457 2797 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2798
2799 task_lock(current);
4bfc4495 2800 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
58568d2a 2801 task_unlock(current);
15d77835 2802 if (ret)
5c0c1654 2803 goto put_new;
71fe804b
LS
2804
2805 /* Create pseudo-vma that contains just the policy */
2c4541e2 2806 vma_init(&pvma, NULL);
71fe804b
LS
2807 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2808 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
15d77835 2809
5c0c1654 2810put_new:
71fe804b 2811 mpol_put(new); /* drop initial ref */
0cae3457 2812free_scratch:
4bfc4495 2813 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2814put_mpol:
2815 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2816 }
2817}
2818
1da177e4
LT
2819int mpol_set_shared_policy(struct shared_policy *info,
2820 struct vm_area_struct *vma, struct mempolicy *npol)
2821{
2822 int err;
2823 struct sp_node *new = NULL;
2824 unsigned long sz = vma_pages(vma);
2825
028fec41 2826 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1da177e4 2827 vma->vm_pgoff,
45c4745a 2828 sz, npol ? npol->mode : -1,
028fec41 2829 npol ? npol->flags : -1,
269fbe72 2830 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
1da177e4
LT
2831
2832 if (npol) {
2833 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2834 if (!new)
2835 return -ENOMEM;
2836 }
2837 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2838 if (err && new)
63f74ca2 2839 sp_free(new);
1da177e4
LT
2840 return err;
2841}
2842
2843/* Free a backing policy store on inode delete. */
2844void mpol_free_shared_policy(struct shared_policy *p)
2845{
2846 struct sp_node *n;
2847 struct rb_node *next;
2848
2849 if (!p->root.rb_node)
2850 return;
4a8c7bb5 2851 write_lock(&p->lock);
1da177e4
LT
2852 next = rb_first(&p->root);
2853 while (next) {
2854 n = rb_entry(next, struct sp_node, nd);
2855 next = rb_next(&n->nd);
63f74ca2 2856 sp_delete(p, n);
1da177e4 2857 }
4a8c7bb5 2858 write_unlock(&p->lock);
1da177e4
LT
2859}
2860
1a687c2e 2861#ifdef CONFIG_NUMA_BALANCING
c297663c 2862static int __initdata numabalancing_override;
1a687c2e
MG
2863
2864static void __init check_numabalancing_enable(void)
2865{
2866 bool numabalancing_default = false;
2867
2868 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2869 numabalancing_default = true;
2870
c297663c
MG
2871 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2872 if (numabalancing_override)
2873 set_numabalancing_state(numabalancing_override == 1);
2874
b0dc2b9b 2875 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 2876 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 2877 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
2878 set_numabalancing_state(numabalancing_default);
2879 }
2880}
2881
2882static int __init setup_numabalancing(char *str)
2883{
2884 int ret = 0;
2885 if (!str)
2886 goto out;
1a687c2e
MG
2887
2888 if (!strcmp(str, "enable")) {
c297663c 2889 numabalancing_override = 1;
1a687c2e
MG
2890 ret = 1;
2891 } else if (!strcmp(str, "disable")) {
c297663c 2892 numabalancing_override = -1;
1a687c2e
MG
2893 ret = 1;
2894 }
2895out:
2896 if (!ret)
4a404bea 2897 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
2898
2899 return ret;
2900}
2901__setup("numa_balancing=", setup_numabalancing);
2902#else
2903static inline void __init check_numabalancing_enable(void)
2904{
2905}
2906#endif /* CONFIG_NUMA_BALANCING */
2907
1da177e4
LT
2908/* assumes fs == KERNEL_DS */
2909void __init numa_policy_init(void)
2910{
b71636e2
PM
2911 nodemask_t interleave_nodes;
2912 unsigned long largest = 0;
2913 int nid, prefer = 0;
2914
1da177e4
LT
2915 policy_cache = kmem_cache_create("numa_policy",
2916 sizeof(struct mempolicy),
20c2df83 2917 0, SLAB_PANIC, NULL);
1da177e4
LT
2918
2919 sn_cache = kmem_cache_create("shared_policy_node",
2920 sizeof(struct sp_node),
20c2df83 2921 0, SLAB_PANIC, NULL);
1da177e4 2922
5606e387
MG
2923 for_each_node(nid) {
2924 preferred_node_policy[nid] = (struct mempolicy) {
2925 .refcnt = ATOMIC_INIT(1),
2926 .mode = MPOL_PREFERRED,
2927 .flags = MPOL_F_MOF | MPOL_F_MORON,
269fbe72 2928 .nodes = nodemask_of_node(nid),
5606e387
MG
2929 };
2930 }
2931
b71636e2
PM
2932 /*
2933 * Set interleaving policy for system init. Interleaving is only
2934 * enabled across suitably sized nodes (default is >= 16MB), or
2935 * fall back to the largest node if they're all smaller.
2936 */
2937 nodes_clear(interleave_nodes);
01f13bd6 2938 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
2939 unsigned long total_pages = node_present_pages(nid);
2940
2941 /* Preserve the largest node */
2942 if (largest < total_pages) {
2943 largest = total_pages;
2944 prefer = nid;
2945 }
2946
2947 /* Interleave this node? */
2948 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2949 node_set(nid, interleave_nodes);
2950 }
2951
2952 /* All too small, use the largest */
2953 if (unlikely(nodes_empty(interleave_nodes)))
2954 node_set(prefer, interleave_nodes);
1da177e4 2955
028fec41 2956 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 2957 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
2958
2959 check_numabalancing_enable();
1da177e4
LT
2960}
2961
8bccd85f 2962/* Reset policy of current process to default */
1da177e4
LT
2963void numa_default_policy(void)
2964{
028fec41 2965 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 2966}
68860ec1 2967
095f1fc4
LS
2968/*
2969 * Parse and format mempolicy from/to strings
2970 */
2971
345ace9c
LS
2972static const char * const policy_modes[] =
2973{
2974 [MPOL_DEFAULT] = "default",
2975 [MPOL_PREFERRED] = "prefer",
2976 [MPOL_BIND] = "bind",
2977 [MPOL_INTERLEAVE] = "interleave",
d3a71033 2978 [MPOL_LOCAL] = "local",
b27abacc 2979 [MPOL_PREFERRED_MANY] = "prefer (many)",
345ace9c 2980};
1a75a6c8 2981
095f1fc4
LS
2982
2983#ifdef CONFIG_TMPFS
2984/**
f2a07f40 2985 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 2986 * @str: string containing mempolicy to parse
71fe804b 2987 * @mpol: pointer to struct mempolicy pointer, returned on success.
2988 *
2989 * Format of input:
2990 * <mode>[=<flags>][:<nodelist>]
2991 *
dad5b023 2992 * Return: %0 on success, else %1
095f1fc4 2993 */
a7a88b23 2994int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 2995{
71fe804b 2996 struct mempolicy *new = NULL;
f2a07f40 2997 unsigned short mode_flags;
71fe804b 2998 nodemask_t nodes;
095f1fc4
LS
2999 char *nodelist = strchr(str, ':');
3000 char *flags = strchr(str, '=');
dedf2c73 3001 int err = 1, mode;
095f1fc4 3002
c7a91bc7
DC
3003 if (flags)
3004 *flags++ = '\0'; /* terminate mode string */
3005
095f1fc4
LS
3006 if (nodelist) {
3007 /* NUL-terminate mode or flags string */
3008 *nodelist++ = '\0';
71fe804b 3009 if (nodelist_parse(nodelist, nodes))
095f1fc4 3010 goto out;
01f13bd6 3011 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 3012 goto out;
71fe804b
LS
3013 } else
3014 nodes_clear(nodes);
3015
dedf2c73 3016 mode = match_string(policy_modes, MPOL_MAX, str);
3017 if (mode < 0)
095f1fc4
LS
3018 goto out;
3019
71fe804b 3020 switch (mode) {
095f1fc4 3021 case MPOL_PREFERRED:
71fe804b 3022 /*
3023 * Insist on a nodelist of one node only, although later
3024 * we use first_node(nodes) to grab a single node, so here
3025 * nodelist (or nodes) cannot be empty.
71fe804b 3026 */
095f1fc4
LS
3027 if (nodelist) {
3028 char *rest = nodelist;
3029 while (isdigit(*rest))
3030 rest++;
926f2ae0
KM
3031 if (*rest)
3032 goto out;
aa9f7d51
RD
3033 if (nodes_empty(nodes))
3034 goto out;
095f1fc4
LS
3035 }
3036 break;
095f1fc4
LS
3037 case MPOL_INTERLEAVE:
3038 /*
3039 * Default to online nodes with memory if no nodelist
3040 */
3041 if (!nodelist)
01f13bd6 3042 nodes = node_states[N_MEMORY];
3f226aa1 3043 break;
71fe804b 3044 case MPOL_LOCAL:
3f226aa1 3045 /*
71fe804b 3046 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 3047 */
71fe804b 3048 if (nodelist)
3f226aa1 3049 goto out;
3f226aa1 3050 break;
413b43de
RT
3051 case MPOL_DEFAULT:
3052 /*
 3053 * Insist on an empty nodelist
3054 */
3055 if (!nodelist)
3056 err = 0;
3057 goto out;
b27abacc 3058 case MPOL_PREFERRED_MANY:
d69b2e63
KM
3059 case MPOL_BIND:
3060 /*
3061 * Insist on a nodelist
3062 */
3063 if (!nodelist)
3064 goto out;
095f1fc4
LS
3065 }
3066
71fe804b 3067 mode_flags = 0;
095f1fc4
LS
3068 if (flags) {
3069 /*
3070 * Currently, we only support two mutually exclusive
3071 * mode flags.
3072 */
3073 if (!strcmp(flags, "static"))
71fe804b 3074 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 3075 else if (!strcmp(flags, "relative"))
71fe804b 3076 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 3077 else
926f2ae0 3078 goto out;
095f1fc4 3079 }
71fe804b
LS
3080
3081 new = mpol_new(mode, mode_flags, &nodes);
3082 if (IS_ERR(new))
926f2ae0
KM
3083 goto out;
3084
f2a07f40
HD
3085 /*
3086 * Save nodes for mpol_to_str() to show the tmpfs mount options
3087 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3088 */
269fbe72
BW
3089 if (mode != MPOL_PREFERRED) {
3090 new->nodes = nodes;
3091 } else if (nodelist) {
3092 nodes_clear(new->nodes);
3093 node_set(first_node(nodes), new->nodes);
3094 } else {
7858d7bc 3095 new->mode = MPOL_LOCAL;
269fbe72 3096 }
f2a07f40
HD
3097
3098 /*
3099 * Save nodes for contextualization: this will be used to "clone"
3100 * the mempolicy in a specific context [cpuset] at a later time.
3101 */
3102 new->w.user_nodemask = nodes;
3103
926f2ae0 3104 err = 0;
71fe804b 3105
095f1fc4
LS
3106out:
3107 /* Restore string for error message */
3108 if (nodelist)
3109 *--nodelist = ':';
3110 if (flags)
3111 *--flags = '=';
71fe804b
LS
3112 if (!err)
3113 *mpol = new;
095f1fc4
LS
3114 return err;
3115}
3116#endif /* CONFIG_TMPFS */
3117
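/*
 * Editor's sketch, not part of mm/mempolicy.c (CONFIG_TMPFS only): parsing
 * a tmpfs-style "mpol=" option string into a mempolicy and dropping the
 * reference when done.  The string is modified in place, so it must be
 * writable.  example_parse_mpol() is a hypothetical name.
 */
static void example_parse_mpol(void)
{
        struct mempolicy *mpol;
        char opt[] = "interleave:0-3";

        if (!mpol_parse_str(opt, &mpol)) {
                /* a real user would install mpol; here we just drop the ref */
                mpol_put(mpol);
        }
}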
71fe804b
LS
3118/**
3119 * mpol_to_str - format a mempolicy structure for printing
3120 * @buffer: to contain formatted mempolicy string
3121 * @maxlen: length of @buffer
3122 * @pol: pointer to mempolicy to be formatted
71fe804b 3123 *
3124 * Convert @pol into a string. If @buffer is too short, truncate the string.
3125 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3126 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 3127 */
948927ee 3128void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
3129{
3130 char *p = buffer;
948927ee
DR
3131 nodemask_t nodes = NODE_MASK_NONE;
3132 unsigned short mode = MPOL_DEFAULT;
3133 unsigned short flags = 0;
2291990a 3134
8790c71a 3135 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 3136 mode = pol->mode;
948927ee
DR
3137 flags = pol->flags;
3138 }
bea904d5 3139
1a75a6c8
CL
3140 switch (mode) {
3141 case MPOL_DEFAULT:
7858d7bc 3142 case MPOL_LOCAL:
1a75a6c8 3143 break;
1a75a6c8 3144 case MPOL_PREFERRED:
b27abacc 3145 case MPOL_PREFERRED_MANY:
1a75a6c8 3146 case MPOL_BIND:
1a75a6c8 3147 case MPOL_INTERLEAVE:
269fbe72 3148 nodes = pol->nodes;
1a75a6c8 3149 break;
1a75a6c8 3150 default:
948927ee
DR
3151 WARN_ON_ONCE(1);
3152 snprintf(p, maxlen, "unknown");
3153 return;
1a75a6c8
CL
3154 }
3155
b7a9f420 3156 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 3157
fc36b8d3 3158 if (flags & MPOL_MODE_FLAGS) {
948927ee 3159 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 3160
2291990a
LS
3161 /*
3162 * Currently, the only defined flags are mutually exclusive
3163 */
f5b087b5 3164 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
3165 p += snprintf(p, buffer + maxlen - p, "static");
3166 else if (flags & MPOL_F_RELATIVE_NODES)
3167 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
3168 }
3169
9e763e0f
TH
3170 if (!nodes_empty(nodes))
3171 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3172 nodemask_pr_args(&nodes));
1a75a6c8 3173}