// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support six policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave	Allocate memory interleaved over a set of nodes,
 *		with normal fallback if it fails.
 *		For VMA based allocations this interleaves based on the
 *		offset into the backing object or offset into the mapping
 *		for anonymous memory. For process policy a process counter
 *		is used.
 *
 * weighted interleave
 *		Allocate memory interleaved over a set of nodes based on
 *		a set of weights (per-node), with normal fallback if it
 *		fails.  Otherwise operates the same as interleave.
 *		Example: nodeset(0,1) & weights (2,1) - 2 pages allocated
 *		on node 0 for every 1 page allocated on node 1.
 *
 * bind		Only allocate memory on a specific set of nodes,
 *		no fallback.
 *		FIXME: memory is allocated starting with the first node
 *		to the last. It would be better if bind would truly restrict
 *		the allocation to memory nodes instead.
 *
 * preferred	Try a specific node first before normal fallback.
 *		As a special case NUMA_NO_NODE here means do the allocation
 *		on the local CPU. This is normally identical to default,
 *		but useful to set in a VMA when you have a non default
 *		process policy.
 *
 * preferred many	Try a set of nodes first before normal fallback. This is
 *		similar to preferred without the special case.
 *
 * default	Allocate on the local node first, or when on a VMA
 *		use the process policy. This is what Linux always did
 *		in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmem/tmpfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/
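
/*
 * Illustrative userspace usage (not part of this file): with the
 * set_mempolicy(2) and mbind(2) wrappers from libnuma's <numaif.h>,
 * interleaving a task's future allocations across nodes 0 and 1 could
 * look like
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8);
 *
 * while mbind() applies a policy to an existing mapping instead of to
 * the whole task.
 */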

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */
#define MPOL_MF_WRLOCK       (MPOL_MF_INTERNAL << 2)	/* Write-lock walked vmas */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/*
 * iw_table is the sysfs-set interleave weight table, in which a value of 0
 * denotes that the system-default value should be used. A NULL iw_table
 * also denotes that system-default values should be used. Until the
 * system-default table is implemented, the system-default is always 1.
 *
 * iw_table is RCU protected
 */
static u8 __rcu *iw_table;
static DEFINE_MUTEX(iw_table_lock);

static u8 get_il_weight(int node)
{
	u8 *table;
	u8 weight;

	rcu_read_lock();
	table = rcu_dereference(iw_table);
	/* if no iw_table, use system default */
	weight = table ? table[node] : 1;
	/* if value in iw_table is 0, use system default */
	weight = weight ? weight : 1;
	rcu_read_unlock();
	return weight;
}

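/*
 * Note: get_il_weight() above falls back to a weight of 1 whenever no
 * table has been published or the per-node entry is 0, so weighted
 * interleave degenerates to plain interleave until weights are set
 * (typically via the mempolicy sysfs interface; the exact path is an
 * administrative detail outside this file).
 */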
/**
 * numa_nearest_node - Find nearest node by state
 * @node: Node id to start the search
 * @state: State to filter the search
 *
 * Lookup the closest node by distance if @node is not in state.
 *
 * Return: this @node if it is in state, otherwise the closest node by distance
 */
int numa_nearest_node(int node, unsigned int state)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (state >= NR_NODE_STATES)
		return -EINVAL;

	if (node == NUMA_NO_NODE || node_state(node, state))
		return node;

	min_node = node;
	for_each_node_state(n, state) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_nearest_node);

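/*
 * For example (illustrative only): numa_nearest_node(3, N_MEMORY)
 * returns 3 if node 3 has memory, otherwise the memory node with the
 * smallest node_distance() from node 3.
 */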
struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

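/*
 * Worked example for mpol_relative_nodemask() (illustrative only):
 * with *orig = {0,1} and *rel = {4,5,6}, nodes_fold() keeps {0,1}
 * (folded modulo the three set bits of *rel) and nodes_onto() then maps
 * relative positions 0 and 1 onto the first and second set bits of
 * *rel, giving *ret = {4,5}.
 */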
static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) and local memory policies are not subject
	 * to any remapping.  They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization.  You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);

	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;
	policy->home_node = NUMA_NO_NODE;

	return policy;
}

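/*
 * Note that mpol_new() deliberately returns NULL (rather than an
 * ERR_PTR) for MPOL_DEFAULT: callers such as do_mbind() treat a NULL
 * policy as "no policy", falling back to the task or system default.
 */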
/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *pol)
{
	if (!atomic_dec_and_test(&pol->refcnt))
		return;
	kmem_cache_free(policy_cache, pol);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
								*nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol || pol->mode == MPOL_LOCAL)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */
void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	for_each_vma(vmi, vma) {
		vma_start_write(vma);
		mpol_rebind_policy(vma->vm_policy, new);
	}
	mmap_write_unlock(mm);
}

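/*
 * mpol_rebind_task() and mpol_rebind_mm() are the entry points used by
 * the cpuset code (an assumption about the callers, which live outside
 * this file) when a task's set of allowed memory nodes changes, so that
 * existing policies keep referring to nodes the task may actually use.
 */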
static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_WEIGHTED_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
};

static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
				unsigned long flags);
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
				pgoff_t ilx, int *nid);

static bool strictly_unmovable(unsigned long flags)
{
	/*
	 * STRICT without MOVE flags lets do_mbind() fail immediately with -EIO
	 * if any misplaced page is found.
	 */
	return (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ==
			 MPOL_MF_STRICT;
}

struct migration_mpol {		/* for alloc_migration_target_by_mpol() */
	struct mempolicy *pol;
	pgoff_t ilx;
};

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
	struct folio *large;		/* note last large folio encountered */
	long nr_failed;			/* could not be isolated at this time */
};

/*
 * Check if the folio's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the complement of qp->nmask.
 */
static inline bool queue_folio_required(struct folio *folio,
					struct queue_pages *qp)
{
	int nid = folio_nid(folio);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

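/*
 * Example: do_mbind() passes MPOL_MF_INVERT when queueing pages, so
 * queue_folio_required() selects folios that are *not* on the requested
 * nodes - exactly the ones that need migrating to satisfy the policy.
 */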
static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
{
	struct folio *folio;
	struct queue_pages *qp = walk->private;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		qp->nr_failed++;
		return;
	}
	folio = pmd_folio(*pmd);
	if (is_huge_zero_folio(folio)) {
		walk->action = ACTION_CONTINUE;
		return;
	}
	if (!queue_folio_required(folio, qp))
		return;
	if (!(qp->flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
	    !vma_migratable(walk->vma) ||
	    !migrate_folio_add(folio, qp->pagelist, qp->flags))
		qp->nr_failed++;
}

/*
 * Scan through folios, checking if they satisfy the required conditions,
 * moving them from the LRU to a local pagelist for migration if they do.
 *
 * queue_folios_pte_range() has two possible return values:
 * 0 - continue walking to scan for more, even if an existing folio on the
 *     wrong node could not be isolated and queued for migration.
 * -EIO - only MPOL_MF_STRICT was specified, without MPOL_MF_MOVE or ..._ALL,
 *        and an existing folio was on a node that does not follow the policy.
 */
static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct folio *folio;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	pte_t *pte, *mapped_pte;
	pte_t ptent;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		queue_folios_pmd(pmd, walk);
		spin_unlock(ptl);
		goto out;
	}

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);
		if (pte_none(ptent))
			continue;
		if (!pte_present(ptent)) {
			if (is_migration_entry(pte_to_swp_entry(ptent)))
				qp->nr_failed++;
			continue;
		}
		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;
		/*
		 * vm_normal_folio() filters out zero pages, but there might
		 * still be reserved folios to skip, perhaps in a VDSO.
		 */
		if (folio_test_reserved(folio))
			continue;
		if (!queue_folio_required(folio, qp))
			continue;
		if (folio_test_large(folio)) {
			/*
			 * A large folio can only be isolated from LRU once,
			 * but may be mapped by many PTEs (and Copy-On-Write may
			 * intersperse PTEs of other, order 0, folios).  This is
			 * a common case, so don't mistake it for failure (but
			 * there can be other cases of multi-mapped pages which
			 * this quick check does not help to filter out - and a
			 * search of the pagelist might grow to be prohibitive).
			 *
			 * migrate_pages(&pagelist) returns nr_failed folios, so
			 * check "large" now so that queue_pages_range() returns
			 * a comparable nr_failed folios.  This does imply that
			 * if folio could not be isolated for some racy reason
			 * at its first PTE, later PTEs will not give it another
			 * chance of isolation; but keeps the accounting simple.
			 */
			if (folio == qp->large)
				continue;
			qp->large = folio;
		}
		if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
		    !vma_migratable(vma) ||
		    !migrate_folio_add(folio, qp->pagelist, flags)) {
			qp->nr_failed++;
			if (strictly_unmovable(flags))
				break;
		}
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();
out:
	if (qp->nr_failed && strictly_unmovable(flags))
		return -EIO;
	return 0;
}

static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	struct folio *folio;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry)) {
		if (unlikely(is_hugetlb_entry_migration(entry)))
			qp->nr_failed++;
		goto unlock;
	}
	folio = pfn_folio(pte_pfn(entry));
	if (!queue_folio_required(folio, qp))
		goto unlock;
	if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
	    !vma_migratable(walk->vma)) {
		qp->nr_failed++;
		goto unlock;
	}
	/*
	 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
	 * Choosing not to migrate a shared folio is not counted as a failure.
	 *
	 * See folio_likely_mapped_shared() on possible imprecision when we
	 * cannot easily detect if a folio is shared.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) ||
	    (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte)))
		if (!isolate_hugetlb(folio, qp->pagelist))
			qp->nr_failed++;
unlock:
	spin_unlock(ptl);
	if (qp->nr_failed && strictly_unmovable(flags))
		return -EIO;
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	struct mmu_gather tlb;
	long nr_updated;

	tlb_gather_mmu(&tlb, vma->vm_mm);

	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
	if (nr_updated > 0)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	tlb_finish_mmu(&tlb);

	return nr_updated;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *next, *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	next = find_vma(vma->vm_mm, vma->vm_end);
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!next || vma->vm_end < next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	/*
	 * Check page nodes, and queue pages to move, in the current vma.
	 * But if no moving, and no strict checking, the scan can be skipped.
	 */
	if (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_folios_hugetlb,
	.pmd_entry		= queue_folios_pte_range,
	.test_walk		= queue_pages_test_walk,
	.walk_lock		= PGWALK_RDLOCK,
};

static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = {
	.hugetlb_entry		= queue_folios_hugetlb,
	.pmd_entry		= queue_folios_pte_range,
	.test_walk		= queue_pages_test_walk,
	.walk_lock		= PGWALK_WRLOCK,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are not on the required set of @nodes,
 * and migration is allowed, they are isolated and queued to @pagelist.
 *
 * queue_pages_range() may return:
 * 0 - all pages already on the right node, or successfully queued for moving
 *     (or neither strict checking nor moving requested: only range checking).
 * >0 - this number of misplaced folios could not be queued for moving
 *      (a hugetlbfs page or a transparent huge page being counted as 1).
 * -EIO - a misplaced page found, when MPOL_MF_STRICT specified without MOVEs.
 * -EFAULT - a hole in the memory range, when MPOL_MF_DISCONTIG_OK unspecified.
 */
static long
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};
	const struct mm_walk_ops *ops = (flags & MPOL_MF_WRLOCK) ?
			&queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;

	err = walk_page_range(mm, start, end, ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err ? : qp.nr_failed;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
				struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	vma_assert_write_locked(vma);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Split or merge the VMA (if required) and apply the new policy */
static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
		struct vm_area_struct **prev, unsigned long start,
		unsigned long end, struct mempolicy *new_pol)
{
	unsigned long vmstart, vmend;

	vmend = min(end, vma->vm_end);
	if (start > vma->vm_start) {
		*prev = vma;
		vmstart = start;
	} else {
		vmstart = vma->vm_start;
	}

	if (mpol_equal(vma->vm_policy, new_pol)) {
		*prev = vma;
		return 0;
	}

	vma = vma_modify_policy(vmi, *prev, vma, vmstart, vmend, new_pol);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	*prev = vma;
	return vma_replace_policy(vma, new_pol);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}

	old = current->mempolicy;
	current->mempolicy = new;
	if (new && (new->mode == MPOL_INTERLEAVE ||
		    new->mode == MPOL_WEIGHTED_INTERLEAVE)) {
		current->il_prev = MAX_NUMNODES-1;
		current->il_weight = 0;
	}
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

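/*
 * Resetting il_prev and il_weight above makes a new (weighted)
 * interleave policy start allocating from the first node of the
 * nodemask again, rather than continuing wherever the previous policy
 * left off.
 */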
/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *pol, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (pol == &default_policy)
		return;

	switch (pol->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
	case MPOL_PREFERRED:
	case MPOL_PREFERRED_MANY:
	case MPOL_WEIGHTED_INTERLEAVE:
		*nodes = pol->nodes;
		break;
	case MPOL_LOCAL:
		/* return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int ret;

	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
	if (ret > 0) {
		ret = page_to_nid(p);
		put_page(p);
	}
	return ret;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		pgoff_t ilx;		/* ignored here */
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		pol = __get_vma_policy(vma, addr, &ilx);
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, because we are about to
			 * drop the mmap_lock, after which only "pol" remains
			 * valid, "vma" is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			mmap_read_unlock(mm);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->nodes);
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
			if (current->il_weight)
				*policy = current->il_prev;
			else
				*policy = next_node_in(current->il_prev,
						       pol->nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}

#ifdef CONFIG_MIGRATION
static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
				unsigned long flags)
{
	/*
	 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
	 * Choosing not to migrate a shared folio is not counted as a failure.
	 *
	 * See folio_likely_mapped_shared() on possible imprecision when we
	 * cannot easily detect if a folio is shared.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) {
		if (folio_isolate_lru(folio)) {
			list_add_tail(&folio->lru, foliolist);
			node_stat_mod_folio(folio,
				NR_ISOLATED_ANON + folio_is_file_lru(folio),
				folio_nr_pages(folio));
		} else {
			/*
			 * Non-movable folio may reach here.  And, there may be
			 * temporary off LRU folios or non-LRU movable folios.
			 * Treat them as unmovable folios since they can't be
			 * isolated, so they can't be moved at the moment.
			 */
			return false;
		}
	}
	return true;
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static long migrate_to_node(struct mm_struct *mm, int source, int dest,
			    int flags)
{
	nodemask_t nmask;
	struct vm_area_struct *vma;
	LIST_HEAD(pagelist);
	long nr_failed;
	long err = 0;
	struct migration_target_control mtc = {
		.nid = dest,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
		.reason = MR_SYSCALL,
	};

	nodes_clear(nmask);
	node_set(source, nmask);

	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));

	mmap_read_lock(mm);
	vma = find_vma(mm, 0);

	/*
	 * This does not migrate the range, but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail,
	 * but passes back the count of pages which could not be isolated.
	 */
	nr_failed = queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
				      flags | MPOL_MF_DISCONTIG_OK, &pagelist);
	mmap_read_unlock(mm);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	if (err >= 0)
		err += nr_failed;
	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	long nr_failed = 0;
	long err = 0;
	nodemask_t tmp;

	lru_cache_disable();

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning 'tmp', we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */

	tmp = *from;
	while (!nodes_empty(tmp)) {
		int s, d;
		int source = NUMA_NO_NODE;
		int dest = 0;

		for_each_node_mask(s, tmp) {

			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However if the number of source nodes is not equal to
			 * the number of destination nodes we can not preserve
			 * this node relative relationship.  In that case, skip
			 * copying memory from a node that is in the destination
			 * mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
			 */

			if ((nodes_weight(*from) != nodes_weight(*to)) &&
						(node_isset(s, *to)))
				continue;

			d = node_remap(s, *from, *to);
			if (s == d)
				continue;

			source = s;	/* Node moved. Memorize */
			dest = d;

			/* dest not in remaining from nodes? */
			if (!node_isset(dest, tmp))
				break;
		}
		if (source == NUMA_NO_NODE)
			break;

		node_clear(source, tmp);
		err = migrate_to_node(mm, source, dest, flags);
		if (err > 0)
			nr_failed += err;
		if (err < 0)
			break;
	}

	lru_cache_enable();
	if (err < 0)
		return err;
	return (nr_failed < INT_MAX) ? nr_failed : INT_MAX;
}

/*
 * Allocate a new folio for page migration, according to NUMA mempolicy.
 */
static struct folio *alloc_migration_target_by_mpol(struct folio *src,
						    unsigned long private)
{
	struct migration_mpol *mmpol = (struct migration_mpol *)private;
	struct mempolicy *pol = mmpol->pol;
	pgoff_t ilx = mmpol->ilx;
	struct page *page;
	unsigned int order;
	int nid = numa_node_id();
	gfp_t gfp;

	order = folio_order(src);
	ilx += src->index >> order;

	if (folio_test_hugetlb(src)) {
		nodemask_t *nodemask;
		struct hstate *h;

		h = folio_hstate(src);
		gfp = htlb_alloc_mask(h);
		nodemask = policy_nodemask(gfp, pol, ilx, &nid);
		return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp,
				htlb_allow_alloc_fallback(MR_MEMPOLICY_MBIND));
	}

	if (folio_test_large(src))
		gfp = GFP_TRANSHUGE;
	else
		gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL | __GFP_COMP;

	page = alloc_pages_mpol(gfp, order, pol, ilx, nid);
	return page_rmappable_folio(page);
}
#else

static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
				unsigned long flags)
{
	return false;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	return -ENOSYS;
}

static struct folio *alloc_migration_target_by_mpol(struct folio *src,
						    unsigned long private)
{
	return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
		     unsigned short mode, unsigned short mode_flags,
		     nodemask_t *nmask, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct vma_iterator vmi;
	struct migration_mpol mmpol;
	struct mempolicy *new;
	unsigned long end;
	long err;
	long nr_failed;
	LIST_HEAD(pagelist);

	if (flags & ~(unsigned long)MPOL_MF_VALID)
		return -EINVAL;
	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	if (start & ~PAGE_MASK)
		return -EINVAL;

	if (mode == MPOL_DEFAULT)
		flags &= ~MPOL_MF_STRICT;

	len = PAGE_ALIGN(len);
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;

	new = mpol_new(mode, mode_flags, nmask);
	if (IS_ERR(new))
		return PTR_ERR(new);

	/*
	 * If we are using the default policy then operation
	 * on discontinuous address spaces is okay after all
	 */
	if (!new)
		flags |= MPOL_MF_DISCONTIG_OK;

	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		lru_cache_disable();
	{
		NODEMASK_SCRATCH(scratch);
		if (scratch) {
			mmap_write_lock(mm);
			err = mpol_set_nodemask(new, nmask, scratch);
			if (err)
				mmap_write_unlock(mm);
		} else
			err = -ENOMEM;
		NODEMASK_SCRATCH_FREE(scratch);
	}
	if (err)
		goto mpol_out;

	/*
	 * Lock the VMAs before scanning for pages to migrate,
	 * to ensure we don't miss a concurrently inserted page.
	 */
	nr_failed = queue_pages_range(mm, start, end, nmask,
			flags | MPOL_MF_INVERT | MPOL_MF_WRLOCK, &pagelist);

	if (nr_failed < 0) {
		err = nr_failed;
		nr_failed = 0;
	} else {
		vma_iter_init(&vmi, mm, start);
		prev = vma_prev(&vmi);
		for_each_vma_range(vmi, vma, end) {
			err = mbind_range(&vmi, vma, &prev, start, end, new);
			if (err)
				break;
		}
	}

	if (!err && !list_empty(&pagelist)) {
		/* Convert MPOL_DEFAULT's NULL to task or default policy */
		if (!new) {
			new = get_task_policy(current);
			mpol_get(new);
		}
		mmpol.pol = new;
		mmpol.ilx = 0;

		/*
		 * In the interleaved case, attempt to allocate on exactly the
		 * targeted nodes, for the first VMA to be migrated; for later
		 * VMAs, the nodes will still be interleaved from the targeted
		 * nodemask, but one by one may be selected differently.
		 */
		if (new->mode == MPOL_INTERLEAVE ||
		    new->mode == MPOL_WEIGHTED_INTERLEAVE) {
			struct folio *folio;
			unsigned int order;
			unsigned long addr = -EFAULT;

			list_for_each_entry(folio, &pagelist, lru) {
				if (!folio_test_ksm(folio))
					break;
			}
			if (!list_entry_is_head(folio, &pagelist, lru)) {
				vma_iter_init(&vmi, mm, start);
				for_each_vma_range(vmi, vma, end) {
					addr = page_address_in_vma(
						folio_page(folio, 0), vma);
					if (addr != -EFAULT)
						break;
				}
			}
			if (addr != -EFAULT) {
				order = folio_order(folio);
				/* We already know the pol, but not the ilx */
				mpol_cond_put(get_vma_policy(vma, addr, order,
							     &mmpol.ilx));
				/* Set base from which to increment by index */
				mmpol.ilx -= folio->index >> order;
			}
		}
	}

	mmap_write_unlock(mm);

	if (!err && !list_empty(&pagelist)) {
		nr_failed |= migrate_pages(&pagelist,
				alloc_migration_target_by_mpol, NULL,
				(unsigned long)&mmpol, MIGRATE_SYNC,
				MR_MEMPOLICY_MBIND, NULL);
	}

	if (nr_failed && (flags & MPOL_MF_STRICT))
		err = -EIO;
	if (!list_empty(&pagelist))
		putback_movable_pages(&pagelist);
mpol_out:
	mpol_put(new);
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		lru_cache_enable();
	return err;
}

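/*
 * Illustrative userspace counterpart of do_mbind() (not part of this
 * file; assumes libnuma's <numaif.h>): binding an existing mapping to
 * node 0 and migrating its pages there could look like
 *
 *	unsigned long nodemask = 1UL << 0;
 *	mbind(addr, length, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
 *	      MPOL_MF_MOVE);
 *
 * where addr/length describe a mapping obtained earlier, e.g. from mmap().
 */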
/*
 * User space interface with variable sized bitmaps for nodelists.
 */
static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
		      unsigned long maxnode)
{
	unsigned long nlongs = BITS_TO_LONGS(maxnode);
	int ret;

	if (in_compat_syscall())
		ret = compat_get_bitmap(mask,
					(const compat_ulong_t __user *)nmask,
					maxnode);
	else
		ret = copy_from_user(mask, nmask,
				     nlongs * sizeof(unsigned long));

	if (ret)
		return -EFAULT;

	if (maxnode % BITS_PER_LONG)
		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;

	return 0;
}

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
		     unsigned long maxnode)
{
	--maxnode;
	nodes_clear(*nodes);
	if (maxnode == 0 || !nmask)
		return 0;
	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
		return -EINVAL;

	/*
	 * When the user specified more nodes than supported just check
	 * if the non supported part is all zero, one word at a time,
	 * starting at the end.
	 */
	while (maxnode > MAX_NUMNODES) {
		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
		unsigned long t;

		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
			return -EFAULT;

		if (maxnode - bits >= MAX_NUMNODES) {
			maxnode -= bits;
		} else {
			maxnode = MAX_NUMNODES;
			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
		}
		if (t)
			return -EINVAL;
	}

	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
			      nodemask_t *nodes)
{
	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
	bool compat = in_compat_syscall();

	if (compat)
		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);

	if (copy > nbytes) {
		if (copy > PAGE_SIZE)
			return -EINVAL;
		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
			return -EFAULT;
		copy = nbytes;
		maxnode = nr_node_ids;
	}

	if (compat)
		return compat_put_bitmap((compat_ulong_t __user *)mask,
					 nodes_addr(*nodes), maxnode);

	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}

/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
{
	*flags = *mode & MPOL_MODE_FLAGS;
	*mode &= ~MPOL_MODE_FLAGS;

	if ((unsigned int)(*mode) >= MPOL_MAX)
		return -EINVAL;
	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
		return -EINVAL;
	if (*flags & MPOL_F_NUMA_BALANCING) {
		if (*mode == MPOL_BIND || *mode == MPOL_PREFERRED_MANY)
			*flags |= (MPOL_F_MOF | MPOL_F_MORON);
		else
			return -EINVAL;
	}
	return 0;
}

static long kernel_mbind(unsigned long start, unsigned long len,
			 unsigned long mode, const unsigned long __user *nmask,
			 unsigned long maxnode, unsigned int flags)
{
	unsigned short mode_flags;
	nodemask_t nodes;
	int lmode = mode;
	int err;

	start = untagged_addr(start);
	err = sanitize_mpol_flags(&lmode, &mode_flags);
	if (err)
		return err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;

	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
}

SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
		unsigned long, home_node, unsigned long, flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	struct mempolicy *new, *old;
	unsigned long end;
	int err = -ENOENT;
	VMA_ITERATOR(vmi, mm, start);

	start = untagged_addr(start);
	if (start & ~PAGE_MASK)
		return -EINVAL;
	/*
	 * flags is used for future extension if any.
	 */
	if (flags != 0)
		return -EINVAL;

	/*
	 * Check home_node is online to avoid accessing uninitialized
	 * NODE_DATA.
	 */
	if (home_node >= MAX_NUMNODES || !node_online(home_node))
		return -EINVAL;

	len = PAGE_ALIGN(len);
	end = start + len;

	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	mmap_write_lock(mm);
	prev = vma_prev(&vmi);
	for_each_vma_range(vmi, vma, end) {
		/*
		 * If any vma in the range has a policy other than MPOL_BIND
		 * or MPOL_PREFERRED_MANY we return an error. We don't reset
		 * the home node for vmas we already updated before.
		 */
		old = vma_policy(vma);
		if (!old) {
			prev = vma;
			continue;
		}
		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
			err = -EOPNOTSUPP;
			break;
		}
		new = mpol_dup(old);
		if (IS_ERR(new)) {
			err = PTR_ERR(new);
			break;
		}

		vma_start_write(vma);
		new->home_node = home_node;
		err = mbind_range(&vmi, vma, &prev, start, end, new);
		mpol_put(new);
		if (err)
			break;
	}
	mmap_write_unlock(mm);
	return err;
}

SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
		unsigned long, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode, unsigned int, flags)
{
	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
}

/* Set the process memory policy */
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
				 unsigned long maxnode)
{
	unsigned short mode_flags;
	nodemask_t nodes;
	int lmode = mode;
	int err;

	err = sanitize_mpol_flags(&lmode, &mode_flags);
	if (err)
		return err;

	err = get_nodes(&nodes, nmask, maxnode);
	if (err)
		return err;

	return do_set_mempolicy(lmode, mode_flags, &nodes);
}

SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
		unsigned long, maxnode)
{
	return kernel_set_mempolicy(mode, nmask, maxnode);
}

b6e9b0ba
DB
1636static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1637 const unsigned long __user *old_nodes,
1638 const unsigned long __user *new_nodes)
39743889 1639{
596d7cfa 1640 struct mm_struct *mm = NULL;
39743889 1641 struct task_struct *task;
39743889
CL
1642 nodemask_t task_nodes;
1643 int err;
596d7cfa
KM
1644 nodemask_t *old;
1645 nodemask_t *new;
1646 NODEMASK_SCRATCH(scratch);
1647
1648 if (!scratch)
1649 return -ENOMEM;
39743889 1650
596d7cfa
KM
1651 old = &scratch->mask1;
1652 new = &scratch->mask2;
1653
1654 err = get_nodes(old, old_nodes, maxnode);
39743889 1655 if (err)
596d7cfa 1656 goto out;
39743889 1657
596d7cfa 1658 err = get_nodes(new, new_nodes, maxnode);
39743889 1659 if (err)
596d7cfa 1660 goto out;
39743889
CL
1661
1662 /* Find the mm_struct */
55cfaa3c 1663 rcu_read_lock();
228ebcbe 1664 task = pid ? find_task_by_vpid(pid) : current;
39743889 1665 if (!task) {
55cfaa3c 1666 rcu_read_unlock();
596d7cfa
KM
1667 err = -ESRCH;
1668 goto out;
39743889 1669 }
3268c63e 1670 get_task_struct(task);
39743889 1671
596d7cfa 1672 err = -EINVAL;
39743889
CL
1673
1674 /*
31367466
OE
1675 * Check if this process has the right to modify the specified process.
1676 * Use the regular "ptrace_may_access()" checks.
39743889 1677 */
31367466 1678 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 1679 rcu_read_unlock();
39743889 1680 err = -EPERM;
3268c63e 1681 goto out_put;
39743889 1682 }
c69e8d9c 1683 rcu_read_unlock();
39743889
CL
1684
1685 task_nodes = cpuset_mems_allowed(task);
1686 /* Is the user allowed to access the target nodes? */
596d7cfa 1687 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
39743889 1688 err = -EPERM;
3268c63e 1689 goto out_put;
39743889
CL
1690 }
1691
0486a38b
YX
1692 task_nodes = cpuset_mems_allowed(current);
1693 nodes_and(*new, *new, task_nodes);
1694 if (nodes_empty(*new))
1695 goto out_put;
1696
86c3a764
DQ
1697 err = security_task_movememory(task);
1698 if (err)
3268c63e 1699 goto out_put;
86c3a764 1700
3268c63e
CL
1701 mm = get_task_mm(task);
1702 put_task_struct(task);
f2a9ef88
SL
1703
1704 if (!mm) {
3268c63e 1705 err = -EINVAL;
f2a9ef88
SL
1706 goto out;
1707 }
1708
1709 err = do_migrate_pages(mm, old, new,
1710 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
3268c63e
CL
1711
1712 mmput(mm);
1713out:
596d7cfa
KM
1714 NODEMASK_SCRATCH_FREE(scratch);
1715
39743889 1716 return err;
3268c63e
CL
1717
1718out_put:
1719 put_task_struct(task);
1720 goto out;
39743889
CL
1721}
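/*
 * Illustrative userspace sketch (assumption, not kernel code): move the pages
 * of task <pid> from node 0 to node 1, subject to the ptrace, cpuset and
 * CAP_SYS_NICE checks performed above.
 *
 *	unsigned long old_mask = 1UL << 0;
 *	unsigned long new_mask = 1UL << 1;
 *
 *	long moved = syscall(__NR_migrate_pages, pid, 2UL, &old_mask, &new_mask);
 */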
1722
b6e9b0ba
DB
1723SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1724 const unsigned long __user *, old_nodes,
1725 const unsigned long __user *, new_nodes)
1726{
1727 return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1728}
1729
8bccd85f 1730/* Retrieve NUMA policy */
af03c4ac
DB
1731static int kernel_get_mempolicy(int __user *policy,
1732 unsigned long __user *nmask,
1733 unsigned long maxnode,
1734 unsigned long addr,
1735 unsigned long flags)
8bccd85f 1736{
dbcb0f19 1737 int err;
3f649ab7 1738 int pval;
8bccd85f
CL
1739 nodemask_t nodes;
1740
050c17f2 1741 if (nmask != NULL && maxnode < nr_node_ids)
8bccd85f
CL
1742 return -EINVAL;
1743
4605f057
WH
1744 addr = untagged_addr(addr);
1745
8bccd85f
CL
1746 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1747
1748 if (err)
1749 return err;
1750
1751 if (policy && put_user(pval, policy))
1752 return -EFAULT;
1753
1754 if (nmask)
1755 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1756
1757 return err;
1758}
1759
af03c4ac
DB
1760SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1761 unsigned long __user *, nmask, unsigned long, maxnode,
1762 unsigned long, addr, unsigned long, flags)
1763{
1764 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1765}
1766
20ca87f2
LX
1767bool vma_migratable(struct vm_area_struct *vma)
1768{
1769 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1770 return false;
1771
1772 /*
1773 * DAX device mappings require predictable access latency, so avoid
1774 * incurring periodic faults.
1775 */
1776 if (vma_is_dax(vma))
1777 return false;
1778
1779 if (is_vm_hugetlb_page(vma) &&
1780 !hugepage_migration_supported(hstate_vma(vma)))
1781 return false;
1782
1783 /*
1784 * Migration allocates pages in the highest zone. If we cannot
1785 * do so then migration (at least from node to node) is not
1786 * possible.
1787 */
1788 if (vma->vm_file &&
1789 gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1790 < policy_zone)
1791 return false;
1792 return true;
1793}
1794
74d2c3a0 1795struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
ddc1a5cb 1796 unsigned long addr, pgoff_t *ilx)
1da177e4 1797{
ddc1a5cb
HD
1798 *ilx = 0;
1799 return (vma->vm_ops && vma->vm_ops->get_policy) ?
1800 vma->vm_ops->get_policy(vma, addr, ilx) : vma->vm_policy;
74d2c3a0
ON
1801}
1802
1803/*
ddc1a5cb 1804 * get_vma_policy(@vma, @addr, @order, @ilx)
74d2c3a0
ON
1805 * @vma: virtual memory area whose policy is sought
1806 * @addr: address in @vma for shared policy lookup
ddc1a5cb 1807 * @order: 0, or appropriate huge_page_order for interleaving
fa3bea4e
GP
1808 * @ilx: interleave index (output), for use only when MPOL_INTERLEAVE or
1809 * MPOL_WEIGHTED_INTERLEAVE
74d2c3a0
ON
1810 *
1811 * Returns effective policy for a VMA at specified address.
dd6eecb9 1812 * Falls back to current->mempolicy or system default policy, as necessary.
74d2c3a0
ON
1813 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1814 * count--added by the get_policy() vm_op, as appropriate--to protect against
1815 * freeing by another task. It is the caller's responsibility to free the
1816 * extra reference for shared policies.
1817 */
ddc1a5cb
HD
1818struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1819 unsigned long addr, int order, pgoff_t *ilx)
74d2c3a0 1820{
ddc1a5cb 1821 struct mempolicy *pol;
74d2c3a0 1822
ddc1a5cb 1823 pol = __get_vma_policy(vma, addr, ilx);
8d90274b 1824 if (!pol)
dd6eecb9 1825 pol = get_task_policy(current);
fa3bea4e
GP
1826 if (pol->mode == MPOL_INTERLEAVE ||
1827 pol->mode == MPOL_WEIGHTED_INTERLEAVE) {
ddc1a5cb
HD
1828 *ilx += vma->vm_pgoff >> order;
1829 *ilx += (addr - vma->vm_start) >> (PAGE_SHIFT + order);
1830 }
1da177e4
LT
1831 return pol;
1832}
1833
6b6482bb 1834bool vma_policy_mof(struct vm_area_struct *vma)
fc314724 1835{
6b6482bb 1836 struct mempolicy *pol;
fc314724 1837
6b6482bb
ON
1838 if (vma->vm_ops && vma->vm_ops->get_policy) {
1839 bool ret = false;
ddc1a5cb 1840 pgoff_t ilx; /* ignored here */
fc314724 1841
ddc1a5cb 1842 pol = vma->vm_ops->get_policy(vma, vma->vm_start, &ilx);
6b6482bb
ON
1843 if (pol && (pol->flags & MPOL_F_MOF))
1844 ret = true;
1845 mpol_cond_put(pol);
8d90274b 1846
6b6482bb 1847 return ret;
fc314724
MG
1848 }
1849
6b6482bb 1850 pol = vma->vm_policy;
8d90274b 1851 if (!pol)
6b6482bb 1852 pol = get_task_policy(current);
8d90274b 1853
fc314724
MG
1854 return pol->flags & MPOL_F_MOF;
1855}
1856
d2226ebd 1857bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
d3eb1570
LJ
1858{
1859 enum zone_type dynamic_policy_zone = policy_zone;
1860
1861 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1862
1863 /*
269fbe72 1864 * If policy->nodes has movable memory only,
d3eb1570
LJ
 1865 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
 1866 *
269fbe72 1867 * policy->nodes intersects with node_states[N_MEMORY],
f0953a1b 1868 * so if the following test fails, it implies that
269fbe72 1869 * policy->nodes has movable memory only.
d3eb1570 1870 */
269fbe72 1871 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
d3eb1570
LJ
1872 dynamic_policy_zone = ZONE_MOVABLE;
1873
1874 return zone >= dynamic_policy_zone;
1875}
1876
fa3bea4e
GP
1877static unsigned int weighted_interleave_nodes(struct mempolicy *policy)
1878{
274519ed
GP
1879 unsigned int node;
1880 unsigned int cpuset_mems_cookie;
fa3bea4e 1881
274519ed
GP
1882retry:
1883	/* to prevent miscount, use tsk->mems_allowed_seq to detect rebind */
1884 cpuset_mems_cookie = read_mems_allowed_begin();
1885 node = current->il_prev;
fa3bea4e
GP
1886 if (!current->il_weight || !node_isset(node, policy->nodes)) {
1887 node = next_node_in(node, policy->nodes);
274519ed
GP
1888 if (read_mems_allowed_retry(cpuset_mems_cookie))
1889 goto retry;
fa3bea4e
GP
1890 if (node == MAX_NUMNODES)
1891 return node;
1892 current->il_prev = node;
1893 current->il_weight = get_il_weight(node);
1894 }
1895 current->il_weight--;
1896 return node;
1897}
1898
1da177e4 1899/* Do dynamic interleaving for a process */
c36f6e6d 1900static unsigned int interleave_nodes(struct mempolicy *policy)
1da177e4 1901{
c36f6e6d 1902 unsigned int nid;
274519ed
GP
1903 unsigned int cpuset_mems_cookie;
1904
1905 /* to prevent miscount, use tsk->mems_allowed_seq to detect rebind */
1906 do {
1907 cpuset_mems_cookie = read_mems_allowed_begin();
1908 nid = next_node_in(current->il_prev, policy->nodes);
1909 } while (read_mems_allowed_retry(cpuset_mems_cookie));
1da177e4 1910
c36f6e6d
HD
1911 if (nid < MAX_NUMNODES)
1912 current->il_prev = nid;
1913 return nid;
1da177e4
LT
1914}
1915
dc85da15
CL
1916/*
1917 * Depending on the memory policy provide a node from which to allocate the
1918 * next slab entry.
1919 */
2a389610 1920unsigned int mempolicy_slab_node(void)
dc85da15 1921{
e7b691b0 1922 struct mempolicy *policy;
2a389610 1923 int node = numa_mem_id();
e7b691b0 1924
38b031dd 1925 if (!in_task())
2a389610 1926 return node;
e7b691b0
AK
1927
1928 policy = current->mempolicy;
7858d7bc 1929 if (!policy)
2a389610 1930 return node;
bea904d5
LS
1931
1932 switch (policy->mode) {
1933 case MPOL_PREFERRED:
269fbe72 1934 return first_node(policy->nodes);
765c4507 1935
dc85da15
CL
1936 case MPOL_INTERLEAVE:
1937 return interleave_nodes(policy);
1938
fa3bea4e
GP
1939 case MPOL_WEIGHTED_INTERLEAVE:
1940 return weighted_interleave_nodes(policy);
1941
b27abacc
DH
1942 case MPOL_BIND:
1943 case MPOL_PREFERRED_MANY:
1944 {
c33d6c06
MG
1945 struct zoneref *z;
1946
dc85da15
CL
1947 /*
1948 * Follow bind policy behavior and start allocation at the
1949 * first node.
1950 */
19770b32 1951 struct zonelist *zonelist;
19770b32 1952 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
c9634cf0 1953 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
c33d6c06 1954 z = first_zones_zonelist(zonelist, highest_zoneidx,
269fbe72 1955 &policy->nodes);
c1093b74 1956 return z->zone ? zone_to_nid(z->zone) : node;
dd1a239f 1957 }
7858d7bc
FT
1958 case MPOL_LOCAL:
1959 return node;
dc85da15 1960
dc85da15 1961 default:
bea904d5 1962 BUG();
dc85da15
CL
1963 }
1964}
1965
9685e6e3
GP
1966static unsigned int read_once_policy_nodemask(struct mempolicy *pol,
1967 nodemask_t *mask)
1968{
1969 /*
1970 * barrier stabilizes the nodemask locally so that it can be iterated
1971 * over safely without concern for changes. Allocators validate node
1972 * selection does not violate mems_allowed, so this is safe.
1973 */
1974 barrier();
1975 memcpy(mask, &pol->nodes, sizeof(nodemask_t));
1976 barrier();
1977 return nodes_weight(*mask);
1978}
1979
fa3bea4e
GP
1980static unsigned int weighted_interleave_nid(struct mempolicy *pol, pgoff_t ilx)
1981{
1982 nodemask_t nodemask;
1983 unsigned int target, nr_nodes;
1984 u8 *table;
1985 unsigned int weight_total = 0;
1986 u8 weight;
1987 int nid;
1988
1989 nr_nodes = read_once_policy_nodemask(pol, &nodemask);
1990 if (!nr_nodes)
1991 return numa_node_id();
1992
1993 rcu_read_lock();
1994 table = rcu_dereference(iw_table);
1995 /* calculate the total weight */
1996 for_each_node_mask(nid, nodemask) {
1997 /* detect system default usage */
1998 weight = table ? table[nid] : 1;
1999 weight = weight ? weight : 1;
2000 weight_total += weight;
2001 }
2002
2003 /* Calculate the node offset based on totals */
2004 target = ilx % weight_total;
2005 nid = first_node(nodemask);
2006 while (target) {
2007 /* detect system default usage */
2008 weight = table ? table[nid] : 1;
2009 weight = weight ? weight : 1;
2010 if (target < weight)
2011 break;
2012 target -= weight;
2013 nid = next_node_in(nid, nodemask);
2014 }
2015 rcu_read_unlock();
2016 return nid;
2017}
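/*
 * Worked example (illustrative): with a nodemask of {0,1} and weights {2,1},
 * weight_total is 3, so ilx values 0,1,2,3,4,5 map to targets 0,1,2,0,1,2 and
 * hence to nodes 0,0,1,0,0,1: two pages on node 0 for every one page on node 1.
 */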
2018
fee83b3a 2019/*
ddc1a5cb
HD
2020 * Do static interleaving for interleave index @ilx. Returns the ilx'th
2021 * node in pol->nodes (starting from ilx=0), wrapping around if ilx
2022 * exceeds the number of present nodes.
fee83b3a 2023 */
ddc1a5cb 2024static unsigned int interleave_nid(struct mempolicy *pol, pgoff_t ilx)
1da177e4 2025{
9685e6e3 2026 nodemask_t nodemask;
276aeee1 2027 unsigned int target, nnodes;
fee83b3a
AM
2028 int i;
2029 int nid;
1da177e4 2030
9685e6e3 2031 nnodes = read_once_policy_nodemask(pol, &nodemask);
f5b087b5
DR
2032 if (!nnodes)
2033 return numa_node_id();
ddc1a5cb 2034 target = ilx % nnodes;
276aeee1 2035 nid = first_node(nodemask);
fee83b3a 2036 for (i = 0; i < target; i++)
276aeee1 2037 nid = next_node(nid, nodemask);
1da177e4
LT
2038 return nid;
2039}
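/*
 * Worked example (illustrative): with pol->nodes = {0,2,5} and ilx = 7,
 * nnodes is 3 and target is 7 % 3 = 1, so the walk starts at node 0 and
 * advances one step, returning node 2.
 */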
2040
ddc1a5cb
HD
2041/*
2042 * Return a nodemask representing a mempolicy for filtering nodes for
2043 * page allocation, together with preferred node id (or the input node id).
2044 */
2045static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *pol,
2046 pgoff_t ilx, int *nid)
5da7ca86 2047{
ddc1a5cb 2048 nodemask_t *nodemask = NULL;
5da7ca86 2049
ddc1a5cb
HD
2050 switch (pol->mode) {
2051 case MPOL_PREFERRED:
2052 /* Override input node id */
2053 *nid = first_node(pol->nodes);
2054 break;
2055 case MPOL_PREFERRED_MANY:
2056 nodemask = &pol->nodes;
2057 if (pol->home_node != NUMA_NO_NODE)
2058 *nid = pol->home_node;
2059 break;
2060 case MPOL_BIND:
2061 /* Restrict to nodemask (but not on lower zones) */
2062 if (apply_policy_zone(pol, gfp_zone(gfp)) &&
2063 cpuset_nodemask_valid_mems_allowed(&pol->nodes))
2064 nodemask = &pol->nodes;
2065 if (pol->home_node != NUMA_NO_NODE)
2066 *nid = pol->home_node;
3b98b087 2067 /*
ddc1a5cb
HD
2068 * __GFP_THISNODE shouldn't even be used with the bind policy
2069 * because we might easily break the expectation to stay on the
2070 * requested node and not break the policy.
3b98b087 2071 */
ddc1a5cb
HD
2072 WARN_ON_ONCE(gfp & __GFP_THISNODE);
2073 break;
2074 case MPOL_INTERLEAVE:
2075 /* Override input node id */
2076 *nid = (ilx == NO_INTERLEAVE_INDEX) ?
2077 interleave_nodes(pol) : interleave_nid(pol, ilx);
2078 break;
fa3bea4e
GP
2079 case MPOL_WEIGHTED_INTERLEAVE:
2080 *nid = (ilx == NO_INTERLEAVE_INDEX) ?
2081 weighted_interleave_nodes(pol) :
2082 weighted_interleave_nid(pol, ilx);
2083 break;
ddc1a5cb
HD
2084 }
2085
2086 return nodemask;
5da7ca86
CL
2087}
2088
00ac59ad 2089#ifdef CONFIG_HUGETLBFS
480eccf9 2090/*
04ec6264 2091 * huge_node(@vma, @addr, @gfp_flags, @mpol)
b46e14ac
FF
2092 * @vma: virtual memory area whose policy is sought
2093 * @addr: address in @vma for shared policy lookup and interleave policy
2094 * @gfp_flags: for requested zone
2095 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
b27abacc 2096 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
480eccf9 2097 *
04ec6264 2098 * Returns a nid suitable for a huge page allocation and a pointer
52cd3b07 2099 * to the struct mempolicy for conditional unref after allocation.
b27abacc
DH
2100 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2101 * to the mempolicy's @nodemask for filtering the zonelist.
480eccf9 2102 */
04ec6264 2103int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
ddc1a5cb 2104 struct mempolicy **mpol, nodemask_t **nodemask)
5da7ca86 2105{
ddc1a5cb 2106 pgoff_t ilx;
04ec6264 2107 int nid;
5da7ca86 2108
ddc1a5cb
HD
2109 nid = numa_node_id();
2110 *mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx);
2111 *nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid);
04ec6264 2112 return nid;
5da7ca86 2113}
06808b08
LS
2114
2115/*
2116 * init_nodemask_of_mempolicy
2117 *
2118 * If the current task's mempolicy is "default" [NULL], return 'false'
2119 * to indicate default policy. Otherwise, extract the policy nodemask
2120 * for 'bind' or 'interleave' policy into the argument nodemask, or
2121 * initialize the argument nodemask to contain the single node for
2122 * 'preferred' or 'local' policy and return 'true' to indicate presence
2123 * of non-default mempolicy.
2124 *
2125 * We don't bother with reference counting the mempolicy [mpol_get/put]
2126 * because the current task is examining its own mempolicy and a task's
2127 * mempolicy is only ever changed by the task itself.
2128 *
2129 * N.B., it is the caller's responsibility to free a returned nodemask.
2130 */
2131bool init_nodemask_of_mempolicy(nodemask_t *mask)
2132{
2133 struct mempolicy *mempolicy;
06808b08
LS
2134
2135 if (!(mask && current->mempolicy))
2136 return false;
2137
c0ff7453 2138 task_lock(current);
06808b08
LS
2139 mempolicy = current->mempolicy;
2140 switch (mempolicy->mode) {
2141 case MPOL_PREFERRED:
b27abacc 2142 case MPOL_PREFERRED_MANY:
06808b08 2143 case MPOL_BIND:
06808b08 2144 case MPOL_INTERLEAVE:
fa3bea4e 2145 case MPOL_WEIGHTED_INTERLEAVE:
269fbe72 2146 *mask = mempolicy->nodes;
7858d7bc
FT
2147 break;
2148
2149 case MPOL_LOCAL:
269fbe72 2150 init_nodemask_of_node(mask, numa_node_id());
06808b08
LS
2151 break;
2152
2153 default:
2154 BUG();
2155 }
c0ff7453 2156 task_unlock(current);
06808b08
LS
2157
2158 return true;
2159}
00ac59ad 2160#endif
5da7ca86 2161
6f48d0eb 2162/*
b26e517a 2163 * mempolicy_in_oom_domain
6f48d0eb 2164 *
b26e517a
FT
2165 * If tsk's mempolicy is "bind", check for intersection between mask and
2166 * the policy nodemask. Otherwise, return true for all other policies
2167 * including "interleave", as a tsk with "interleave" policy may have
2168 * memory allocated from all nodes in system.
6f48d0eb
DR
2169 *
2170 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2171 */
b26e517a 2172bool mempolicy_in_oom_domain(struct task_struct *tsk,
6f48d0eb
DR
2173 const nodemask_t *mask)
2174{
2175 struct mempolicy *mempolicy;
2176 bool ret = true;
2177
2178 if (!mask)
2179 return ret;
b26e517a 2180
6f48d0eb
DR
2181 task_lock(tsk);
2182 mempolicy = tsk->mempolicy;
b26e517a 2183 if (mempolicy && mempolicy->mode == MPOL_BIND)
269fbe72 2184 ret = nodes_intersects(mempolicy->nodes, *mask);
6f48d0eb 2185 task_unlock(tsk);
b26e517a 2186
6f48d0eb
DR
2187 return ret;
2188}
2189
4c54d949 2190static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
ddc1a5cb 2191 int nid, nodemask_t *nodemask)
4c54d949
FT
2192{
2193 struct page *page;
2194 gfp_t preferred_gfp;
2195
2196 /*
2197	 * This is a two-pass approach. The first pass will only try the
2198	 * preferred nodes but skip direct reclaim and allow the
2199	 * allocation to fail, while the second pass will try all the
2200	 * nodes in the system.
2201 */
2202 preferred_gfp = gfp | __GFP_NOWARN;
2203 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
b951aaff 2204 page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask);
4c54d949 2205 if (!page)
b951aaff 2206 page = __alloc_pages_noprof(gfp, order, nid, NULL);
4c54d949
FT
2207
2208 return page;
2209}
2210
1da177e4 2211/**
ddc1a5cb 2212 * alloc_pages_mpol - Allocate pages according to NUMA mempolicy.
eb350739 2213 * @gfp: GFP flags.
ddc1a5cb
HD
2214 * @order: Order of the page allocation.
2215 * @pol: Pointer to the NUMA mempolicy.
2216 * @ilx: Index for interleave mempolicy (also distinguishes alloc_pages()).
2217 * @nid: Preferred node (usually numa_node_id() but @mpol may override it).
1da177e4 2218 *
ddc1a5cb 2219 * Return: The page on success or NULL if allocation fails.
1da177e4 2220 */
b951aaff 2221struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order,
ddc1a5cb 2222 struct mempolicy *pol, pgoff_t ilx, int nid)
1da177e4 2223{
ddc1a5cb
HD
2224 nodemask_t *nodemask;
2225 struct page *page;
adf88aa8 2226
ddc1a5cb 2227 nodemask = policy_nodemask(gfp, pol, ilx, &nid);
4c54d949 2228
ddc1a5cb
HD
2229 if (pol->mode == MPOL_PREFERRED_MANY)
2230 return alloc_pages_preferred_many(gfp, order, nid, nodemask);
19deb769 2231
ddc1a5cb
HD
2232 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
2233 /* filter "hugepage" allocation, unless from alloc_pages() */
2234 order == HPAGE_PMD_ORDER && ilx != NO_INTERLEAVE_INDEX) {
19deb769
DR
2235 /*
2236 * For hugepage allocation and non-interleave policy which
2237 * allows the current node (or other explicitly preferred
2238 * node) we only try to allocate from the current/preferred
2239 * node and don't fall back to other nodes, as the cost of
2240 * remote accesses would likely offset THP benefits.
2241 *
b27abacc 2242 * If the policy is interleave or does not allow the current
19deb769
DR
2243 * node in its nodemask, we allocate the standard way.
2244 */
ddc1a5cb 2245 if (pol->mode != MPOL_INTERLEAVE &&
fa3bea4e 2246 pol->mode != MPOL_WEIGHTED_INTERLEAVE &&
ddc1a5cb 2247 (!nodemask || node_isset(nid, *nodemask))) {
cc638f32
VB
2248 /*
2249 * First, try to allocate THP only on local node, but
2250 * don't reclaim unnecessarily, just compact.
2251 */
b951aaff 2252 page = __alloc_pages_node_noprof(nid,
ddc1a5cb
HD
2253 gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2254 if (page || !(gfp & __GFP_DIRECT_RECLAIM))
2255 return page;
76e654cc
DR
2256 /*
2257			 * If hugepage allocations are configured to always use
2258			 * synchronous compaction, or the vma has been madvised
2259			 * to prefer hugepage backing, retry allowing remote
cc638f32 2260			 * memory with both reclaim and compaction as well.
76e654cc 2261 */
ddc1a5cb
HD
2262 }
2263 }
76e654cc 2264
b951aaff 2265 page = __alloc_pages_noprof(gfp, order, nid, nodemask);
ddc1a5cb
HD
2266
2267 if (unlikely(pol->mode == MPOL_INTERLEAVE) && page) {
2268 /* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */
2269 if (static_branch_likely(&vm_numa_stat_key) &&
2270 page_to_nid(page) == nid) {
2271 preempt_disable();
2272 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2273 preempt_enable();
19deb769 2274 }
356ff8a9
DR
2275 }
2276
ddc1a5cb
HD
2277 return page;
2278}
2279
2280/**
2281 * vma_alloc_folio - Allocate a folio for a VMA.
2282 * @gfp: GFP flags.
2283 * @order: Order of the folio.
2284 * @vma: Pointer to VMA.
2285 * @addr: Virtual address of the allocation. Must be inside @vma.
2286 * @hugepage: Unused (was: For hugepages try only preferred node if possible).
2287 *
2288 * Allocate a folio for a specific address in @vma, using the appropriate
2289 * NUMA policy. The caller must hold the mmap_lock of the mm_struct of the
2290 * VMA to prevent it from going away. Should be used for all allocations
2291 * for folios that will be mapped into user space, excepting hugetlbfs, and
2292 * excepting where direct use of alloc_pages_mpol() is more appropriate.
2293 *
2294 * Return: The folio on success or NULL if allocation fails.
2295 */
b951aaff 2296struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
ddc1a5cb
HD
2297 unsigned long addr, bool hugepage)
2298{
2299 struct mempolicy *pol;
2300 pgoff_t ilx;
2301 struct page *page;
2302
2303 pol = get_vma_policy(vma, addr, order, &ilx);
b951aaff
SB
2304 page = alloc_pages_mpol_noprof(gfp | __GFP_COMP, order,
2305 pol, ilx, numa_node_id());
d51e9894 2306 mpol_cond_put(pol);
ddc1a5cb 2307 return page_rmappable_folio(page);
f584b680 2308}
b951aaff 2309EXPORT_SYMBOL(vma_alloc_folio_noprof);
f584b680 2310
1da177e4 2311/**
6421ec76
MWO
2312 * alloc_pages - Allocate pages.
2313 * @gfp: GFP flags.
2314 * @order: Power of two of number of pages to allocate.
1da177e4 2315 *
6421ec76
MWO
2316 * Allocate 1 << @order contiguous pages. The physical address of the
2317 * first page is naturally aligned (e.g. an order-3 allocation will be aligned
2318 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2319 * process is honoured when in process context.
1da177e4 2320 *
6421ec76
MWO
2321 * Context: Can be called from any context, providing the appropriate GFP
2322 * flags are used.
2323 * Return: The page on success or NULL if allocation fails.
1da177e4 2324 */
b951aaff 2325struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order)
1da177e4 2326{
8d90274b 2327 struct mempolicy *pol = &default_policy;
52cd3b07
LS
2328
2329 /*
2330 * No reference counting needed for current->mempolicy
2331 * nor system default_policy
2332 */
ddc1a5cb
HD
2333 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2334 pol = get_task_policy(current);
cc9a6c87 2335
b951aaff
SB
2336 return alloc_pages_mpol_noprof(gfp, order, pol, NO_INTERLEAVE_INDEX,
2337 numa_node_id());
1da177e4 2338}
b951aaff 2339EXPORT_SYMBOL(alloc_pages_noprof);
1da177e4 2340
b951aaff 2341struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
cc09cb13 2342{
b951aaff 2343 return page_rmappable_folio(alloc_pages_noprof(gfp | __GFP_COMP, order));
cc09cb13 2344}
b951aaff 2345EXPORT_SYMBOL(folio_alloc_noprof);
cc09cb13 2346
c00b6b96
CW
2347static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2348 struct mempolicy *pol, unsigned long nr_pages,
2349 struct page **page_array)
2350{
2351 int nodes;
2352 unsigned long nr_pages_per_node;
2353 int delta;
2354 int i;
2355 unsigned long nr_allocated;
2356 unsigned long total_allocated = 0;
2357
2358 nodes = nodes_weight(pol->nodes);
2359 nr_pages_per_node = nr_pages / nodes;
2360 delta = nr_pages - nodes * nr_pages_per_node;
2361
2362 for (i = 0; i < nodes; i++) {
2363 if (delta) {
b951aaff 2364 nr_allocated = alloc_pages_bulk_noprof(gfp,
c00b6b96
CW
2365 interleave_nodes(pol), NULL,
2366 nr_pages_per_node + 1, NULL,
2367 page_array);
2368 delta--;
2369 } else {
b951aaff 2370 nr_allocated = alloc_pages_bulk_noprof(gfp,
c00b6b96
CW
2371 interleave_nodes(pol), NULL,
2372 nr_pages_per_node, NULL, page_array);
2373 }
2374
2375 page_array += nr_allocated;
2376 total_allocated += nr_allocated;
2377 }
2378
2379 return total_allocated;
2380}
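/*
 * Worked example (illustrative): bulk-allocating nr_pages = 10 across a
 * 3-node interleave policy gives nr_pages_per_node = 3 and delta = 1, so the
 * first node visited receives 4 pages and the other two receive 3 each.
 */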
2381
fa3bea4e
GP
2382static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp,
2383 struct mempolicy *pol, unsigned long nr_pages,
2384 struct page **page_array)
2385{
2386 struct task_struct *me = current;
274519ed 2387 unsigned int cpuset_mems_cookie;
fa3bea4e
GP
2388 unsigned long total_allocated = 0;
2389 unsigned long nr_allocated = 0;
2390 unsigned long rounds;
2391 unsigned long node_pages, delta;
2392 u8 *table, *weights, weight;
2393 unsigned int weight_total = 0;
2394 unsigned long rem_pages = nr_pages;
2395 nodemask_t nodes;
2396 int nnodes, node;
2397 int resume_node = MAX_NUMNODES - 1;
2398 u8 resume_weight = 0;
2399 int prev_node;
2400 int i;
2401
2402 if (!nr_pages)
2403 return 0;
2404
274519ed
GP
2405 /* read the nodes onto the stack, retry if done during rebind */
2406 do {
2407 cpuset_mems_cookie = read_mems_allowed_begin();
2408 nnodes = read_once_policy_nodemask(pol, &nodes);
2409 } while (read_mems_allowed_retry(cpuset_mems_cookie));
2410
2411 /* if the nodemask has become invalid, we cannot do anything */
fa3bea4e
GP
2412 if (!nnodes)
2413 return 0;
2414
2415 /* Continue allocating from most recent node and adjust the nr_pages */
2416 node = me->il_prev;
2417 weight = me->il_weight;
2418 if (weight && node_isset(node, nodes)) {
2419 node_pages = min(rem_pages, weight);
2420 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
2421 NULL, page_array);
2422 page_array += nr_allocated;
2423 total_allocated += nr_allocated;
2424 /* if that's all the pages, no need to interleave */
2425 if (rem_pages <= weight) {
2426 me->il_weight -= rem_pages;
2427 return total_allocated;
2428 }
2429 /* Otherwise we adjust remaining pages, continue from there */
2430 rem_pages -= weight;
2431 }
2432 /* clear active weight in case of an allocation failure */
2433 me->il_weight = 0;
2434 prev_node = node;
2435
2436 /* create a local copy of node weights to operate on outside rcu */
2437 weights = kzalloc(nr_node_ids, GFP_KERNEL);
2438 if (!weights)
2439 return total_allocated;
2440
2441 rcu_read_lock();
2442 table = rcu_dereference(iw_table);
2443 if (table)
2444 memcpy(weights, table, nr_node_ids);
2445 rcu_read_unlock();
2446
2447 /* calculate total, detect system default usage */
2448 for_each_node_mask(node, nodes) {
2449 if (!weights[node])
2450 weights[node] = 1;
2451 weight_total += weights[node];
2452 }
2453
2454 /*
2455 * Calculate rounds/partial rounds to minimize __alloc_pages_bulk calls.
2456 * Track which node weighted interleave should resume from.
2457 *
2458	 * If (rounds > 0) and (delta == 0), resume_node will always be
2459	 * the node following prev_node, with resume_weight being its full weight.
2460 */
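	/*
	 * Worked example (illustrative): two nodes with weights 3 and 1 and
	 * rem_pages = 8 give weight_total = 4, rounds = 2 and delta = 0, so
	 * the loop below hands the nodes 6 and 2 pages respectively.
	 */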
2461 rounds = rem_pages / weight_total;
2462 delta = rem_pages % weight_total;
2463 resume_node = next_node_in(prev_node, nodes);
2464 resume_weight = weights[resume_node];
2465 for (i = 0; i < nnodes; i++) {
2466 node = next_node_in(prev_node, nodes);
2467 weight = weights[node];
2468 node_pages = weight * rounds;
2469 /* If a delta exists, add this node's portion of the delta */
2470 if (delta > weight) {
2471 node_pages += weight;
2472 delta -= weight;
2473 } else if (delta) {
2474 /* when delta is depleted, resume from that node */
2475 node_pages += delta;
2476 resume_node = node;
2477 resume_weight = weight - delta;
2478 delta = 0;
2479 }
2480 /* node_pages can be 0 if an allocation fails and rounds == 0 */
2481 if (!node_pages)
2482 break;
2483 nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages,
2484 NULL, page_array);
2485 page_array += nr_allocated;
2486 total_allocated += nr_allocated;
2487 if (total_allocated == nr_pages)
2488 break;
2489 prev_node = node;
2490 }
2491 me->il_prev = resume_node;
2492 me->il_weight = resume_weight;
2493 kfree(weights);
2494 return total_allocated;
2495}
2496
c00b6b96
CW
2497static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2498 struct mempolicy *pol, unsigned long nr_pages,
2499 struct page **page_array)
2500{
2501 gfp_t preferred_gfp;
2502 unsigned long nr_allocated = 0;
2503
2504 preferred_gfp = gfp | __GFP_NOWARN;
2505 preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2506
b951aaff 2507 nr_allocated = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes,
c00b6b96
CW
2508 nr_pages, NULL, page_array);
2509
2510 if (nr_allocated < nr_pages)
b951aaff 2511 nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL,
c00b6b96
CW
2512 nr_pages - nr_allocated, NULL,
2513 page_array + nr_allocated);
2514 return nr_allocated;
2515}
2516
2517/* Bulk page allocation and mempolicy should be considered at the
2518 * same time in some situations, such as vmalloc.
2519 *
2520 * It can accelerate memory allocation, especially for interleaved
2521 * allocations.
2522 */
b951aaff 2523unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp,
c00b6b96
CW
2524 unsigned long nr_pages, struct page **page_array)
2525{
2526 struct mempolicy *pol = &default_policy;
ddc1a5cb
HD
2527 nodemask_t *nodemask;
2528 int nid;
c00b6b96
CW
2529
2530 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2531 pol = get_task_policy(current);
2532
2533 if (pol->mode == MPOL_INTERLEAVE)
2534 return alloc_pages_bulk_array_interleave(gfp, pol,
2535 nr_pages, page_array);
2536
fa3bea4e
GP
2537 if (pol->mode == MPOL_WEIGHTED_INTERLEAVE)
2538 return alloc_pages_bulk_array_weighted_interleave(
2539 gfp, pol, nr_pages, page_array);
2540
c00b6b96
CW
2541 if (pol->mode == MPOL_PREFERRED_MANY)
2542 return alloc_pages_bulk_array_preferred_many(gfp,
2543 numa_node_id(), pol, nr_pages, page_array);
2544
ddc1a5cb
HD
2545 nid = numa_node_id();
2546 nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid);
b951aaff
SB
2547 return alloc_pages_bulk_noprof(gfp, nid, nodemask,
2548 nr_pages, NULL, page_array);
c00b6b96
CW
2549}
2550
ef0855d3
ON
2551int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2552{
c36f6e6d 2553 struct mempolicy *pol = mpol_dup(src->vm_policy);
ef0855d3
ON
2554
2555 if (IS_ERR(pol))
2556 return PTR_ERR(pol);
2557 dst->vm_policy = pol;
2558 return 0;
2559}
2560
4225399a 2561/*
846a16bf 2562 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
4225399a
PJ
2563 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2564 * with the mems_allowed returned by cpuset_mems_allowed(). This
2565 * keeps mempolicies cpuset-relative after their cpuset moves. See
2566 * further kernel/cpuset.c update_nodemask().
708c1bbc
MX
2567 *
2568 * current's mempolicy may be rebound by another task (the task that changes
2569 * the cpuset's mems), so we needn't do the rebind work for the current task.
4225399a 2570 */
4225399a 2571
846a16bf
LS
2572/* Slow path of a mempolicy duplicate */
2573struct mempolicy *__mpol_dup(struct mempolicy *old)
1da177e4
LT
2574{
2575 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2576
2577 if (!new)
2578 return ERR_PTR(-ENOMEM);
708c1bbc
MX
2579
2580 /* task's mempolicy is protected by alloc_lock */
2581 if (old == current->mempolicy) {
2582 task_lock(current);
2583 *new = *old;
2584 task_unlock(current);
2585 } else
2586 *new = *old;
2587
4225399a
PJ
2588 if (current_cpuset_is_being_rebound()) {
2589 nodemask_t mems = cpuset_mems_allowed(current);
213980c0 2590 mpol_rebind_policy(new, &mems);
4225399a 2591 }
1da177e4 2592 atomic_set(&new->refcnt, 1);
1da177e4
LT
2593 return new;
2594}
2595
2596/* Slow path of a mempolicy comparison */
fcfb4dcc 2597bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1da177e4
LT
2598{
2599 if (!a || !b)
fcfb4dcc 2600 return false;
45c4745a 2601 if (a->mode != b->mode)
fcfb4dcc 2602 return false;
19800502 2603 if (a->flags != b->flags)
fcfb4dcc 2604 return false;
c6018b4b
AK
2605 if (a->home_node != b->home_node)
2606 return false;
19800502
BL
2607 if (mpol_store_user_nodemask(a))
2608 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
fcfb4dcc 2609 return false;
19800502 2610
45c4745a 2611 switch (a->mode) {
19770b32 2612 case MPOL_BIND:
1da177e4 2613 case MPOL_INTERLEAVE:
1da177e4 2614 case MPOL_PREFERRED:
b27abacc 2615 case MPOL_PREFERRED_MANY:
fa3bea4e 2616 case MPOL_WEIGHTED_INTERLEAVE:
269fbe72 2617 return !!nodes_equal(a->nodes, b->nodes);
7858d7bc
FT
2618 case MPOL_LOCAL:
2619 return true;
1da177e4
LT
2620 default:
2621 BUG();
fcfb4dcc 2622 return false;
1da177e4
LT
2623 }
2624}
2625
1da177e4
LT
2626/*
2627 * Shared memory backing store policy support.
2628 *
2629 * Remember policies even when nobody has shared memory mapped.
2630 * The policies are kept in Red-Black tree linked from the inode.
4a8c7bb5 2631 * They are protected by the sp->lock rwlock, which should be held
1da177e4
LT
2632 * for any accesses to the tree.
2633 */
2634
4a8c7bb5
NZ
2635/*
2636 * lookup first element intersecting start-end. Caller holds sp->lock for
2637 * reading or for writing
2638 */
93397c3b
HD
2639static struct sp_node *sp_lookup(struct shared_policy *sp,
2640 pgoff_t start, pgoff_t end)
1da177e4
LT
2641{
2642 struct rb_node *n = sp->root.rb_node;
2643
2644 while (n) {
2645 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2646
2647 if (start >= p->end)
2648 n = n->rb_right;
2649 else if (end <= p->start)
2650 n = n->rb_left;
2651 else
2652 break;
2653 }
2654 if (!n)
2655 return NULL;
2656 for (;;) {
2657 struct sp_node *w = NULL;
2658 struct rb_node *prev = rb_prev(n);
2659 if (!prev)
2660 break;
2661 w = rb_entry(prev, struct sp_node, nd);
2662 if (w->end <= start)
2663 break;
2664 n = prev;
2665 }
2666 return rb_entry(n, struct sp_node, nd);
2667}
2668
4a8c7bb5
NZ
2669/*
2670 * Insert a new shared policy into the list. Caller holds sp->lock for
2671 * writing.
2672 */
1da177e4
LT
2673static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2674{
2675 struct rb_node **p = &sp->root.rb_node;
2676 struct rb_node *parent = NULL;
2677 struct sp_node *nd;
2678
2679 while (*p) {
2680 parent = *p;
2681 nd = rb_entry(parent, struct sp_node, nd);
2682 if (new->start < nd->start)
2683 p = &(*p)->rb_left;
2684 else if (new->end > nd->end)
2685 p = &(*p)->rb_right;
2686 else
2687 BUG();
2688 }
2689 rb_link_node(&new->nd, parent, p);
2690 rb_insert_color(&new->nd, &sp->root);
1da177e4
LT
2691}
2692
2693/* Find shared policy intersecting idx */
93397c3b
HD
2694struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
2695 pgoff_t idx)
1da177e4
LT
2696{
2697 struct mempolicy *pol = NULL;
2698 struct sp_node *sn;
2699
2700 if (!sp->root.rb_node)
2701 return NULL;
4a8c7bb5 2702 read_lock(&sp->lock);
1da177e4
LT
2703 sn = sp_lookup(sp, idx, idx+1);
2704 if (sn) {
2705 mpol_get(sn->policy);
2706 pol = sn->policy;
2707 }
4a8c7bb5 2708 read_unlock(&sp->lock);
1da177e4
LT
2709 return pol;
2710}
2711
63f74ca2
KM
2712static void sp_free(struct sp_node *n)
2713{
2714 mpol_put(n->policy);
2715 kmem_cache_free(sn_cache, n);
2716}
2717
771fb4d8 2718/**
75c70128 2719 * mpol_misplaced - check whether current folio node is valid in policy
771fb4d8 2720 *
75c70128 2721 * @folio: folio to be checked
f8fd525b 2722 * @vmf: structure describing the fault
75c70128 2723 * @addr: virtual address in @vma for shared policy lookup and interleave policy
771fb4d8 2724 *
75c70128 2725 * Lookup current policy node id for vma,addr and "compare to" folio's
5f076944 2726 * node id. Policy determination "mimics" alloc_page_vma().
771fb4d8 2727 * Called from fault path where we know the vma and faulting address.
5f076944 2728 *
062db293 2729 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
75c70128 2730 * policy, or a suitable node ID to allocate a replacement folio from.
771fb4d8 2731 */
f8fd525b 2732int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
75c70128 2733 unsigned long addr)
771fb4d8
LS
2734{
2735 struct mempolicy *pol;
ddc1a5cb 2736 pgoff_t ilx;
c33d6c06 2737 struct zoneref *z;
75c70128 2738 int curnid = folio_nid(folio);
f8fd525b 2739 struct vm_area_struct *vma = vmf->vma;
90572890 2740 int thiscpu = raw_smp_processor_id();
f8fd525b 2741 int thisnid = numa_node_id();
98fa15f3 2742 int polnid = NUMA_NO_NODE;
062db293 2743 int ret = NUMA_NO_NODE;
771fb4d8 2744
f8fd525b
DT
2745 /*
2746 * Make sure ptl is held so that we don't preempt and we
2747 * have a stable smp processor id
2748 */
2749 lockdep_assert_held(vmf->ptl);
ddc1a5cb 2750 pol = get_vma_policy(vma, addr, folio_order(folio), &ilx);
771fb4d8
LS
2751 if (!(pol->flags & MPOL_F_MOF))
2752 goto out;
2753
2754 switch (pol->mode) {
2755 case MPOL_INTERLEAVE:
ddc1a5cb 2756 polnid = interleave_nid(pol, ilx);
771fb4d8
LS
2757 break;
2758
fa3bea4e
GP
2759 case MPOL_WEIGHTED_INTERLEAVE:
2760 polnid = weighted_interleave_nid(pol, ilx);
2761 break;
2762
771fb4d8 2763 case MPOL_PREFERRED:
b27abacc
DH
2764 if (node_isset(curnid, pol->nodes))
2765 goto out;
269fbe72 2766 polnid = first_node(pol->nodes);
7858d7bc
FT
2767 break;
2768
2769 case MPOL_LOCAL:
2770 polnid = numa_node_id();
771fb4d8
LS
2771 break;
2772
2773 case MPOL_BIND:
133d04b1
DT
2774 case MPOL_PREFERRED_MANY:
2775 /*
2776		 * Even though MPOL_PREFERRED_MANY can allocate pages outside
2777		 * the policy nodemask, we don't allow NUMA migration to nodes
2778		 * outside the policy nodemask for now. This is done so that if
2779		 * we want demotion to slow memory to happen, and have therefore
2780		 * set up a MPOL_PREFERRED_MANY mask that excludes some DRAM
2781		 * node 'x' before allocating, we do not later promote pages
2782		 * back to node 'x' from a slow memory node.
2783 */
bda420b9 2784 if (pol->flags & MPOL_F_MORON) {
133d04b1
DT
2785 /*
2786 * Optimize placement among multiple nodes
2787 * via NUMA balancing
2788 */
269fbe72 2789 if (node_isset(thisnid, pol->nodes))
bda420b9
HY
2790 break;
2791 goto out;
2792 }
c33d6c06 2793
771fb4d8 2794 /*
771fb4d8
LS
2795 * use current page if in policy nodemask,
2796 * else select nearest allowed node, if any.
2797 * If no allowed nodes, use current [!misplaced].
2798 */
269fbe72 2799 if (node_isset(curnid, pol->nodes))
771fb4d8 2800 goto out;
c33d6c06 2801 z = first_zones_zonelist(
f8fd525b 2802 node_zonelist(thisnid, GFP_HIGHUSER),
771fb4d8 2803 gfp_zone(GFP_HIGHUSER),
269fbe72 2804 &pol->nodes);
c1093b74 2805 polnid = zone_to_nid(z->zone);
771fb4d8
LS
2806 break;
2807
2808 default:
2809 BUG();
2810 }
5606e387 2811
75c70128 2812 /* Migrate the folio towards the node whose CPU is referencing it */
e42c8ff2 2813 if (pol->flags & MPOL_F_MORON) {
90572890 2814 polnid = thisnid;
5606e387 2815
8c9ae56d 2816 if (!should_numa_migrate_memory(current, folio, curnid,
75c70128 2817 thiscpu))
de1c9ce6 2818 goto out;
e42c8ff2
MG
2819 }
2820
771fb4d8
LS
2821 if (curnid != polnid)
2822 ret = polnid;
2823out:
2824 mpol_cond_put(pol);
2825
2826 return ret;
2827}
2828
c11600e4
DR
2829/*
2830 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2831 * dropped after task->mempolicy is set to NULL so that any allocation done as
2832 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2833 * policy.
2834 */
2835void mpol_put_task_policy(struct task_struct *task)
2836{
2837 struct mempolicy *pol;
2838
2839 task_lock(task);
2840 pol = task->mempolicy;
2841 task->mempolicy = NULL;
2842 task_unlock(task);
2843 mpol_put(pol);
2844}
2845
1da177e4
LT
2846static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2847{
1da177e4 2848 rb_erase(&n->nd, &sp->root);
63f74ca2 2849 sp_free(n);
1da177e4
LT
2850}
2851
42288fe3
MG
2852static void sp_node_init(struct sp_node *node, unsigned long start,
2853 unsigned long end, struct mempolicy *pol)
2854{
2855 node->start = start;
2856 node->end = end;
2857 node->policy = pol;
2858}
2859
dbcb0f19
AB
2860static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2861 struct mempolicy *pol)
1da177e4 2862{
869833f2
KM
2863 struct sp_node *n;
2864 struct mempolicy *newpol;
1da177e4 2865
869833f2 2866 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1da177e4
LT
2867 if (!n)
2868 return NULL;
869833f2
KM
2869
2870 newpol = mpol_dup(pol);
2871 if (IS_ERR(newpol)) {
2872 kmem_cache_free(sn_cache, n);
2873 return NULL;
2874 }
2875 newpol->flags |= MPOL_F_SHARED;
42288fe3 2876 sp_node_init(n, start, end, newpol);
869833f2 2877
1da177e4
LT
2878 return n;
2879}
2880
2881/* Replace a policy range. */
93397c3b
HD
2882static int shared_policy_replace(struct shared_policy *sp, pgoff_t start,
2883 pgoff_t end, struct sp_node *new)
1da177e4 2884{
b22d127a 2885 struct sp_node *n;
42288fe3
MG
2886 struct sp_node *n_new = NULL;
2887 struct mempolicy *mpol_new = NULL;
b22d127a 2888 int ret = 0;
1da177e4 2889
42288fe3 2890restart:
4a8c7bb5 2891 write_lock(&sp->lock);
1da177e4
LT
2892 n = sp_lookup(sp, start, end);
2893 /* Take care of old policies in the same range. */
2894 while (n && n->start < end) {
2895 struct rb_node *next = rb_next(&n->nd);
2896 if (n->start >= start) {
2897 if (n->end <= end)
2898 sp_delete(sp, n);
2899 else
2900 n->start = end;
2901 } else {
2902 /* Old policy spanning whole new range. */
2903 if (n->end > end) {
42288fe3
MG
2904 if (!n_new)
2905 goto alloc_new;
2906
2907 *mpol_new = *n->policy;
2908 atomic_set(&mpol_new->refcnt, 1);
7880639c 2909 sp_node_init(n_new, end, n->end, mpol_new);
1da177e4 2910 n->end = start;
5ca39575 2911 sp_insert(sp, n_new);
42288fe3
MG
2912 n_new = NULL;
2913 mpol_new = NULL;
1da177e4
LT
2914 break;
2915 } else
2916 n->end = start;
2917 }
2918 if (!next)
2919 break;
2920 n = rb_entry(next, struct sp_node, nd);
2921 }
2922 if (new)
2923 sp_insert(sp, new);
4a8c7bb5 2924 write_unlock(&sp->lock);
42288fe3
MG
2925 ret = 0;
2926
2927err_out:
2928 if (mpol_new)
2929 mpol_put(mpol_new);
2930 if (n_new)
2931 kmem_cache_free(sn_cache, n_new);
2932
b22d127a 2933 return ret;
42288fe3
MG
2934
2935alloc_new:
4a8c7bb5 2936 write_unlock(&sp->lock);
42288fe3
MG
2937 ret = -ENOMEM;
2938 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2939 if (!n_new)
2940 goto err_out;
2941 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2942 if (!mpol_new)
2943 goto err_out;
4ad09955 2944 atomic_set(&mpol_new->refcnt, 1);
42288fe3 2945 goto restart;
1da177e4
LT
2946}
2947
71fe804b
LS
2948/**
2949 * mpol_shared_policy_init - initialize shared policy for inode
2950 * @sp: pointer to inode shared policy
2951 * @mpol: struct mempolicy to install
2952 *
2953 * Install non-NULL @mpol in inode's shared policy rb-tree.
2954 * On entry, the current task has a reference on a non-NULL @mpol.
2955 * This must be released on exit.
4bfc4495 2956 * This is called at get_inode() calls and we can use GFP_KERNEL.
71fe804b
LS
2957 */
2958void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2959{
58568d2a
MX
2960 int ret;
2961
71fe804b 2962 sp->root = RB_ROOT; /* empty tree == default mempolicy */
4a8c7bb5 2963 rwlock_init(&sp->lock);
71fe804b
LS
2964
2965 if (mpol) {
35ec8fa0
HD
2966 struct sp_node *sn;
2967 struct mempolicy *npol;
4bfc4495 2968 NODEMASK_SCRATCH(scratch);
71fe804b 2969
4bfc4495 2970 if (!scratch)
5c0c1654 2971 goto put_mpol;
35ec8fa0
HD
2972
2973 /* contextualize the tmpfs mount point mempolicy to this file */
2974 npol = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2975 if (IS_ERR(npol))
0cae3457 2976 goto free_scratch; /* no valid nodemask intersection */
58568d2a
MX
2977
2978 task_lock(current);
35ec8fa0 2979 ret = mpol_set_nodemask(npol, &mpol->w.user_nodemask, scratch);
58568d2a 2980 task_unlock(current);
15d77835 2981 if (ret)
35ec8fa0
HD
2982 goto put_npol;
2983
2984 /* alloc node covering entire file; adds ref to file's npol */
2985 sn = sp_alloc(0, MAX_LFS_FILESIZE >> PAGE_SHIFT, npol);
2986 if (sn)
2987 sp_insert(sp, sn);
2988put_npol:
2989 mpol_put(npol); /* drop initial ref on file's npol */
0cae3457 2990free_scratch:
4bfc4495 2991 NODEMASK_SCRATCH_FREE(scratch);
5c0c1654
LS
2992put_mpol:
2993 mpol_put(mpol); /* drop our incoming ref on sb mpol */
7339ff83
RH
2994 }
2995}
2996
c36f6e6d
HD
2997int mpol_set_shared_policy(struct shared_policy *sp,
2998 struct vm_area_struct *vma, struct mempolicy *pol)
1da177e4
LT
2999{
3000 int err;
3001 struct sp_node *new = NULL;
3002 unsigned long sz = vma_pages(vma);
3003
c36f6e6d
HD
3004 if (pol) {
3005 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, pol);
1da177e4
LT
3006 if (!new)
3007 return -ENOMEM;
3008 }
c36f6e6d 3009 err = shared_policy_replace(sp, vma->vm_pgoff, vma->vm_pgoff + sz, new);
1da177e4 3010 if (err && new)
63f74ca2 3011 sp_free(new);
1da177e4
LT
3012 return err;
3013}
3014
3015/* Free a backing policy store on inode delete. */
c36f6e6d 3016void mpol_free_shared_policy(struct shared_policy *sp)
1da177e4
LT
3017{
3018 struct sp_node *n;
3019 struct rb_node *next;
3020
c36f6e6d 3021 if (!sp->root.rb_node)
1da177e4 3022 return;
c36f6e6d
HD
3023 write_lock(&sp->lock);
3024 next = rb_first(&sp->root);
1da177e4
LT
3025 while (next) {
3026 n = rb_entry(next, struct sp_node, nd);
3027 next = rb_next(&n->nd);
c36f6e6d 3028 sp_delete(sp, n);
1da177e4 3029 }
c36f6e6d 3030 write_unlock(&sp->lock);
1da177e4
LT
3031}
3032
1a687c2e 3033#ifdef CONFIG_NUMA_BALANCING
c297663c 3034static int __initdata numabalancing_override;
1a687c2e
MG
3035
3036static void __init check_numabalancing_enable(void)
3037{
3038 bool numabalancing_default = false;
3039
3040 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
3041 numabalancing_default = true;
3042
c297663c
MG
3043 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
3044 if (numabalancing_override)
3045 set_numabalancing_state(numabalancing_override == 1);
3046
b0dc2b9b 3047 if (num_online_nodes() > 1 && !numabalancing_override) {
756a025f 3048 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
c297663c 3049 numabalancing_default ? "Enabling" : "Disabling");
1a687c2e
MG
3050 set_numabalancing_state(numabalancing_default);
3051 }
3052}
3053
3054static int __init setup_numabalancing(char *str)
3055{
3056 int ret = 0;
3057 if (!str)
3058 goto out;
1a687c2e
MG
3059
3060 if (!strcmp(str, "enable")) {
c297663c 3061 numabalancing_override = 1;
1a687c2e
MG
3062 ret = 1;
3063 } else if (!strcmp(str, "disable")) {
c297663c 3064 numabalancing_override = -1;
1a687c2e
MG
3065 ret = 1;
3066 }
3067out:
3068 if (!ret)
4a404bea 3069 pr_warn("Unable to parse numa_balancing=\n");
1a687c2e
MG
3070
3071 return ret;
3072}
3073__setup("numa_balancing=", setup_numabalancing);
3074#else
3075static inline void __init check_numabalancing_enable(void)
3076{
3077}
3078#endif /* CONFIG_NUMA_BALANCING */
3079
1da177e4
LT
3080void __init numa_policy_init(void)
3081{
b71636e2
PM
3082 nodemask_t interleave_nodes;
3083 unsigned long largest = 0;
3084 int nid, prefer = 0;
3085
1da177e4
LT
3086 policy_cache = kmem_cache_create("numa_policy",
3087 sizeof(struct mempolicy),
20c2df83 3088 0, SLAB_PANIC, NULL);
1da177e4
LT
3089
3090 sn_cache = kmem_cache_create("shared_policy_node",
3091 sizeof(struct sp_node),
20c2df83 3092 0, SLAB_PANIC, NULL);
1da177e4 3093
5606e387
MG
3094 for_each_node(nid) {
3095 preferred_node_policy[nid] = (struct mempolicy) {
3096 .refcnt = ATOMIC_INIT(1),
3097 .mode = MPOL_PREFERRED,
3098 .flags = MPOL_F_MOF | MPOL_F_MORON,
269fbe72 3099 .nodes = nodemask_of_node(nid),
5606e387
MG
3100 };
3101 }
3102
b71636e2
PM
3103 /*
3104 * Set interleaving policy for system init. Interleaving is only
3105 * enabled across suitably sized nodes (default is >= 16MB), or
3106 * fall back to the largest node if they're all smaller.
3107 */
3108 nodes_clear(interleave_nodes);
01f13bd6 3109 for_each_node_state(nid, N_MEMORY) {
b71636e2
PM
3110 unsigned long total_pages = node_present_pages(nid);
3111
3112 /* Preserve the largest node */
3113 if (largest < total_pages) {
3114 largest = total_pages;
3115 prefer = nid;
3116 }
3117
3118 /* Interleave this node? */
3119 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
3120 node_set(nid, interleave_nodes);
3121 }
3122
3123 /* All too small, use the largest */
3124 if (unlikely(nodes_empty(interleave_nodes)))
3125 node_set(prefer, interleave_nodes);
1da177e4 3126
028fec41 3127 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
b1de0d13 3128 pr_err("%s: interleaving failed\n", __func__);
1a687c2e
MG
3129
3130 check_numabalancing_enable();
1da177e4
LT
3131}
3132
8bccd85f 3133/* Reset policy of current process to default */
1da177e4
LT
3134void numa_default_policy(void)
3135{
028fec41 3136 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1da177e4 3137}
68860ec1 3138
095f1fc4
LS
3139/*
3140 * Parse and format mempolicy from/to strings
3141 */
345ace9c
LS
3142static const char * const policy_modes[] =
3143{
3144 [MPOL_DEFAULT] = "default",
3145 [MPOL_PREFERRED] = "prefer",
3146 [MPOL_BIND] = "bind",
3147 [MPOL_INTERLEAVE] = "interleave",
fa3bea4e 3148 [MPOL_WEIGHTED_INTERLEAVE] = "weighted interleave",
d3a71033 3149 [MPOL_LOCAL] = "local",
b27abacc 3150 [MPOL_PREFERRED_MANY] = "prefer (many)",
345ace9c 3151};
1a75a6c8 3152
095f1fc4
LS
3153#ifdef CONFIG_TMPFS
3154/**
f2a07f40 3155 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
095f1fc4 3156 * @str: string containing mempolicy to parse
71fe804b 3157 * @mpol: pointer to struct mempolicy pointer, returned on success.
095f1fc4
LS
3158 *
3159 * Format of input:
3160 * <mode>[=<flags>][:<nodelist>]
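 * For example (illustrative): "interleave:0-3", "bind=static:0,2" and
 * "prefer:1" are accepted here; "local" and "default" take no nodelist.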
3161 *
dad5b023 3162 * Return: %0 on success, else %1
095f1fc4 3163 */
a7a88b23 3164int mpol_parse_str(char *str, struct mempolicy **mpol)
095f1fc4 3165{
71fe804b 3166 struct mempolicy *new = NULL;
f2a07f40 3167 unsigned short mode_flags;
71fe804b 3168 nodemask_t nodes;
095f1fc4
LS
3169 char *nodelist = strchr(str, ':');
3170 char *flags = strchr(str, '=');
dedf2c73 3171 int err = 1, mode;
095f1fc4 3172
c7a91bc7
DC
3173 if (flags)
3174 *flags++ = '\0'; /* terminate mode string */
3175
095f1fc4
LS
3176 if (nodelist) {
3177 /* NUL-terminate mode or flags string */
3178 *nodelist++ = '\0';
71fe804b 3179 if (nodelist_parse(nodelist, nodes))
095f1fc4 3180 goto out;
01f13bd6 3181 if (!nodes_subset(nodes, node_states[N_MEMORY]))
095f1fc4 3182 goto out;
71fe804b
LS
3183 } else
3184 nodes_clear(nodes);
3185
dedf2c73 3186 mode = match_string(policy_modes, MPOL_MAX, str);
3187 if (mode < 0)
095f1fc4
LS
3188 goto out;
3189
71fe804b 3190 switch (mode) {
095f1fc4 3191 case MPOL_PREFERRED:
71fe804b 3192 /*
aa9f7d51
RD
3193 * Insist on a nodelist of one node only, although later
3194 * we use first_node(nodes) to grab a single node, so here
3195 * nodelist (or nodes) cannot be empty.
71fe804b 3196 */
095f1fc4
LS
3197 if (nodelist) {
3198 char *rest = nodelist;
3199 while (isdigit(*rest))
3200 rest++;
926f2ae0
KM
3201 if (*rest)
3202 goto out;
aa9f7d51
RD
3203 if (nodes_empty(nodes))
3204 goto out;
095f1fc4
LS
3205 }
3206 break;
095f1fc4 3207 case MPOL_INTERLEAVE:
fa3bea4e 3208 case MPOL_WEIGHTED_INTERLEAVE:
095f1fc4
LS
3209 /*
3210 * Default to online nodes with memory if no nodelist
3211 */
3212 if (!nodelist)
01f13bd6 3213 nodes = node_states[N_MEMORY];
3f226aa1 3214 break;
71fe804b 3215 case MPOL_LOCAL:
3f226aa1 3216 /*
71fe804b 3217 * Don't allow a nodelist; mpol_new() checks flags
3f226aa1 3218 */
71fe804b 3219 if (nodelist)
3f226aa1 3220 goto out;
3f226aa1 3221 break;
413b43de
RT
3222 case MPOL_DEFAULT:
3223 /*
3224 * Insist on a empty nodelist
3225 */
3226 if (!nodelist)
3227 err = 0;
3228 goto out;
b27abacc 3229 case MPOL_PREFERRED_MANY:
d69b2e63
KM
3230 case MPOL_BIND:
3231 /*
3232 * Insist on a nodelist
3233 */
3234 if (!nodelist)
3235 goto out;
095f1fc4
LS
3236 }
3237
71fe804b 3238 mode_flags = 0;
095f1fc4
LS
3239 if (flags) {
3240 /*
3241 * Currently, we only support two mutually exclusive
3242 * mode flags.
3243 */
3244 if (!strcmp(flags, "static"))
71fe804b 3245 mode_flags |= MPOL_F_STATIC_NODES;
095f1fc4 3246 else if (!strcmp(flags, "relative"))
71fe804b 3247 mode_flags |= MPOL_F_RELATIVE_NODES;
095f1fc4 3248 else
926f2ae0 3249 goto out;
095f1fc4 3250 }
71fe804b
LS
3251
3252 new = mpol_new(mode, mode_flags, &nodes);
3253 if (IS_ERR(new))
926f2ae0
KM
3254 goto out;
3255
f2a07f40
HD
3256 /*
3257 * Save nodes for mpol_to_str() to show the tmpfs mount options
3258 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3259 */
269fbe72
BW
3260 if (mode != MPOL_PREFERRED) {
3261 new->nodes = nodes;
3262 } else if (nodelist) {
3263 nodes_clear(new->nodes);
3264 node_set(first_node(nodes), new->nodes);
3265 } else {
7858d7bc 3266 new->mode = MPOL_LOCAL;
269fbe72 3267 }
f2a07f40
HD
3268
3269 /*
3270 * Save nodes for contextualization: this will be used to "clone"
3271 * the mempolicy in a specific context [cpuset] at a later time.
3272 */
3273 new->w.user_nodemask = nodes;
3274
926f2ae0 3275 err = 0;
71fe804b 3276
095f1fc4
LS
3277out:
3278 /* Restore string for error message */
3279 if (nodelist)
3280 *--nodelist = ':';
3281 if (flags)
3282 *--flags = '=';
71fe804b
LS
3283 if (!err)
3284 *mpol = new;
095f1fc4
LS
3285 return err;
3286}
3287#endif /* CONFIG_TMPFS */
3288
71fe804b
LS
3289/**
3290 * mpol_to_str - format a mempolicy structure for printing
3291 * @buffer: to contain formatted mempolicy string
3292 * @maxlen: length of @buffer
3293 * @pol: pointer to mempolicy to be formatted
71fe804b 3294 *
948927ee
DR
3295 * Convert @pol into a string. If @buffer is too short, truncate the string.
3296 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3297 * longest flag, "relative", and to display at least a few node ids.
1a75a6c8 3298 */
948927ee 3299void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
1a75a6c8
CL
3300{
3301 char *p = buffer;
948927ee
DR
3302 nodemask_t nodes = NODE_MASK_NONE;
3303 unsigned short mode = MPOL_DEFAULT;
3304 unsigned short flags = 0;
2291990a 3305
8790c71a 3306 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
bea904d5 3307 mode = pol->mode;
948927ee
DR
3308 flags = pol->flags;
3309 }
bea904d5 3310
1a75a6c8
CL
3311 switch (mode) {
3312 case MPOL_DEFAULT:
7858d7bc 3313 case MPOL_LOCAL:
1a75a6c8 3314 break;
1a75a6c8 3315 case MPOL_PREFERRED:
b27abacc 3316 case MPOL_PREFERRED_MANY:
1a75a6c8 3317 case MPOL_BIND:
1a75a6c8 3318 case MPOL_INTERLEAVE:
fa3bea4e 3319 case MPOL_WEIGHTED_INTERLEAVE:
269fbe72 3320 nodes = pol->nodes;
1a75a6c8 3321 break;
1a75a6c8 3322 default:
948927ee
DR
3323 WARN_ON_ONCE(1);
3324 snprintf(p, maxlen, "unknown");
3325 return;
1a75a6c8
CL
3326 }
3327
b7a9f420 3328 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
1a75a6c8 3329
fc36b8d3 3330 if (flags & MPOL_MODE_FLAGS) {
948927ee 3331 p += snprintf(p, buffer + maxlen - p, "=");
f5b087b5 3332
2291990a
LS
3333 /*
3334 * Currently, the only defined flags are mutually exclusive
3335 */
f5b087b5 3336 if (flags & MPOL_F_STATIC_NODES)
2291990a
LS
3337 p += snprintf(p, buffer + maxlen - p, "static");
3338 else if (flags & MPOL_F_RELATIVE_NODES)
3339 p += snprintf(p, buffer + maxlen - p, "relative");
f5b087b5
DR
3340 }
3341
9e763e0f
TH
3342 if (!nodes_empty(nodes))
3343 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3344 nodemask_pr_args(&nodes));
1a75a6c8 3345}
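/*
 * Example output (illustrative): an MPOL_INTERLEAVE policy over nodes 0-3
 * formats as "interleave:0-3", and MPOL_BIND with MPOL_F_STATIC_NODES over
 * nodes 0 and 2 formats as "bind=static:0,2".
 */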
dce41f5a
RK
3346
#ifdef CONFIG_SYSFS
struct iw_node_attr {
        struct kobj_attribute kobj_attr;
        int nid;
};

static ssize_t node_show(struct kobject *kobj, struct kobj_attribute *attr,
                         char *buf)
{
        struct iw_node_attr *node_attr;
        u8 weight;

        node_attr = container_of(attr, struct iw_node_attr, kobj_attr);
        weight = get_il_weight(node_attr->nid);
        return sysfs_emit(buf, "%d\n", weight);
}

/*
 * Update one node's interleave weight: build a patched copy of the current
 * table, publish it with RCU and free the old table after a grace period.
 */
static ssize_t node_store(struct kobject *kobj, struct kobj_attribute *attr,
                          const char *buf, size_t count)
{
        struct iw_node_attr *node_attr;
        u8 *new;
        u8 *old;
        u8 weight = 0;

        node_attr = container_of(attr, struct iw_node_attr, kobj_attr);
        if (count == 0 || sysfs_streq(buf, ""))
                weight = 0;
        else if (kstrtou8(buf, 0, &weight))
                return -EINVAL;

        new = kzalloc(nr_node_ids, GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        mutex_lock(&iw_table_lock);
        old = rcu_dereference_protected(iw_table,
                                        lockdep_is_held(&iw_table_lock));
        if (old)
                memcpy(new, old, nr_node_ids);
        new[node_attr->nid] = weight;
        rcu_assign_pointer(iw_table, new);
        mutex_unlock(&iw_table_lock);
        synchronize_rcu();
        kfree(old);
        return count;
}
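
/*
 * Illustrative sketch, not part of mempolicy.c: the RCU read side that pairs
 * with the copy-and-publish in node_store() above. A reader such as
 * get_il_weight() is expected to sample iw_table under rcu_read_lock() and
 * fall back to a weight of 1 when no table or no entry is present;
 * example_read_weight() is a made-up name for this sketch.
 */
#if 0	/* example only */
static u8 example_read_weight(int nid)
{
        u8 *table;
        u8 weight = 1;	/* default weight when none has been set */

        rcu_read_lock();
        table = rcu_dereference(iw_table);
        if (table && table[nid])
                weight = table[nid];
        rcu_read_unlock();

        return weight;
}
#endif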

static struct iw_node_attr **node_attrs;

static void sysfs_wi_node_release(struct iw_node_attr *node_attr,
                                  struct kobject *parent)
{
        if (!node_attr)
                return;
        sysfs_remove_file(parent, &node_attr->kobj_attr.attr);
        kfree(node_attr->kobj_attr.attr.name);
        kfree(node_attr);
}

static void sysfs_wi_release(struct kobject *wi_kobj)
{
        int i;

        for (i = 0; i < nr_node_ids; i++)
                sysfs_wi_node_release(node_attrs[i], wi_kobj);
        kobject_put(wi_kobj);
}

static const struct kobj_type wi_ktype = {
        .sysfs_ops = &kobj_sysfs_ops,
        .release = sysfs_wi_release,
};

static int add_weight_node(int nid, struct kobject *wi_kobj)
{
        struct iw_node_attr *node_attr;
        char *name;

        node_attr = kzalloc(sizeof(*node_attr), GFP_KERNEL);
        if (!node_attr)
                return -ENOMEM;

        name = kasprintf(GFP_KERNEL, "node%d", nid);
        if (!name) {
                kfree(node_attr);
                return -ENOMEM;
        }

        sysfs_attr_init(&node_attr->kobj_attr.attr);
        node_attr->kobj_attr.attr.name = name;
        node_attr->kobj_attr.attr.mode = 0644;
        node_attr->kobj_attr.show = node_show;
        node_attr->kobj_attr.store = node_store;
        node_attr->nid = nid;

        if (sysfs_create_file(wi_kobj, &node_attr->kobj_attr.attr)) {
                kfree(node_attr->kobj_attr.attr.name);
                kfree(node_attr);
                pr_err("failed to add attribute to weighted_interleave\n");
                return -ENOMEM;
        }

        node_attrs[nid] = node_attr;
        return 0;
}

static int add_weighted_interleave_group(struct kobject *root_kobj)
{
        struct kobject *wi_kobj;
        int nid, err;

        wi_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (!wi_kobj)
                return -ENOMEM;

        err = kobject_init_and_add(wi_kobj, &wi_ktype, root_kobj,
                                   "weighted_interleave");
        if (err) {
                kfree(wi_kobj);
                return err;
        }

        for_each_node_state(nid, N_POSSIBLE) {
                err = add_weight_node(nid, wi_kobj);
                if (err) {
                        pr_err("failed to add sysfs [node%d]\n", nid);
                        break;
                }
        }
        /* A per-node failure tears the group back down but is not fatal. */
        if (err)
                kobject_put(wi_kobj);
        return 0;
}

static void mempolicy_kobj_release(struct kobject *kobj)
{
        u8 *old;

        mutex_lock(&iw_table_lock);
        old = rcu_dereference_protected(iw_table,
                                        lockdep_is_held(&iw_table_lock));
        rcu_assign_pointer(iw_table, NULL);
        mutex_unlock(&iw_table_lock);
        synchronize_rcu();
        kfree(old);
        kfree(node_attrs);
        kfree(kobj);
}

static const struct kobj_type mempolicy_ktype = {
        .release = mempolicy_kobj_release
};

static int __init mempolicy_sysfs_init(void)
{
        int err;
        static struct kobject *mempolicy_kobj;

        mempolicy_kobj = kzalloc(sizeof(*mempolicy_kobj), GFP_KERNEL);
        if (!mempolicy_kobj) {
                err = -ENOMEM;
                goto err_out;
        }

        node_attrs = kcalloc(nr_node_ids, sizeof(struct iw_node_attr *),
                             GFP_KERNEL);
        if (!node_attrs) {
                err = -ENOMEM;
                goto mempol_out;
        }

        err = kobject_init_and_add(mempolicy_kobj, &mempolicy_ktype, mm_kobj,
                                   "mempolicy");
        if (err)
                goto node_out;

        err = add_weighted_interleave_group(mempolicy_kobj);
        if (err) {
                pr_err("mempolicy sysfs structure failed to initialize\n");
                kobject_put(mempolicy_kobj);
                return err;
        }

        return err;
node_out:
        kfree(node_attrs);
mempol_out:
        kfree(mempolicy_kobj);
err_out:
        pr_err("failed to add mempolicy kobject to the system\n");
        return err;
}

late_initcall(mempolicy_sysfs_init);
#endif /* CONFIG_SYSFS */
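
/*
 * Illustrative usage sketch, not part of mempolicy.c: from userspace, the
 * initcall above exposes one writable file per possible node under
 * /sys/kernel/mm/mempolicy/weighted_interleave/, each accepting a u8
 * weight. set_node_weight() below is a made-up helper for this sketch.
 */
#if 0	/* example only, userspace */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_node_weight(int nid, unsigned int weight)
{
        char path[80], val[8];
        int fd, len, ret;

        snprintf(path, sizeof(path),
                 "/sys/kernel/mm/mempolicy/weighted_interleave/node%d", nid);
        len = snprintf(val, sizeof(val), "%u", weight);

        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        ret = (write(fd, val, len) == len) ? 0 : -1;
        close(fd);
        return ret;
}
#endif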