migrate_pages: separate hugetlb folios migration
[linux-block.git] / mm / migrate.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
b20a3503 2/*
14e0f9bc 3 * Memory Migration functionality - linux/mm/migrate.c
b20a3503
CL
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
cde53535 13 * Christoph Lameter
b20a3503
CL
14 */
15
16#include <linux/migrate.h>
b95f1b31 17#include <linux/export.h>
b20a3503 18#include <linux/swap.h>
0697212a 19#include <linux/swapops.h>
b20a3503 20#include <linux/pagemap.h>
e23ca00b 21#include <linux/buffer_head.h>
b20a3503 22#include <linux/mm_inline.h>
b488893a 23#include <linux/nsproxy.h>
b20a3503 24#include <linux/pagevec.h>
e9995ef9 25#include <linux/ksm.h>
b20a3503
CL
26#include <linux/rmap.h>
27#include <linux/topology.h>
28#include <linux/cpu.h>
29#include <linux/cpuset.h>
04e62a29 30#include <linux/writeback.h>
742755a1
CL
31#include <linux/mempolicy.h>
32#include <linux/vmalloc.h>
86c3a764 33#include <linux/security.h>
42cb14b1 34#include <linux/backing-dev.h>
bda807d4 35#include <linux/compaction.h>
4f5ca265 36#include <linux/syscalls.h>
7addf443 37#include <linux/compat.h>
290408d4 38#include <linux/hugetlb.h>
8e6ac7fa 39#include <linux/hugetlb_cgroup.h>
5a0e3ad6 40#include <linux/gfp.h>
df6ad698 41#include <linux/pfn_t.h>
a5430dda 42#include <linux/memremap.h>
8315ada7 43#include <linux/userfaultfd_k.h>
bf6bddf1 44#include <linux/balloon_compaction.h>
33c3fc71 45#include <linux/page_idle.h>
d435edca 46#include <linux/page_owner.h>
6e84f315 47#include <linux/sched/mm.h>
197e7e52 48#include <linux/ptrace.h>
34290e2c 49#include <linux/oom.h>
884a6e5d 50#include <linux/memory.h>
ac16ec83 51#include <linux/random.h>
c574bbe9 52#include <linux/sched/sysctl.h>
467b171a 53#include <linux/memory-tiers.h>
b20a3503 54
0d1836c3
MN
55#include <asm/tlbflush.h>
56
7b2a2d4a
MG
57#include <trace/events/migrate.h>
58
b20a3503
CL
59#include "internal.h"
60
9e5bcd61 61int isolate_movable_page(struct page *page, isolate_mode_t mode)
bda807d4 62{
19979497 63 struct folio *folio = folio_get_nontail_page(page);
68f2736a 64 const struct movable_operations *mops;
bda807d4
MK
65
66 /*
67 * Avoid burning cycles with pages that are yet under __free_pages(),
68 * or just got freed under us.
69 *
70 * In case we 'win' a race for a movable page being freed under us and
71 * raise its refcount preventing __free_pages() from doing its job
72 * the put_page() at the end of this block will take care of
73 * release this page, thus avoiding a nasty leakage.
74 */
19979497 75 if (!folio)
bda807d4
MK
76 goto out;
77
19979497
VMO
78 if (unlikely(folio_test_slab(folio)))
79 goto out_putfolio;
8b881763
VB
80 /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
81 smp_rmb();
bda807d4 82 /*
8b881763
VB
83 * Check movable flag before taking the page lock because
84 * we use non-atomic bitops on newly allocated page flags so
85 * unconditionally grabbing the lock ruins page's owner side.
bda807d4 86 */
19979497
VMO
87 if (unlikely(!__folio_test_movable(folio)))
88 goto out_putfolio;
8b881763
VB
89 /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
90 smp_rmb();
19979497
VMO
91 if (unlikely(folio_test_slab(folio)))
92 goto out_putfolio;
8b881763 93
bda807d4
MK
94 /*
95 * As movable pages are not isolated from LRU lists, concurrent
96 * compaction threads can race against page migration functions
97 * as well as race against the releasing a page.
98 *
99 * In order to avoid having an already isolated movable page
100 * being (wrongly) re-isolated while it is under migration,
101 * or to avoid attempting to isolate pages being released,
102 * lets be sure we have the page lock
103 * before proceeding with the movable page isolation steps.
104 */
19979497
VMO
105 if (unlikely(!folio_trylock(folio)))
106 goto out_putfolio;
bda807d4 107
19979497 108 if (!folio_test_movable(folio) || folio_test_isolated(folio))
bda807d4
MK
109 goto out_no_isolated;
110
19979497
VMO
111 mops = folio_movable_ops(folio);
112 VM_BUG_ON_FOLIO(!mops, folio);
bda807d4 113
19979497 114 if (!mops->isolate_page(&folio->page, mode))
bda807d4
MK
115 goto out_no_isolated;
116
117 /* Driver shouldn't use PG_isolated bit of page->flags */
19979497
VMO
118 WARN_ON_ONCE(folio_test_isolated(folio));
119 folio_set_isolated(folio);
120 folio_unlock(folio);
bda807d4 121
9e5bcd61 122 return 0;
bda807d4
MK
123
124out_no_isolated:
19979497
VMO
125 folio_unlock(folio);
126out_putfolio:
127 folio_put(folio);
bda807d4 128out:
9e5bcd61 129 return -EBUSY;
bda807d4
MK
130}
131
280d724a 132static void putback_movable_folio(struct folio *folio)
bda807d4 133{
280d724a 134 const struct movable_operations *mops = folio_movable_ops(folio);
bda807d4 135
280d724a
VMO
136 mops->putback_page(&folio->page);
137 folio_clear_isolated(folio);
bda807d4
MK
138}
139
5733c7d1
RA
140/*
141 * Put previously isolated pages back onto the appropriate lists
142 * from where they were once taken off for compaction/migration.
143 *
59c82b70
JK
144 * This function shall be used whenever the isolated pageset has been
145 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
7ce82f4c 146 * and isolate_hugetlb().
5733c7d1
RA
147 */
148void putback_movable_pages(struct list_head *l)
149{
280d724a
VMO
150 struct folio *folio;
151 struct folio *folio2;
5733c7d1 152
280d724a
VMO
153 list_for_each_entry_safe(folio, folio2, l, lru) {
154 if (unlikely(folio_test_hugetlb(folio))) {
155 folio_putback_active_hugetlb(folio);
31caf665
NH
156 continue;
157 }
280d724a 158 list_del(&folio->lru);
bda807d4 159 /*
280d724a
VMO
160 * We isolated non-lru movable folio so here we can use
161 * __PageMovable because LRU folio's mapping cannot have
bda807d4
MK
162 * PAGE_MAPPING_MOVABLE.
163 */
280d724a
VMO
164 if (unlikely(__folio_test_movable(folio))) {
165 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
166 folio_lock(folio);
167 if (folio_test_movable(folio))
168 putback_movable_folio(folio);
bda807d4 169 else
280d724a
VMO
170 folio_clear_isolated(folio);
171 folio_unlock(folio);
172 folio_put(folio);
bda807d4 173 } else {
280d724a
VMO
174 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
175 folio_is_file_lru(folio), -folio_nr_pages(folio));
176 folio_putback_lru(folio);
bda807d4 177 }
b20a3503 178 }
b20a3503
CL
179}
180
0697212a
CL
181/*
182 * Restore a potential migration pte to a working pte entry
183 */
2f031c6f
MWO
184static bool remove_migration_pte(struct folio *folio,
185 struct vm_area_struct *vma, unsigned long addr, void *old)
0697212a 186{
4eecb8b9 187 DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
0697212a 188
3fe87967 189 while (page_vma_mapped_walk(&pvmw)) {
6c287605 190 rmap_t rmap_flags = RMAP_NONE;
4eecb8b9
MWO
191 pte_t pte;
192 swp_entry_t entry;
193 struct page *new;
194 unsigned long idx = 0;
195
196 /* pgoff is invalid for ksm pages, but they are never large */
197 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
198 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
199 new = folio_page(folio, idx);
0697212a 200
616b8371
ZY
201#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202 /* PMD-mapped THP migration entry */
203 if (!pvmw.pte) {
4eecb8b9
MWO
204 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 !folio_test_pmd_mappable(folio), folio);
616b8371
ZY
206 remove_migration_pmd(&pvmw, new);
207 continue;
208 }
209#endif
210
4eecb8b9 211 folio_get(folio);
2e346877 212 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
3fe87967
KS
213 if (pte_swp_soft_dirty(*pvmw.pte))
214 pte = pte_mksoft_dirty(pte);
0697212a 215
3fe87967
KS
216 /*
217 * Recheck VMA as permissions can change since migration started
218 */
219 entry = pte_to_swp_entry(*pvmw.pte);
2e346877
PX
220 if (!is_migration_entry_young(entry))
221 pte = pte_mkold(pte);
222 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
223 pte = pte_mkdirty(pte);
4dd845b5 224 if (is_writable_migration_entry(entry))
3fe87967 225 pte = maybe_mkwrite(pte, vma);
f45ec5ff
PX
226 else if (pte_swp_uffd_wp(*pvmw.pte))
227 pte = pte_mkuffd_wp(pte);
d3cb8bf6 228
6c287605
DH
229 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
230 rmap_flags |= RMAP_EXCLUSIVE;
231
6128763f 232 if (unlikely(is_device_private_page(new))) {
4dd845b5
AP
233 if (pte_write(pte))
234 entry = make_writable_device_private_entry(
235 page_to_pfn(new));
236 else
237 entry = make_readable_device_private_entry(
238 page_to_pfn(new));
6128763f 239 pte = swp_entry_to_pte(entry);
3d321bf8
RC
240 if (pte_swp_soft_dirty(*pvmw.pte))
241 pte = pte_swp_mksoft_dirty(pte);
6128763f
RC
242 if (pte_swp_uffd_wp(*pvmw.pte))
243 pte = pte_swp_mkuffd_wp(pte);
d2b2c6dd 244 }
a5430dda 245
3ef8fd7f 246#ifdef CONFIG_HUGETLB_PAGE
4eecb8b9 247 if (folio_test_hugetlb(folio)) {
79c1c594
CL
248 unsigned int shift = huge_page_shift(hstate_vma(vma));
249
3fe87967 250 pte = pte_mkhuge(pte);
79c1c594 251 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
4eecb8b9 252 if (folio_test_anon(folio))
28c5209d 253 hugepage_add_anon_rmap(new, vma, pvmw.address,
6c287605 254 rmap_flags);
3fe87967 255 else
fb3d824d 256 page_dup_file_rmap(new, true);
1eba86c0 257 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
383321ab
AK
258 } else
259#endif
260 {
4eecb8b9 261 if (folio_test_anon(folio))
f1e2db12 262 page_add_anon_rmap(new, vma, pvmw.address,
6c287605 263 rmap_flags);
383321ab 264 else
cea86fe2 265 page_add_file_rmap(new, vma, false);
1eba86c0 266 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
383321ab 267 }
b7435507 268 if (vma->vm_flags & VM_LOCKED)
96f97c43 269 mlock_drain_local();
e125fe40 270
4cc79b33
AK
271 trace_remove_migration_pte(pvmw.address, pte_val(pte),
272 compound_order(new));
273
3fe87967
KS
274 /* No need to invalidate - it was non-present before */
275 update_mmu_cache(vma, pvmw.address, pvmw.pte);
276 }
51afb12b 277
e4b82222 278 return true;
0697212a
CL
279}
280
04e62a29
CL
281/*
282 * Get rid of all migration entries and replace them by
283 * references to the indicated page.
284 */
4eecb8b9 285void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
04e62a29 286{
051ac83a
JK
287 struct rmap_walk_control rwc = {
288 .rmap_one = remove_migration_pte,
4eecb8b9 289 .arg = src,
051ac83a
JK
290 };
291
e388466d 292 if (locked)
2f031c6f 293 rmap_walk_locked(dst, &rwc);
e388466d 294 else
2f031c6f 295 rmap_walk(dst, &rwc);
04e62a29
CL
296}
297
0697212a
CL
298/*
299 * Something used the pte of a page under migration. We need to
300 * get to the page and wait until migration is finished.
301 * When we return from this function the fault will be retried.
0697212a 302 */
e66f17ff 303void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
30dad309 304 spinlock_t *ptl)
0697212a 305{
30dad309 306 pte_t pte;
0697212a 307 swp_entry_t entry;
0697212a 308
30dad309 309 spin_lock(ptl);
0697212a
CL
310 pte = *ptep;
311 if (!is_swap_pte(pte))
312 goto out;
313
314 entry = pte_to_swp_entry(pte);
315 if (!is_migration_entry(entry))
316 goto out;
317
ffa65753 318 migration_entry_wait_on_locked(entry, ptep, ptl);
0697212a
CL
319 return;
320out:
321 pte_unmap_unlock(ptep, ptl);
322}
323
30dad309
NH
324void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
325 unsigned long address)
326{
327 spinlock_t *ptl = pte_lockptr(mm, pmd);
328 pte_t *ptep = pte_offset_map(pmd, address);
329 __migration_entry_wait(mm, ptep, ptl);
330}
331
ad1ac596 332#ifdef CONFIG_HUGETLB_PAGE
fcd48540
PX
333/*
334 * The vma read lock must be held upon entry. Holding that lock prevents either
335 * the pte or the ptl from being freed.
336 *
337 * This function will release the vma lock before returning.
338 */
339void __migration_entry_wait_huge(struct vm_area_struct *vma,
340 pte_t *ptep, spinlock_t *ptl)
30dad309 341{
ad1ac596
ML
342 pte_t pte;
343
fcd48540 344 hugetlb_vma_assert_locked(vma);
ad1ac596
ML
345 spin_lock(ptl);
346 pte = huge_ptep_get(ptep);
347
fcd48540 348 if (unlikely(!is_hugetlb_entry_migration(pte))) {
ad1ac596 349 spin_unlock(ptl);
fcd48540
PX
350 hugetlb_vma_unlock_read(vma);
351 } else {
352 /*
353 * If migration entry existed, safe to release vma lock
354 * here because the pgtable page won't be freed without the
355 * pgtable lock released. See comment right above pgtable
356 * lock release in migration_entry_wait_on_locked().
357 */
358 hugetlb_vma_unlock_read(vma);
ad1ac596 359 migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
fcd48540 360 }
30dad309
NH
361}
362
ad1ac596
ML
363void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
364{
365 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);
366
fcd48540 367 __migration_entry_wait_huge(vma, pte, ptl);
ad1ac596
ML
368}
369#endif
370
616b8371
ZY
371#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
372void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
373{
374 spinlock_t *ptl;
616b8371
ZY
375
376 ptl = pmd_lock(mm, pmd);
377 if (!is_pmd_migration_entry(*pmd))
378 goto unlock;
ffa65753 379 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
616b8371
ZY
380 return;
381unlock:
382 spin_unlock(ptl);
383}
384#endif
385
108ca835
MWO
386static int folio_expected_refs(struct address_space *mapping,
387 struct folio *folio)
0b3901b3 388{
108ca835
MWO
389 int refs = 1;
390 if (!mapping)
391 return refs;
0b3901b3 392
108ca835
MWO
393 refs += folio_nr_pages(folio);
394 if (folio_test_private(folio))
395 refs++;
396
397 return refs;
0b3901b3
JK
398}
399
b20a3503 400/*
c3fcf8a5 401 * Replace the page in the mapping.
5b5c7120
CL
402 *
403 * The number of remaining references must be:
404 * 1 for anonymous pages without a mapping
405 * 2 for pages with a mapping
266cf658 406 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
b20a3503 407 */
3417013e
MWO
408int folio_migrate_mapping(struct address_space *mapping,
409 struct folio *newfolio, struct folio *folio, int extra_count)
b20a3503 410{
3417013e 411 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
42cb14b1
HD
412 struct zone *oldzone, *newzone;
413 int dirty;
108ca835 414 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
3417013e 415 long nr = folio_nr_pages(folio);
8763cb45 416
6c5240ae 417 if (!mapping) {
0e8c7d0f 418 /* Anonymous page without mapping */
3417013e 419 if (folio_ref_count(folio) != expected_count)
6c5240ae 420 return -EAGAIN;
cf4b769a
HD
421
422 /* No turning back from here */
3417013e
MWO
423 newfolio->index = folio->index;
424 newfolio->mapping = folio->mapping;
425 if (folio_test_swapbacked(folio))
426 __folio_set_swapbacked(newfolio);
cf4b769a 427
78bd5209 428 return MIGRATEPAGE_SUCCESS;
6c5240ae
CL
429 }
430
3417013e
MWO
431 oldzone = folio_zone(folio);
432 newzone = folio_zone(newfolio);
42cb14b1 433
89eb946a 434 xas_lock_irq(&xas);
3417013e 435 if (!folio_ref_freeze(folio, expected_count)) {
89eb946a 436 xas_unlock_irq(&xas);
e286781d
NP
437 return -EAGAIN;
438 }
439
b20a3503 440 /*
3417013e 441 * Now we know that no one else is looking at the folio:
cf4b769a 442 * no turning back from here.
b20a3503 443 */
3417013e
MWO
444 newfolio->index = folio->index;
445 newfolio->mapping = folio->mapping;
446 folio_ref_add(newfolio, nr); /* add cache reference */
447 if (folio_test_swapbacked(folio)) {
448 __folio_set_swapbacked(newfolio);
449 if (folio_test_swapcache(folio)) {
450 folio_set_swapcache(newfolio);
451 newfolio->private = folio_get_private(folio);
6326fec1
NP
452 }
453 } else {
3417013e 454 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
b20a3503
CL
455 }
456
42cb14b1 457 /* Move dirty while page refs frozen and newpage not yet exposed */
3417013e 458 dirty = folio_test_dirty(folio);
42cb14b1 459 if (dirty) {
3417013e
MWO
460 folio_clear_dirty(folio);
461 folio_set_dirty(newfolio);
42cb14b1
HD
462 }
463
3417013e 464 xas_store(&xas, newfolio);
7cf9c2c7
NP
465
466 /*
937a94c9
JG
467 * Drop cache reference from old page by unfreezing
468 * to one less reference.
7cf9c2c7
NP
469 * We know this isn't the last reference.
470 */
3417013e 471 folio_ref_unfreeze(folio, expected_count - nr);
7cf9c2c7 472
89eb946a 473 xas_unlock(&xas);
42cb14b1
HD
474 /* Leave irq disabled to prevent preemption while updating stats */
475
0e8c7d0f
CL
476 /*
477 * If moved to a different zone then also account
478 * the page for that zone. Other VM counters will be
479 * taken care of when we establish references to the
480 * new page and drop references to the old page.
481 *
482 * Note that anonymous pages are accounted for
4b9d0fab 483 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
0e8c7d0f
CL
484 * are mapped to swap space.
485 */
42cb14b1 486 if (newzone != oldzone) {
0d1c2072
JW
487 struct lruvec *old_lruvec, *new_lruvec;
488 struct mem_cgroup *memcg;
489
3417013e 490 memcg = folio_memcg(folio);
0d1c2072
JW
491 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
492 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
493
5c447d27
SB
494 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
495 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
3417013e 496 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
5c447d27
SB
497 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
498 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
42cb14b1 499 }
b6038942 500#ifdef CONFIG_SWAP
3417013e 501 if (folio_test_swapcache(folio)) {
b6038942
SB
502 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
503 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
504 }
505#endif
f56753ac 506 if (dirty && mapping_can_writeback(mapping)) {
5c447d27
SB
507 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
508 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
509 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
510 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
42cb14b1 511 }
4b02108a 512 }
42cb14b1 513 local_irq_enable();
b20a3503 514
78bd5209 515 return MIGRATEPAGE_SUCCESS;
b20a3503 516}
3417013e 517EXPORT_SYMBOL(folio_migrate_mapping);
b20a3503 518
290408d4
NH
519/*
520 * The expected number of remaining references is the same as that
3417013e 521 * of folio_migrate_mapping().
290408d4
NH
522 */
523int migrate_huge_page_move_mapping(struct address_space *mapping,
b890ec2a 524 struct folio *dst, struct folio *src)
290408d4 525{
b890ec2a 526 XA_STATE(xas, &mapping->i_pages, folio_index(src));
290408d4 527 int expected_count;
290408d4 528
89eb946a 529 xas_lock_irq(&xas);
b890ec2a
MWO
530 expected_count = 2 + folio_has_private(src);
531 if (!folio_ref_freeze(src, expected_count)) {
89eb946a 532 xas_unlock_irq(&xas);
290408d4
NH
533 return -EAGAIN;
534 }
535
b890ec2a
MWO
536 dst->index = src->index;
537 dst->mapping = src->mapping;
6a93ca8f 538
b890ec2a 539 folio_get(dst);
290408d4 540
b890ec2a 541 xas_store(&xas, dst);
290408d4 542
b890ec2a 543 folio_ref_unfreeze(src, expected_count - 1);
290408d4 544
89eb946a 545 xas_unlock_irq(&xas);
6a93ca8f 546
78bd5209 547 return MIGRATEPAGE_SUCCESS;
290408d4
NH
548}
549
b20a3503 550/*
19138349 551 * Copy the flags and some other ancillary information
b20a3503 552 */
19138349 553void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
b20a3503 554{
7851a45c
RR
555 int cpupid;
556
19138349
MWO
557 if (folio_test_error(folio))
558 folio_set_error(newfolio);
559 if (folio_test_referenced(folio))
560 folio_set_referenced(newfolio);
561 if (folio_test_uptodate(folio))
562 folio_mark_uptodate(newfolio);
563 if (folio_test_clear_active(folio)) {
564 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
565 folio_set_active(newfolio);
566 } else if (folio_test_clear_unevictable(folio))
567 folio_set_unevictable(newfolio);
568 if (folio_test_workingset(folio))
569 folio_set_workingset(newfolio);
570 if (folio_test_checked(folio))
571 folio_set_checked(newfolio);
6c287605
DH
572 /*
573 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
574 * migration entries. We can still have PG_anon_exclusive set on an
575 * effectively unmapped and unreferenced first sub-pages of an
576 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
577 */
19138349
MWO
578 if (folio_test_mappedtodisk(folio))
579 folio_set_mappedtodisk(newfolio);
b20a3503 580
3417013e 581 /* Move dirty on pages not done by folio_migrate_mapping() */
19138349
MWO
582 if (folio_test_dirty(folio))
583 folio_set_dirty(newfolio);
b20a3503 584
19138349
MWO
585 if (folio_test_young(folio))
586 folio_set_young(newfolio);
587 if (folio_test_idle(folio))
588 folio_set_idle(newfolio);
33c3fc71 589
7851a45c
RR
590 /*
591 * Copy NUMA information to the new page, to prevent over-eager
592 * future migrations of this same page.
593 */
19138349 594 cpupid = page_cpupid_xchg_last(&folio->page, -1);
33024536
HY
595 /*
596 * For memory tiering mode, when migrate between slow and fast
597 * memory node, reset cpupid, because that is used to record
598 * page access time in slow memory node.
599 */
600 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
601 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
602 bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
603
604 if (f_toptier != t_toptier)
605 cpupid = -1;
606 }
19138349 607 page_cpupid_xchg_last(&newfolio->page, cpupid);
7851a45c 608
19138349 609 folio_migrate_ksm(newfolio, folio);
c8d6553b
HD
610 /*
611 * Please do not reorder this without considering how mm/ksm.c's
612 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
613 */
19138349
MWO
614 if (folio_test_swapcache(folio))
615 folio_clear_swapcache(folio);
616 folio_clear_private(folio);
ad2fa371
MS
617
618 /* page->private contains hugetlb specific flags */
19138349
MWO
619 if (!folio_test_hugetlb(folio))
620 folio->private = NULL;
b20a3503
CL
621
622 /*
623 * If any waiters have accumulated on the new page then
624 * wake them up.
625 */
19138349
MWO
626 if (folio_test_writeback(newfolio))
627 folio_end_writeback(newfolio);
d435edca 628
6aeff241
YS
629 /*
630 * PG_readahead shares the same bit with PG_reclaim. The above
631 * end_page_writeback() may clear PG_readahead mistakenly, so set the
632 * bit after that.
633 */
19138349
MWO
634 if (folio_test_readahead(folio))
635 folio_set_readahead(newfolio);
6aeff241 636
19138349 637 folio_copy_owner(newfolio, folio);
74485cf2 638
19138349 639 if (!folio_test_hugetlb(folio))
d21bba2b 640 mem_cgroup_migrate(folio, newfolio);
b20a3503 641}
19138349 642EXPORT_SYMBOL(folio_migrate_flags);
2916ecc0 643
715cbfd6 644void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
2916ecc0 645{
715cbfd6
MWO
646 folio_copy(newfolio, folio);
647 folio_migrate_flags(newfolio, folio);
2916ecc0 648}
715cbfd6 649EXPORT_SYMBOL(folio_migrate_copy);
b20a3503 650
1d8b85cc
CL
651/************************************************************
652 * Migration functions
653 ***********************************************************/
654
16ce101d
AP
655int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
656 struct folio *src, enum migrate_mode mode, int extra_count)
657{
658 int rc;
659
660 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
661
662 rc = folio_migrate_mapping(mapping, dst, src, extra_count);
663
664 if (rc != MIGRATEPAGE_SUCCESS)
665 return rc;
666
667 if (mode != MIGRATE_SYNC_NO_COPY)
668 folio_migrate_copy(dst, src);
669 else
670 folio_migrate_flags(dst, src);
671 return MIGRATEPAGE_SUCCESS;
672}
673
54184650
MWO
674/**
675 * migrate_folio() - Simple folio migration.
676 * @mapping: The address_space containing the folio.
677 * @dst: The folio to migrate the data to.
678 * @src: The folio containing the current data.
679 * @mode: How to migrate the page.
680 *
681 * Common logic to directly migrate a single LRU folio suitable for
682 * folios that do not use PagePrivate/PagePrivate2.
b20a3503 683 *
54184650 684 * Folios are locked upon entry and exit.
b20a3503 685 */
54184650
MWO
686int migrate_folio(struct address_space *mapping, struct folio *dst,
687 struct folio *src, enum migrate_mode mode)
b20a3503 688{
16ce101d 689 return migrate_folio_extra(mapping, dst, src, mode, 0);
b20a3503 690}
54184650 691EXPORT_SYMBOL(migrate_folio);
b20a3503 692
9361401e 693#ifdef CONFIG_BLOCK
84ade7c1
JK
694/* Returns true if all buffers are successfully locked */
695static bool buffer_migrate_lock_buffers(struct buffer_head *head,
696 enum migrate_mode mode)
697{
698 struct buffer_head *bh = head;
699
700 /* Simple case, sync compaction */
701 if (mode != MIGRATE_ASYNC) {
702 do {
84ade7c1
JK
703 lock_buffer(bh);
704 bh = bh->b_this_page;
705
706 } while (bh != head);
707
708 return true;
709 }
710
711 /* async case, we cannot block on lock_buffer so use trylock_buffer */
712 do {
84ade7c1
JK
713 if (!trylock_buffer(bh)) {
714 /*
715 * We failed to lock the buffer and cannot stall in
716 * async migration. Release the taken locks
717 */
718 struct buffer_head *failed_bh = bh;
84ade7c1
JK
719 bh = head;
720 while (bh != failed_bh) {
721 unlock_buffer(bh);
84ade7c1
JK
722 bh = bh->b_this_page;
723 }
724 return false;
725 }
726
727 bh = bh->b_this_page;
728 } while (bh != head);
729 return true;
730}
731
67235182
MWO
732static int __buffer_migrate_folio(struct address_space *mapping,
733 struct folio *dst, struct folio *src, enum migrate_mode mode,
89cb0888 734 bool check_refs)
1d8b85cc 735{
1d8b85cc
CL
736 struct buffer_head *bh, *head;
737 int rc;
cc4f11e6 738 int expected_count;
1d8b85cc 739
67235182
MWO
740 head = folio_buffers(src);
741 if (!head)
54184650 742 return migrate_folio(mapping, dst, src, mode);
1d8b85cc 743
cc4f11e6 744 /* Check whether page does not have extra refs before we do more work */
108ca835 745 expected_count = folio_expected_refs(mapping, src);
67235182 746 if (folio_ref_count(src) != expected_count)
cc4f11e6 747 return -EAGAIN;
1d8b85cc 748
cc4f11e6
JK
749 if (!buffer_migrate_lock_buffers(head, mode))
750 return -EAGAIN;
1d8b85cc 751
89cb0888
JK
752 if (check_refs) {
753 bool busy;
754 bool invalidated = false;
755
756recheck_buffers:
757 busy = false;
758 spin_lock(&mapping->private_lock);
759 bh = head;
760 do {
761 if (atomic_read(&bh->b_count)) {
762 busy = true;
763 break;
764 }
765 bh = bh->b_this_page;
766 } while (bh != head);
89cb0888
JK
767 if (busy) {
768 if (invalidated) {
769 rc = -EAGAIN;
770 goto unlock_buffers;
771 }
ebdf4de5 772 spin_unlock(&mapping->private_lock);
89cb0888
JK
773 invalidate_bh_lrus();
774 invalidated = true;
775 goto recheck_buffers;
776 }
777 }
778
67235182 779 rc = folio_migrate_mapping(mapping, dst, src, 0);
78bd5209 780 if (rc != MIGRATEPAGE_SUCCESS)
cc4f11e6 781 goto unlock_buffers;
1d8b85cc 782
67235182 783 folio_attach_private(dst, folio_detach_private(src));
1d8b85cc
CL
784
785 bh = head;
786 do {
67235182 787 set_bh_page(bh, &dst->page, bh_offset(bh));
1d8b85cc 788 bh = bh->b_this_page;
1d8b85cc
CL
789 } while (bh != head);
790
2916ecc0 791 if (mode != MIGRATE_SYNC_NO_COPY)
67235182 792 folio_migrate_copy(dst, src);
2916ecc0 793 else
67235182 794 folio_migrate_flags(dst, src);
1d8b85cc 795
cc4f11e6
JK
796 rc = MIGRATEPAGE_SUCCESS;
797unlock_buffers:
ebdf4de5
JK
798 if (check_refs)
799 spin_unlock(&mapping->private_lock);
1d8b85cc
CL
800 bh = head;
801 do {
802 unlock_buffer(bh);
1d8b85cc 803 bh = bh->b_this_page;
1d8b85cc
CL
804 } while (bh != head);
805
cc4f11e6 806 return rc;
1d8b85cc 807}
89cb0888 808
67235182
MWO
809/**
810 * buffer_migrate_folio() - Migration function for folios with buffers.
811 * @mapping: The address space containing @src.
812 * @dst: The folio to migrate to.
813 * @src: The folio to migrate from.
814 * @mode: How to migrate the folio.
815 *
816 * This function can only be used if the underlying filesystem guarantees
817 * that no other references to @src exist. For example attached buffer
818 * heads are accessed only under the folio lock. If your filesystem cannot
819 * provide this guarantee, buffer_migrate_folio_norefs() may be more
820 * appropriate.
821 *
822 * Return: 0 on success or a negative errno on failure.
89cb0888 823 */
67235182
MWO
824int buffer_migrate_folio(struct address_space *mapping,
825 struct folio *dst, struct folio *src, enum migrate_mode mode)
89cb0888 826{
67235182 827 return __buffer_migrate_folio(mapping, dst, src, mode, false);
89cb0888 828}
67235182
MWO
829EXPORT_SYMBOL(buffer_migrate_folio);
830
831/**
832 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
833 * @mapping: The address space containing @src.
834 * @dst: The folio to migrate to.
835 * @src: The folio to migrate from.
836 * @mode: How to migrate the folio.
837 *
838 * Like buffer_migrate_folio() except that this variant is more careful
839 * and checks that there are also no buffer head references. This function
840 * is the right one for mappings where buffer heads are directly looked
841 * up and referenced (such as block device mappings).
842 *
843 * Return: 0 on success or a negative errno on failure.
89cb0888 844 */
67235182
MWO
845int buffer_migrate_folio_norefs(struct address_space *mapping,
846 struct folio *dst, struct folio *src, enum migrate_mode mode)
89cb0888 847{
67235182 848 return __buffer_migrate_folio(mapping, dst, src, mode, true);
89cb0888 849}
e26355e2 850EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
9361401e 851#endif
1d8b85cc 852
2ec810d5
MWO
853int filemap_migrate_folio(struct address_space *mapping,
854 struct folio *dst, struct folio *src, enum migrate_mode mode)
855{
856 int ret;
857
858 ret = folio_migrate_mapping(mapping, dst, src, 0);
859 if (ret != MIGRATEPAGE_SUCCESS)
860 return ret;
861
862 if (folio_get_private(src))
863 folio_attach_private(dst, folio_detach_private(src));
864
865 if (mode != MIGRATE_SYNC_NO_COPY)
866 folio_migrate_copy(dst, src);
867 else
868 folio_migrate_flags(dst, src);
869 return MIGRATEPAGE_SUCCESS;
870}
871EXPORT_SYMBOL_GPL(filemap_migrate_folio);
872
04e62a29 873/*
2be7fa10 874 * Writeback a folio to clean the dirty state
04e62a29 875 */
2be7fa10 876static int writeout(struct address_space *mapping, struct folio *folio)
8351a6e4 877{
04e62a29
CL
878 struct writeback_control wbc = {
879 .sync_mode = WB_SYNC_NONE,
880 .nr_to_write = 1,
881 .range_start = 0,
882 .range_end = LLONG_MAX,
04e62a29
CL
883 .for_reclaim = 1
884 };
885 int rc;
886
887 if (!mapping->a_ops->writepage)
888 /* No write method for the address space */
889 return -EINVAL;
890
2be7fa10 891 if (!folio_clear_dirty_for_io(folio))
04e62a29
CL
892 /* Someone else already triggered a write */
893 return -EAGAIN;
894
8351a6e4 895 /*
2be7fa10
MWO
896 * A dirty folio may imply that the underlying filesystem has
897 * the folio on some queue. So the folio must be clean for
898 * migration. Writeout may mean we lose the lock and the
899 * folio state is no longer what we checked for earlier.
04e62a29
CL
900 * At this point we know that the migration attempt cannot
901 * be successful.
8351a6e4 902 */
4eecb8b9 903 remove_migration_ptes(folio, folio, false);
8351a6e4 904
2be7fa10 905 rc = mapping->a_ops->writepage(&folio->page, &wbc);
8351a6e4 906
04e62a29
CL
907 if (rc != AOP_WRITEPAGE_ACTIVATE)
908 /* unlocked. Relock */
2be7fa10 909 folio_lock(folio);
04e62a29 910
bda8550d 911 return (rc < 0) ? -EIO : -EAGAIN;
04e62a29
CL
912}
913
914/*
915 * Default handling if a filesystem does not provide a migration function.
916 */
8faa8ef5
MWO
917static int fallback_migrate_folio(struct address_space *mapping,
918 struct folio *dst, struct folio *src, enum migrate_mode mode)
04e62a29 919{
8faa8ef5
MWO
920 if (folio_test_dirty(src)) {
921 /* Only writeback folios in full synchronous migration */
2916ecc0
JG
922 switch (mode) {
923 case MIGRATE_SYNC:
924 case MIGRATE_SYNC_NO_COPY:
925 break;
926 default:
b969c4ab 927 return -EBUSY;
2916ecc0 928 }
2be7fa10 929 return writeout(mapping, src);
b969c4ab 930 }
8351a6e4
CL
931
932 /*
933 * Buffers may be managed in a filesystem specific way.
934 * We must have no buffers or drop them.
935 */
8faa8ef5
MWO
936 if (folio_test_private(src) &&
937 !filemap_release_folio(src, GFP_KERNEL))
806031bb 938 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
8351a6e4 939
54184650 940 return migrate_folio(mapping, dst, src, mode);
8351a6e4
CL
941}
942
e24f0b8f
CL
943/*
944 * Move a page to a newly allocated page
945 * The page is locked and all ptes have been successfully removed.
946 *
947 * The new page will have replaced the old page if this function
948 * is successful.
894bc310
LS
949 *
950 * Return value:
951 * < 0 - error code
78bd5209 952 * MIGRATEPAGE_SUCCESS - success
e24f0b8f 953 */
e7e3ffeb 954static int move_to_new_folio(struct folio *dst, struct folio *src,
5c3f9a67 955 enum migrate_mode mode)
e24f0b8f 956{
bda807d4 957 int rc = -EAGAIN;
e7e3ffeb 958 bool is_lru = !__PageMovable(&src->page);
e24f0b8f 959
e7e3ffeb
MWO
960 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
961 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
e24f0b8f 962
bda807d4 963 if (likely(is_lru)) {
68f2736a
MWO
964 struct address_space *mapping = folio_mapping(src);
965
bda807d4 966 if (!mapping)
54184650 967 rc = migrate_folio(mapping, dst, src, mode);
5490da4f 968 else if (mapping->a_ops->migrate_folio)
bda807d4 969 /*
5490da4f
MWO
970 * Most folios have a mapping and most filesystems
971 * provide a migrate_folio callback. Anonymous folios
bda807d4 972 * are part of swap space which also has its own
5490da4f 973 * migrate_folio callback. This is the most common path
bda807d4
MK
974 * for page migration.
975 */
5490da4f
MWO
976 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
977 mode);
bda807d4 978 else
8faa8ef5 979 rc = fallback_migrate_folio(mapping, dst, src, mode);
bda807d4 980 } else {
68f2736a
MWO
981 const struct movable_operations *mops;
982
e24f0b8f 983 /*
bda807d4
MK
984 * In case of non-lru page, it could be released after
985 * isolation step. In that case, we shouldn't try migration.
e24f0b8f 986 */
e7e3ffeb
MWO
987 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
988 if (!folio_test_movable(src)) {
bda807d4 989 rc = MIGRATEPAGE_SUCCESS;
e7e3ffeb 990 folio_clear_isolated(src);
bda807d4
MK
991 goto out;
992 }
993
da707a6d 994 mops = folio_movable_ops(src);
68f2736a 995 rc = mops->migrate_page(&dst->page, &src->page, mode);
bda807d4 996 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
e7e3ffeb 997 !folio_test_isolated(src));
bda807d4 998 }
e24f0b8f 999
5c3f9a67 1000 /*
e7e3ffeb
MWO
1001 * When successful, old pagecache src->mapping must be cleared before
1002 * src is freed; but stats require that PageAnon be left as PageAnon.
5c3f9a67
HD
1003 */
1004 if (rc == MIGRATEPAGE_SUCCESS) {
e7e3ffeb
MWO
1005 if (__PageMovable(&src->page)) {
1006 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
bda807d4
MK
1007
1008 /*
1009 * We clear PG_movable under page_lock so any compactor
1010 * cannot try to migrate this page.
1011 */
e7e3ffeb 1012 folio_clear_isolated(src);
bda807d4
MK
1013 }
1014
1015 /*
e7e3ffeb 1016 * Anonymous and movable src->mapping will be cleared by
bda807d4
MK
1017 * free_pages_prepare so don't reset it here for keeping
1018 * the type to work PageAnon, for example.
1019 */
e7e3ffeb
MWO
1020 if (!folio_mapping_flags(src))
1021 src->mapping = NULL;
d2b2c6dd 1022
e7e3ffeb
MWO
1023 if (likely(!folio_is_zone_device(dst)))
1024 flush_dcache_folio(dst);
3fe2011f 1025 }
bda807d4 1026out:
e24f0b8f
CL
1027 return rc;
1028}
1029
682a71a1 1030static int __unmap_and_move(struct folio *src, struct folio *dst,
9c620e2b 1031 int force, enum migrate_mode mode)
e24f0b8f 1032{
0dabec93 1033 int rc = -EAGAIN;
213ecb31 1034 bool page_was_mapped = false;
3f6c8272 1035 struct anon_vma *anon_vma = NULL;
682a71a1 1036 bool is_lru = !__PageMovable(&src->page);
95a402c3 1037
682a71a1 1038 if (!folio_trylock(src)) {
a6bc32b8 1039 if (!force || mode == MIGRATE_ASYNC)
0dabec93 1040 goto out;
3e7d3449
MG
1041
1042 /*
1043 * It's not safe for direct compaction to call lock_page.
1044 * For example, during page readahead pages are added locked
1045 * to the LRU. Later, when the IO completes the pages are
1046 * marked uptodate and unlocked. However, the queueing
1047 * could be merging multiple pages for one bio (e.g.
d4388340 1048 * mpage_readahead). If an allocation happens for the
3e7d3449
MG
1049 * second or third page, the process can end up locking
1050 * the same page twice and deadlocking. Rather than
1051 * trying to be clever about what pages can be locked,
1052 * avoid the use of lock_page for direct compaction
1053 * altogether.
1054 */
1055 if (current->flags & PF_MEMALLOC)
0dabec93 1056 goto out;
3e7d3449 1057
682a71a1 1058 folio_lock(src);
e24f0b8f
CL
1059 }
1060
682a71a1 1061 if (folio_test_writeback(src)) {
11bc82d6 1062 /*
fed5b64a 1063 * Only in the case of a full synchronous migration is it
a6bc32b8
MG
1064 * necessary to wait for PageWriteback. In the async case,
1065 * the retry loop is too short and in the sync-light case,
1066 * the overhead of stalling is too much
11bc82d6 1067 */
2916ecc0
JG
1068 switch (mode) {
1069 case MIGRATE_SYNC:
1070 case MIGRATE_SYNC_NO_COPY:
1071 break;
1072 default:
11bc82d6 1073 rc = -EBUSY;
0a31bc97 1074 goto out_unlock;
11bc82d6
AA
1075 }
1076 if (!force)
0a31bc97 1077 goto out_unlock;
682a71a1 1078 folio_wait_writeback(src);
e24f0b8f 1079 }
03f15c86 1080
e24f0b8f 1081 /*
682a71a1
MWO
1082 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1083 * we cannot notice that anon_vma is freed while we migrate a page.
1ce82b69 1084 * This get_anon_vma() delays freeing anon_vma pointer until the end
dc386d4d 1085 * of migration. File cache pages are no problem because of page_lock()
989f89c5
KH
1086 * File Caches may use write_page() or lock_page() in migration, then,
1087 * just care Anon page here.
03f15c86 1088 *
29eea9b5 1089 * Only folio_get_anon_vma() understands the subtleties of
03f15c86
HD
1090 * getting a hold on an anon_vma from outside one of its mms.
1091 * But if we cannot get anon_vma, then we won't need it anyway,
1092 * because that implies that the anon page is no longer mapped
1093 * (and cannot be remapped so long as we hold the page lock).
dc386d4d 1094 */
682a71a1 1095 if (folio_test_anon(src) && !folio_test_ksm(src))
29eea9b5 1096 anon_vma = folio_get_anon_vma(src);
62e1c553 1097
7db7671f
HD
1098 /*
1099 * Block others from accessing the new page when we get around to
1100 * establishing additional references. We are usually the only one
682a71a1
MWO
1101 * holding a reference to dst at this point. We used to have a BUG
1102 * here if folio_trylock(dst) fails, but would like to allow for
1103 * cases where there might be a race with the previous use of dst.
7db7671f
HD
1104 * This is much like races on refcount of oldpage: just don't BUG().
1105 */
682a71a1 1106 if (unlikely(!folio_trylock(dst)))
7db7671f
HD
1107 goto out_unlock;
1108
bda807d4 1109 if (unlikely(!is_lru)) {
682a71a1 1110 rc = move_to_new_folio(dst, src, mode);
bda807d4
MK
1111 goto out_unlock_both;
1112 }
1113
dc386d4d 1114 /*
62e1c553
SL
1115 * Corner case handling:
1116 * 1. When a new swap-cache page is read into, it is added to the LRU
1117 * and treated as swapcache but it has no rmap yet.
682a71a1 1118 * Calling try_to_unmap() against a src->mapping==NULL page will
62e1c553 1119 * trigger a BUG. So handle it here.
d12b8951 1120 * 2. An orphaned page (see truncate_cleanup_page) might have
62e1c553
SL
1121 * fs-private metadata. The page can be picked up due to memory
1122 * offlining. Everywhere else except page reclaim, the page is
1123 * invisible to the vm, so the page can not be migrated. So try to
1124 * free the metadata, so the page can be freed.
e24f0b8f 1125 */
682a71a1
MWO
1126 if (!src->mapping) {
1127 if (folio_test_private(src)) {
1128 try_to_free_buffers(src);
7db7671f 1129 goto out_unlock_both;
62e1c553 1130 }
682a71a1 1131 } else if (folio_mapped(src)) {
7db7671f 1132 /* Establish migration ptes */
682a71a1
MWO
1133 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1134 !folio_test_ksm(src) && !anon_vma, src);
1135 try_to_migrate(src, 0);
213ecb31 1136 page_was_mapped = true;
2ebba6b7 1137 }
dc386d4d 1138
682a71a1
MWO
1139 if (!folio_mapped(src))
1140 rc = move_to_new_folio(dst, src, mode);
e24f0b8f 1141
c3096e67 1142 /*
682a71a1 1143 * When successful, push dst to LRU immediately: so that if it
c3096e67 1144 * turns out to be an mlocked page, remove_migration_ptes() will
682a71a1 1145 * automatically build up the correct dst->mlock_count for it.
c3096e67
HD
1146 *
1147 * We would like to do something similar for the old page, when
1148 * unsuccessful, and other cases when a page has been temporarily
1149 * isolated from the unevictable LRU: but this case is the easiest.
1150 */
1151 if (rc == MIGRATEPAGE_SUCCESS) {
682a71a1 1152 folio_add_lru(dst);
c3096e67
HD
1153 if (page_was_mapped)
1154 lru_add_drain();
1155 }
1156
5c3f9a67 1157 if (page_was_mapped)
682a71a1
MWO
1158 remove_migration_ptes(src,
1159 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
3f6c8272 1160
7db7671f 1161out_unlock_both:
682a71a1 1162 folio_unlock(dst);
7db7671f 1163out_unlock:
3f6c8272 1164 /* Drop an anon_vma reference if we took one */
76545066 1165 if (anon_vma)
9e60109f 1166 put_anon_vma(anon_vma);
682a71a1 1167 folio_unlock(src);
0dabec93 1168out:
c6c919eb 1169 /*
682a71a1 1170 * If migration is successful, decrease refcount of dst,
c6c919eb 1171 * which will not free the page because new page owner increased
c3096e67 1172 * refcounter.
c6c919eb 1173 */
c3096e67 1174 if (rc == MIGRATEPAGE_SUCCESS)
682a71a1 1175 folio_put(dst);
c6c919eb 1176
0dabec93
MK
1177 return rc;
1178}
95a402c3 1179
0dabec93 1180/*
49f51859
HY
1181 * Obtain the lock on folio, remove all ptes and migrate the folio
1182 * to the newly allocated folio in dst.
0dabec93 1183 */
6ec4476a 1184static int unmap_and_move(new_page_t get_new_page,
ef2a5153 1185 free_page_t put_new_page,
49f51859 1186 unsigned long private, struct folio *src,
add05cec 1187 int force, enum migrate_mode mode,
dd4ae78a
YS
1188 enum migrate_reason reason,
1189 struct list_head *ret)
0dabec93 1190{
49f51859 1191 struct folio *dst;
2def7424 1192 int rc = MIGRATEPAGE_SUCCESS;
74d4a579 1193 struct page *newpage = NULL;
0dabec93 1194
49f51859 1195 if (!thp_migration_supported() && folio_test_transhuge(src))
d532e2e5 1196 return -ENOSYS;
94723aaf 1197
49f51859
HY
1198 if (folio_ref_count(src) == 1) {
1199 /* Folio was freed from under us. So we are done. */
1200 folio_clear_active(src);
1201 folio_clear_unevictable(src);
160088b3 1202 /* free_pages_prepare() will clear PG_isolated. */
0dabec93
MK
1203 goto out;
1204 }
1205
49f51859 1206 newpage = get_new_page(&src->page, private);
74d4a579
YS
1207 if (!newpage)
1208 return -ENOMEM;
682a71a1 1209 dst = page_folio(newpage);
74d4a579 1210
4c74b65f 1211 dst->private = NULL;
682a71a1 1212 rc = __unmap_and_move(src, dst, force, mode);
c6c919eb 1213 if (rc == MIGRATEPAGE_SUCCESS)
49f51859 1214 set_page_owner_migrate_reason(&dst->page, reason);
bf6bddf1 1215
0dabec93 1216out:
e24f0b8f 1217 if (rc != -EAGAIN) {
0dabec93 1218 /*
49f51859
HY
1219 * A folio that has been migrated has all references
1220 * removed and will be freed. A folio that has not been
c23a0c99 1221 * migrated will have kept its references and be restored.
0dabec93 1222 */
49f51859 1223 list_del(&src->lru);
dd4ae78a 1224 }
6afcf8ef 1225
dd4ae78a
YS
1226 /*
1227 * If migration is successful, releases reference grabbed during
49f51859 1228 * isolation. Otherwise, restore the folio to right list unless
dd4ae78a
YS
1229 * we want to retry.
1230 */
1231 if (rc == MIGRATEPAGE_SUCCESS) {
6afcf8ef 1232 /*
49f51859 1233 * Compaction can migrate also non-LRU folios which are
6afcf8ef 1234 * not accounted to NR_ISOLATED_*. They can be recognized
49f51859 1235 * as __folio_test_movable
6afcf8ef 1236 */
49f51859
HY
1237 if (likely(!__folio_test_movable(src)))
1238 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1239 folio_is_file_lru(src), -folio_nr_pages(src));
c6c919eb 1240
79f5f8fa 1241 if (reason != MR_MEMORY_FAILURE)
d7e69488 1242 /*
49f51859 1243 * We release the folio in page_handle_poison.
d7e69488 1244 */
49f51859 1245 folio_put(src);
c6c919eb 1246 } else {
dd4ae78a 1247 if (rc != -EAGAIN)
49f51859 1248 list_add_tail(&src->lru, ret);
bda807d4 1249
c6c919eb 1250 if (put_new_page)
49f51859 1251 put_new_page(&dst->page, private);
c6c919eb 1252 else
49f51859 1253 folio_put(dst);
e24f0b8f 1254 }
68711a74 1255
e24f0b8f
CL
1256 return rc;
1257}
1258
290408d4
NH
1259/*
1260 * Counterpart of unmap_and_move_page() for hugepage migration.
1261 *
1262 * This function doesn't wait the completion of hugepage I/O
1263 * because there is no race between I/O and migration for hugepage.
1264 * Note that currently hugepage I/O occurs only in direct I/O
1265 * where no lock is held and PG_writeback is irrelevant,
1266 * and writeback status of all subpages are counted in the reference
1267 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1268 * under direct I/O, the reference of the head page is 512 and a bit more.)
1269 * This means that when we try to migrate hugepage whose subpages are
1270 * doing direct I/O, some references remain after try_to_unmap() and
1271 * hugepage migration fails without data corruption.
1272 *
1273 * There is also no race when direct I/O is issued on the page under migration,
1274 * because then pte is replaced with migration swap entry and direct I/O code
1275 * will wait in the page fault for migration to complete.
1276 */
1277static int unmap_and_move_huge_page(new_page_t get_new_page,
68711a74
DR
1278 free_page_t put_new_page, unsigned long private,
1279 struct page *hpage, int force,
dd4ae78a
YS
1280 enum migrate_mode mode, int reason,
1281 struct list_head *ret)
290408d4 1282{
4eecb8b9 1283 struct folio *dst, *src = page_folio(hpage);
2def7424 1284 int rc = -EAGAIN;
2ebba6b7 1285 int page_was_mapped = 0;
32665f2b 1286 struct page *new_hpage;
290408d4 1287 struct anon_vma *anon_vma = NULL;
c0d0381a 1288 struct address_space *mapping = NULL;
290408d4 1289
83467efb 1290 /*
7ed2c31d 1291 * Migratability of hugepages depends on architectures and their size.
83467efb
NH
1292 * This check is necessary because some callers of hugepage migration
1293 * like soft offline and memory hotremove don't walk through page
1294 * tables or check whether the hugepage is pmd-based or not before
1295 * kicking migration.
1296 */
577be05c 1297 if (!hugepage_migration_supported(page_hstate(hpage)))
83467efb
NH
1298 return -ENOSYS;
1299
c33db292 1300 if (folio_ref_count(src) == 1) {
71a64f61 1301 /* page was freed from under us. So we are done. */
ea8e72f4 1302 folio_putback_active_hugetlb(src);
71a64f61
MS
1303 return MIGRATEPAGE_SUCCESS;
1304 }
1305
666feb21 1306 new_hpage = get_new_page(hpage, private);
290408d4
NH
1307 if (!new_hpage)
1308 return -ENOMEM;
4eecb8b9 1309 dst = page_folio(new_hpage);
290408d4 1310
c33db292 1311 if (!folio_trylock(src)) {
2916ecc0 1312 if (!force)
290408d4 1313 goto out;
2916ecc0
JG
1314 switch (mode) {
1315 case MIGRATE_SYNC:
1316 case MIGRATE_SYNC_NO_COPY:
1317 break;
1318 default:
1319 goto out;
1320 }
c33db292 1321 folio_lock(src);
290408d4
NH
1322 }
1323
cb6acd01
MK
1324 /*
1325 * Check for pages which are in the process of being freed. Without
c33db292 1326 * folio_mapping() set, hugetlbfs specific move page routine will not
cb6acd01
MK
1327 * be called and we could leak usage counts for subpools.
1328 */
345c62d1 1329 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
cb6acd01
MK
1330 rc = -EBUSY;
1331 goto out_unlock;
1332 }
1333
c33db292 1334 if (folio_test_anon(src))
29eea9b5 1335 anon_vma = folio_get_anon_vma(src);
290408d4 1336
c33db292 1337 if (unlikely(!folio_trylock(dst)))
7db7671f
HD
1338 goto put_anon;
1339
c33db292 1340 if (folio_mapped(src)) {
a98a2f0c 1341 enum ttu_flags ttu = 0;
336bf30e 1342
c33db292 1343 if (!folio_test_anon(src)) {
336bf30e
MK
1344 /*
1345 * In shared mappings, try_to_unmap could potentially
1346 * call huge_pmd_unshare. Because of this, take
1347 * semaphore in write mode here and set TTU_RMAP_LOCKED
1348 * to let lower levels know we have taken the lock.
1349 */
1350 mapping = hugetlb_page_mapping_lock_write(hpage);
1351 if (unlikely(!mapping))
1352 goto unlock_put_anon;
1353
5202978b 1354 ttu = TTU_RMAP_LOCKED;
336bf30e 1355 }
c0d0381a 1356
4b8554c5 1357 try_to_migrate(src, ttu);
2ebba6b7 1358 page_was_mapped = 1;
336bf30e 1359
5202978b 1360 if (ttu & TTU_RMAP_LOCKED)
336bf30e 1361 i_mmap_unlock_write(mapping);
2ebba6b7 1362 }
290408d4 1363
c33db292 1364 if (!folio_mapped(src))
e7e3ffeb 1365 rc = move_to_new_folio(dst, src, mode);
290408d4 1366
336bf30e 1367 if (page_was_mapped)
4eecb8b9
MWO
1368 remove_migration_ptes(src,
1369 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
290408d4 1370
c0d0381a 1371unlock_put_anon:
c33db292 1372 folio_unlock(dst);
7db7671f
HD
1373
1374put_anon:
fd4a4663 1375 if (anon_vma)
9e60109f 1376 put_anon_vma(anon_vma);
8e6ac7fa 1377
2def7424 1378 if (rc == MIGRATEPAGE_SUCCESS) {
345c62d1 1379 move_hugetlb_state(src, dst, reason);
2def7424
HD
1380 put_new_page = NULL;
1381 }
8e6ac7fa 1382
cb6acd01 1383out_unlock:
c33db292 1384 folio_unlock(src);
09761333 1385out:
dd4ae78a 1386 if (rc == MIGRATEPAGE_SUCCESS)
ea8e72f4 1387 folio_putback_active_hugetlb(src);
a04840c6 1388 else if (rc != -EAGAIN)
c33db292 1389 list_move_tail(&src->lru, ret);
68711a74
DR
1390
1391 /*
1392 * If migration was not successful and there's a freeing callback, use
1393 * it. Otherwise, put_page() will drop the reference grabbed during
1394 * isolation.
1395 */
2def7424 1396 if (put_new_page)
68711a74
DR
1397 put_new_page(new_hpage, private);
1398 else
ea8e72f4 1399 folio_putback_active_hugetlb(dst);
68711a74 1400
290408d4
NH
1401 return rc;
1402}
1403
eaec4e63 1404static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
d532e2e5 1405{
9c62ff00 1406 int rc;
d532e2e5 1407
eaec4e63
HY
1408 folio_lock(folio);
1409 rc = split_folio_to_list(folio, split_folios);
1410 folio_unlock(folio);
e6fa8a79 1411 if (!rc)
eaec4e63 1412 list_move_tail(&folio->lru, split_folios);
d532e2e5
YS
1413
1414 return rc;
1415}
1416
e5bfff8b
HY
1417#define NR_MAX_MIGRATE_PAGES_RETRY 10
1418
5b855937
HY
1419struct migrate_pages_stats {
1420 int nr_succeeded; /* Normal and large folios migrated successfully, in
1421 units of base pages */
1422 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1423 units of base pages. Untried folios aren't counted */
1424 int nr_thp_succeeded; /* THP migrated successfully */
1425 int nr_thp_failed; /* THP failed to be migrated */
1426 int nr_thp_split; /* THP split before migrating */
1427};
1428
e5bfff8b
HY
1429/*
1430 * Returns the number of hugetlb folios that were not migrated, or an error code
1431 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1432 * any more because the list has become empty or no retryable hugetlb folios
1433 * exist any more. It is caller's responsibility to call putback_movable_pages()
1434 * only if ret != 0.
1435 */
1436static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
1437 free_page_t put_new_page, unsigned long private,
1438 enum migrate_mode mode, int reason,
1439 struct migrate_pages_stats *stats,
1440 struct list_head *ret_folios)
1441{
1442 int retry = 1;
1443 int nr_failed = 0;
1444 int nr_retry_pages = 0;
1445 int pass = 0;
1446 struct folio *folio, *folio2;
1447 int rc, nr_pages;
1448
1449 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1450 retry = 0;
1451 nr_retry_pages = 0;
1452
1453 list_for_each_entry_safe(folio, folio2, from, lru) {
1454 if (!folio_test_hugetlb(folio))
1455 continue;
1456
1457 nr_pages = folio_nr_pages(folio);
1458
1459 cond_resched();
1460
1461 rc = unmap_and_move_huge_page(get_new_page,
1462 put_new_page, private,
1463 &folio->page, pass > 2, mode,
1464 reason, ret_folios);
1465 /*
1466 * The rules are:
1467 * Success: hugetlb folio will be put back
1468 * -EAGAIN: stay on the from list
1469 * -ENOMEM: stay on the from list
1470 * -ENOSYS: stay on the from list
1471 * Other errno: put on ret_folios list
1472 */
1473 switch(rc) {
1474 case -ENOSYS:
1475 /* Hugetlb migration is unsupported */
1476 nr_failed++;
1477 stats->nr_failed_pages += nr_pages;
1478 list_move_tail(&folio->lru, ret_folios);
1479 break;
1480 case -ENOMEM:
1481 /*
1482 * When memory is low, don't bother to try to migrate
1483 * other folios, just exit.
1484 */
1485 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1486 return -ENOMEM;
1487 case -EAGAIN:
1488 retry++;
1489 nr_retry_pages += nr_pages;
1490 break;
1491 case MIGRATEPAGE_SUCCESS:
1492 stats->nr_succeeded += nr_pages;
1493 break;
1494 default:
1495 /*
1496 * Permanent failure (-EBUSY, etc.):
1497 * unlike -EAGAIN case, the failed folio is
1498 * removed from migration folio list and not
1499 * retried in the next outer loop.
1500 */
1501 nr_failed++;
1502 stats->nr_failed_pages += nr_pages;
1503 break;
1504 }
1505 }
1506 }
1507 /*
1508 * nr_failed is number of hugetlb folios failed to be migrated. After
1509 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1510 * folios as failed.
1511 */
1512 nr_failed += retry;
1513 stats->nr_failed_pages += nr_retry_pages;
1514
1515 return nr_failed;
1516}
1517
b20a3503 1518/*
eaec4e63 1519 * migrate_pages - migrate the folios specified in a list, to the free folios
c73e5c9c 1520 * supplied as the target for the page migration
b20a3503 1521 *
eaec4e63
HY
1522 * @from: The list of folios to be migrated.
1523 * @get_new_page: The function used to allocate free folios to be used
1524 * as the target of the folio migration.
1525 * @put_new_page: The function used to free target folios if migration
68711a74 1526 * fails, or NULL if no special handling is necessary.
c73e5c9c
SB
1527 * @private: Private data to be passed on to get_new_page()
1528 * @mode: The migration mode that specifies the constraints for
eaec4e63
HY
1529 * folio migration, if any.
1530 * @reason: The reason for folio migration.
1531 * @ret_succeeded: Set to the number of folios migrated successfully if
5ac95884 1532 * the caller passes a non-NULL pointer.
b20a3503 1533 *
e5bfff8b
HY
1534 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1535 * are movable any more because the list has become empty or no retryable folios
1536 * exist any more. It is caller's responsibility to call putback_movable_pages()
1537 * only if ret != 0.
b20a3503 1538 *
eaec4e63
HY
1539 * Returns the number of {normal folio, large folio, hugetlb} that were not
1540 * migrated, or an error code. The number of large folio splits will be
1541 * considered as the number of non-migrated large folio, no matter how many
1542 * split folios of the large folio are migrated successfully.
b20a3503 1543 */
9c620e2b 1544int migrate_pages(struct list_head *from, new_page_t get_new_page,
68711a74 1545 free_page_t put_new_page, unsigned long private,
5ac95884 1546 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
b20a3503 1547{
e24f0b8f 1548 int retry = 1;
eaec4e63 1549 int large_retry = 1;
1a5bae25 1550 int thp_retry = 1;
e5bfff8b 1551 int nr_failed;
077309bc 1552 int nr_retry_pages = 0;
eaec4e63 1553 int nr_large_failed = 0;
b20a3503 1554 int pass = 0;
eaec4e63 1555 bool is_large = false;
1a5bae25 1556 bool is_thp = false;
eaec4e63
HY
1557 struct folio *folio, *folio2;
1558 int rc, nr_pages;
1559 LIST_HEAD(ret_folios);
1560 LIST_HEAD(split_folios);
b0b515bf 1561 bool nosplit = (reason == MR_NUMA_MISPLACED);
eaec4e63 1562 bool no_split_folio_counting = false;
5b855937 1563 struct migrate_pages_stats stats;
b20a3503 1564
7bc1aec5
LM
1565 trace_mm_migrate_pages_start(mode, reason);
1566
5b855937 1567 memset(&stats, 0, sizeof(stats));
e5bfff8b
HY
1568 rc = migrate_hugetlbs(from, get_new_page, put_new_page, private, mode, reason,
1569 &stats, &ret_folios);
1570 if (rc < 0)
1571 goto out;
1572 nr_failed = rc;
1573
eaec4e63 1574split_folio_migration:
e5bfff8b
HY
1575 for (pass = 0;
1576 pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
1577 pass++) {
e24f0b8f 1578 retry = 0;
eaec4e63 1579 large_retry = 0;
1a5bae25 1580 thp_retry = 0;
077309bc 1581 nr_retry_pages = 0;
b20a3503 1582
eaec4e63 1583 list_for_each_entry_safe(folio, folio2, from, lru) {
e5bfff8b
HY
1584 /* Retried hugetlb folios will be kept in list */
1585 if (folio_test_hugetlb(folio)) {
1586 list_move_tail(&folio->lru, &ret_folios);
1587 continue;
1588 }
1589
1a5bae25 1590 /*
eaec4e63
HY
1591 * Large folio statistics is based on the source large
1592 * folio. Capture required information that might get
1593 * lost during migration.
1a5bae25 1594 */
e5bfff8b 1595 is_large = folio_test_large(folio);
eaec4e63
HY
1596 is_thp = is_large && folio_test_pmd_mappable(folio);
1597 nr_pages = folio_nr_pages(folio);
e5bfff8b 1598
e24f0b8f 1599 cond_resched();
2d1db3b1 1600
e5bfff8b
HY
1601 rc = unmap_and_move(get_new_page, put_new_page,
1602 private, folio, pass > 2, mode,
1603 reason, &ret_folios);
dd4ae78a
YS
1604 /*
1605 * The rules are:
e5bfff8b 1606 * Success: folio will be freed
dd4ae78a
YS
1607 * -EAGAIN: stay on the from list
1608 * -ENOMEM: stay on the from list
577be05c 1609 * -ENOSYS: stay on the from list
eaec4e63 1610 * Other errno: put on ret_folios list then splice to
dd4ae78a
YS
1611 * from list
1612 */
e24f0b8f 1613 switch(rc) {
d532e2e5 1614 /*
eaec4e63
HY
1615 * Large folio migration might be unsupported or
1616 * the allocation could've failed so we should retry
1617 * on the same folio with the large folio split
1618 * to normal folios.
d532e2e5 1619 *
eaec4e63 1620 * Split folios are put in split_folios, and
e6fa8a79
HY
1621 * we will migrate them after the rest of the
1622 * list is processed.
d532e2e5
YS
1623 */
1624 case -ENOSYS:
eaec4e63
HY
1625 /* Large folio migration is unsupported */
1626 if (is_large) {
1627 nr_large_failed++;
5b855937 1628 stats.nr_thp_failed += is_thp;
eaec4e63 1629 if (!try_split_folio(folio, &split_folios)) {
5b855937 1630 stats.nr_thp_split += is_thp;
e6fa8a79 1631 break;
d532e2e5 1632 }
eaec4e63 1633 } else if (!no_split_folio_counting) {
b5bade97 1634 nr_failed++;
f430893b
ML
1635 }
1636
5b855937 1637 stats.nr_failed_pages += nr_pages;
eaec4e63 1638 list_move_tail(&folio->lru, &ret_folios);
d532e2e5 1639 break;
95a402c3 1640 case -ENOMEM:
94723aaf 1641 /*
d532e2e5 1642 * When memory is low, don't bother to try to migrate
eaec4e63 1643 * other folios, just exit.
94723aaf 1644 */
eaec4e63
HY
1645 if (is_large) {
1646 nr_large_failed++;
5b855937 1647 stats.nr_thp_failed += is_thp;
eaec4e63 1648 /* Large folio NUMA faulting doesn't split to retry. */
fd4a7ac3 1649 if (!nosplit) {
eaec4e63 1650 int ret = try_split_folio(folio, &split_folios);
fd4a7ac3
BW
1651
1652 if (!ret) {
5b855937 1653 stats.nr_thp_split += is_thp;
fd4a7ac3
BW
1654 break;
1655 } else if (reason == MR_LONGTERM_PIN &&
1656 ret == -EAGAIN) {
1657 /*
eaec4e63
HY
1658 * Try again to split large folio to
1659 * mitigate the failure of longterm pinning.
fd4a7ac3 1660 */
eaec4e63
HY
1661 large_retry++;
1662 thp_retry += is_thp;
1663 nr_retry_pages += nr_pages;
fd4a7ac3
BW
1664 break;
1665 }
94723aaf 1666 }
eaec4e63 1667 } else if (!no_split_folio_counting) {
f430893b 1668 nr_failed++;
1a5bae25 1669 }
b5bade97 1670
5b855937 1671 stats.nr_failed_pages += nr_pages + nr_retry_pages;
69a041ff 1672 /*
eaec4e63
HY
1673 * There might be some split folios of fail-to-migrate large
1674 * folios left in split_folios list. Move them back to migration
69a041ff 1675 * list so that they could be put back to the right list by
eaec4e63 1676 * the caller otherwise the folio refcnt will be leaked.
69a041ff 1677 */
eaec4e63 1678 list_splice_init(&split_folios, from);
fbed53b4 1679 /* nr_failed isn't updated for not used */
eaec4e63 1680 nr_large_failed += large_retry;
5b855937 1681 stats.nr_thp_failed += thp_retry;
95a402c3 1682 goto out;
e24f0b8f 1683 case -EAGAIN:
eaec4e63
HY
1684 if (is_large) {
1685 large_retry++;
1686 thp_retry += is_thp;
1687 } else if (!no_split_folio_counting) {
f430893b 1688 retry++;
eaec4e63
HY
1689 }
1690 nr_retry_pages += nr_pages;
e24f0b8f 1691 break;
78bd5209 1692 case MIGRATEPAGE_SUCCESS:
5b855937
HY
1693 stats.nr_succeeded += nr_pages;
1694 stats.nr_thp_succeeded += is_thp;
e24f0b8f
CL
1695 break;
1696 default:
354a3363 1697 /*
d532e2e5 1698 * Permanent failure (-EBUSY, etc.):
eaec4e63
HY
1699 * unlike -EAGAIN case, the failed folio is
1700 * removed from migration folio list and not
354a3363
NH
1701 * retried in the next outer loop.
1702 */
eaec4e63
HY
1703 if (is_large) {
1704 nr_large_failed++;
5b855937 1705 stats.nr_thp_failed += is_thp;
eaec4e63 1706 } else if (!no_split_folio_counting) {
b5bade97 1707 nr_failed++;
eaec4e63 1708 }
f430893b 1709
5b855937 1710 stats.nr_failed_pages += nr_pages;
e24f0b8f 1711 break;
2d1db3b1 1712 }
b20a3503
CL
1713 }
1714 }
7047b5a4 1715 nr_failed += retry;
eaec4e63 1716 nr_large_failed += large_retry;
5b855937
HY
1717 stats.nr_thp_failed += thp_retry;
1718 stats.nr_failed_pages += nr_retry_pages;
b5bade97 1719 /*
eaec4e63
HY
1720 * Try to migrate split folios of fail-to-migrate large folios, no
1721 * nr_failed counting in this round, since all split folios of a
1722 * large folio is counted as 1 failure in the first round.
b5bade97 1723 */
eaec4e63 1724 if (!list_empty(&split_folios)) {
b5bade97 1725 /*
e5bfff8b
HY
1726 * Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY
1727 * retries) to ret_folios to avoid migrating them again.
b5bade97 1728 */
eaec4e63
HY
1729 list_splice_init(from, &ret_folios);
1730 list_splice_init(&split_folios, from);
1731 no_split_folio_counting = true;
b5bade97 1732 retry = 1;
eaec4e63 1733 goto split_folio_migration;
b5bade97
BW
1734 }
1735
eaec4e63 1736 rc = nr_failed + nr_large_failed;
95a402c3 1737out:
dd4ae78a 1738 /*
eaec4e63 1739 * Put the permanent failure folio back to migration list, they
dd4ae78a
YS
1740 * will be put back to the right list by the caller.
1741 */
eaec4e63 1742 list_splice(&ret_folios, from);
dd4ae78a 1743
03e5f82e 1744 /*
eaec4e63
HY
1745 * Return 0 in case all split folios of fail-to-migrate large folios
1746 * are migrated successfully.
03e5f82e
BW
1747 */
1748 if (list_empty(from))
1749 rc = 0;
1750
5b855937
HY
1751 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
1752 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
1753 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
1754 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
1755 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
1756 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
1757 stats.nr_thp_succeeded, stats.nr_thp_failed,
1758 stats.nr_thp_split, mode, reason);
7b2a2d4a 1759
5ac95884 1760 if (ret_succeeded)
5b855937 1761 *ret_succeeded = stats.nr_succeeded;
5ac95884 1762
78bd5209 1763 return rc;
b20a3503 1764}
95a402c3 1765
19fc7bed 1766struct page *alloc_migration_target(struct page *page, unsigned long private)
b4b38223 1767{
ffe06786 1768 struct folio *folio = page_folio(page);
19fc7bed
JK
1769 struct migration_target_control *mtc;
1770 gfp_t gfp_mask;
b4b38223 1771 unsigned int order = 0;
e37d3e83 1772 struct folio *hugetlb_folio = NULL;
ffe06786 1773 struct folio *new_folio = NULL;
19fc7bed
JK
1774 int nid;
1775 int zidx;
1776
1777 mtc = (struct migration_target_control *)private;
1778 gfp_mask = mtc->gfp_mask;
1779 nid = mtc->nid;
1780 if (nid == NUMA_NO_NODE)
ffe06786 1781 nid = folio_nid(folio);
b4b38223 1782
ffe06786 1783 if (folio_test_hugetlb(folio)) {
e51da3a9 1784 struct hstate *h = folio_hstate(folio);
d92bbc27 1785
19fc7bed 1786 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
e37d3e83
SK
1787 hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
1788 mtc->nmask, gfp_mask);
1789 return &hugetlb_folio->page;
d92bbc27 1790 }
b4b38223 1791
ffe06786 1792 if (folio_test_large(folio)) {
9933a0c8
JK
1793 /*
1794 * clear __GFP_RECLAIM to make the migration callback
1795 * consistent with regular THP allocations.
1796 */
1797 gfp_mask &= ~__GFP_RECLAIM;
b4b38223 1798 gfp_mask |= GFP_TRANSHUGE;
ffe06786 1799 order = folio_order(folio);
b4b38223 1800 }
ffe06786 1801 zidx = zone_idx(folio_zone(folio));
19fc7bed 1802 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
b4b38223
JK
1803 gfp_mask |= __GFP_HIGHMEM;
1804
ffe06786 1805 new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
b4b38223 1806
ffe06786 1807 return &new_folio->page;
b4b38223
JK
1808}
1809
742755a1 1810#ifdef CONFIG_NUMA
742755a1 1811
a49bd4d7 1812static int store_status(int __user *status, int start, int value, int nr)
742755a1 1813{
a49bd4d7
MH
1814 while (nr-- > 0) {
1815 if (put_user(value, status + start))
1816 return -EFAULT;
1817 start++;
1818 }
1819
1820 return 0;
1821}
1822
1823static int do_move_pages_to_node(struct mm_struct *mm,
1824 struct list_head *pagelist, int node)
1825{
1826 int err;
a0976311
JK
1827 struct migration_target_control mtc = {
1828 .nid = node,
1829 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1830 };
a49bd4d7 1831
a0976311 1832 err = migrate_pages(pagelist, alloc_migration_target, NULL,
5ac95884 1833 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
a49bd4d7
MH
1834 if (err)
1835 putback_movable_pages(pagelist);
1836 return err;
742755a1
CL
1837}
1838
1839/*
a49bd4d7
MH
1840 * Resolves the given address to a struct page, isolates it from the LRU and
1841 * puts it to the given pagelist.
e0153fc2
YS
1842 * Returns:
1843 * errno - if the page cannot be found/isolated
1844 * 0 - when it doesn't have to be migrated because it is already on the
1845 * target node
1846 * 1 - when it has been queued
742755a1 1847 */
a49bd4d7
MH
1848static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1849 int node, struct list_head *pagelist, bool migrate_all)
742755a1 1850{
a49bd4d7
MH
1851 struct vm_area_struct *vma;
1852 struct page *page;
742755a1 1853 int err;
742755a1 1854
d8ed45c5 1855 mmap_read_lock(mm);
a49bd4d7 1856 err = -EFAULT;
cb1c37b1
ML
1857 vma = vma_lookup(mm, addr);
1858 if (!vma || !vma_migratable(vma))
a49bd4d7 1859 goto out;
742755a1 1860
a49bd4d7 1861 /* FOLL_DUMP to ignore special (like zero) pages */
87d2762e 1862 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da 1863
a49bd4d7
MH
1864 err = PTR_ERR(page);
1865 if (IS_ERR(page))
1866 goto out;
89f5b7da 1867
a49bd4d7 1868 err = -ENOENT;
f7091ed6 1869 if (!page)
a49bd4d7 1870 goto out;
742755a1 1871
f7091ed6
HW
1872 if (is_zone_device_page(page))
1873 goto out_putpage;
1874
a49bd4d7
MH
1875 err = 0;
1876 if (page_to_nid(page) == node)
1877 goto out_putpage;
742755a1 1878
a49bd4d7
MH
1879 err = -EACCES;
1880 if (page_mapcount(page) > 1 && !migrate_all)
1881 goto out_putpage;
742755a1 1882
a49bd4d7
MH
1883 if (PageHuge(page)) {
1884 if (PageHead(page)) {
6aa3a920 1885 err = isolate_hugetlb(page_folio(page), pagelist);
7ce82f4c
ML
1886 if (!err)
1887 err = 1;
e632a938 1888 }
a49bd4d7
MH
1889 } else {
1890 struct page *head;
e632a938 1891
e8db67eb
NH
1892 head = compound_head(page);
1893 err = isolate_lru_page(head);
cf608ac1 1894 if (err)
a49bd4d7 1895 goto out_putpage;
742755a1 1896
e0153fc2 1897 err = 1;
a49bd4d7
MH
1898 list_add_tail(&head->lru, pagelist);
1899 mod_node_page_state(page_pgdat(head),
9de4f22a 1900 NR_ISOLATED_ANON + page_is_file_lru(head),
6c357848 1901 thp_nr_pages(head));
a49bd4d7
MH
1902 }
1903out_putpage:
1904 /*
1905 * Either remove the duplicate refcount from
1906 * isolate_lru_page() or drop the page ref if it was
1907 * not isolated.
1908 */
1909 put_page(page);
1910out:
d8ed45c5 1911 mmap_read_unlock(mm);
742755a1
CL
1912 return err;
1913}
1914
7ca8783a
WY
1915static int move_pages_and_store_status(struct mm_struct *mm, int node,
1916 struct list_head *pagelist, int __user *status,
1917 int start, int i, unsigned long nr_pages)
1918{
1919 int err;
1920
5d7ae891
WY
1921 if (list_empty(pagelist))
1922 return 0;
1923
7ca8783a
WY
1924 err = do_move_pages_to_node(mm, pagelist, node);
1925 if (err) {
1926 /*
1927 * Positive err means the number of failed
1928 * pages to migrate. Since we are going to
1929 * abort and return the number of non-migrated
ab9dd4f8 1930 * pages, so need to include the rest of the
7ca8783a
WY
1931 * nr_pages that have not been attempted as
1932 * well.
1933 */
1934 if (err > 0)
a7504ed1 1935 err += nr_pages - i;
7ca8783a
WY
1936 return err;
1937 }
1938 return store_status(status, start, node, i - start);
1939}
1940
5e9a0f02
BG
1941/*
1942 * Migrate an array of page address onto an array of nodes and fill
1943 * the corresponding array of status.
1944 */
3268c63e 1945static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
5e9a0f02
BG
1946 unsigned long nr_pages,
1947 const void __user * __user *pages,
1948 const int __user *nodes,
1949 int __user *status, int flags)
1950{
a49bd4d7
MH
1951 int current_node = NUMA_NO_NODE;
1952 LIST_HEAD(pagelist);
1953 int start, i;
1954 int err = 0, err1;
35282a2d 1955
361a2a22 1956 lru_cache_disable();
35282a2d 1957
a49bd4d7
MH
1958 for (i = start = 0; i < nr_pages; i++) {
1959 const void __user *p;
1960 unsigned long addr;
1961 int node;
3140a227 1962
a49bd4d7
MH
1963 err = -EFAULT;
1964 if (get_user(p, pages + i))
1965 goto out_flush;
1966 if (get_user(node, nodes + i))
1967 goto out_flush;
057d3389 1968 addr = (unsigned long)untagged_addr(p);
a49bd4d7
MH
1969
1970 err = -ENODEV;
1971 if (node < 0 || node >= MAX_NUMNODES)
1972 goto out_flush;
1973 if (!node_state(node, N_MEMORY))
1974 goto out_flush;
5e9a0f02 1975
a49bd4d7
MH
1976 err = -EACCES;
1977 if (!node_isset(node, task_nodes))
1978 goto out_flush;
1979
1980 if (current_node == NUMA_NO_NODE) {
1981 current_node = node;
1982 start = i;
1983 } else if (node != current_node) {
7ca8783a
WY
1984 err = move_pages_and_store_status(mm, current_node,
1985 &pagelist, status, start, i, nr_pages);
a49bd4d7
MH
1986 if (err)
1987 goto out;
1988 start = i;
1989 current_node = node;
3140a227
BG
1990 }
1991
a49bd4d7
MH
1992 /*
1993 * Errors in the page lookup or isolation are not fatal and we simply
1994 * report them via status
1995 */
1996 err = add_page_for_migration(mm, addr, current_node,
1997 &pagelist, flags & MPOL_MF_MOVE_ALL);
e0153fc2 1998
d08221a0 1999 if (err > 0) {
e0153fc2
YS
2000 /* The page is successfully queued for migration */
2001 continue;
2002 }
3140a227 2003
65462462
JH
2004 /*
2005 * The move_pages() man page does not have an -EEXIST choice, so
2006 * use -EFAULT instead.
2007 */
2008 if (err == -EEXIST)
2009 err = -EFAULT;
2010
d08221a0
WY
2011 /*
2012 * If the page is already on the target node (!err), store the
2013 * node, otherwise, store the err.
2014 */
2015 err = store_status(status, i, err ? : current_node, 1);
a49bd4d7
MH
2016 if (err)
2017 goto out_flush;
5e9a0f02 2018
7ca8783a
WY
2019 err = move_pages_and_store_status(mm, current_node, &pagelist,
2020 status, start, i, nr_pages);
a7504ed1
HY
2021 if (err) {
2022 /* We have accounted for page i */
2023 if (err > 0)
2024 err--;
4afdacec 2025 goto out;
a7504ed1 2026 }
a49bd4d7 2027 current_node = NUMA_NO_NODE;
3140a227 2028 }
a49bd4d7
MH
2029out_flush:
2030 /* Make sure we do not overwrite the existing error */
7ca8783a
WY
2031 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
2032 status, start, i, nr_pages);
dfe9aa23 2033 if (err >= 0)
a49bd4d7 2034 err = err1;
5e9a0f02 2035out:
361a2a22 2036 lru_cache_enable();
5e9a0f02
BG
2037 return err;
2038}
2039
742755a1 2040/*
2f007e74 2041 * Determine the nodes of an array of pages and store it in an array of status.
742755a1 2042 */
80bba129
BG
2043static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2044 const void __user **pages, int *status)
742755a1 2045{
2f007e74 2046 unsigned long i;
2f007e74 2047
d8ed45c5 2048 mmap_read_lock(mm);
742755a1 2049
2f007e74 2050 for (i = 0; i < nr_pages; i++) {
80bba129 2051 unsigned long addr = (unsigned long)(*pages);
742755a1
CL
2052 struct vm_area_struct *vma;
2053 struct page *page;
c095adbc 2054 int err = -EFAULT;
2f007e74 2055
059b8b48
LH
2056 vma = vma_lookup(mm, addr);
2057 if (!vma)
742755a1
CL
2058 goto set_status;
2059
d899844e 2060 /* FOLL_DUMP to ignore special (like zero) pages */
16fd6b31 2061 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da
LT
2062
2063 err = PTR_ERR(page);
2064 if (IS_ERR(page))
2065 goto set_status;
2066
f7091ed6
HW
2067 err = -ENOENT;
2068 if (!page)
2069 goto set_status;
2070
2071 if (!is_zone_device_page(page))
4cd61484 2072 err = page_to_nid(page);
f7091ed6 2073
16fd6b31 2074 put_page(page);
742755a1 2075set_status:
80bba129
BG
2076 *status = err;
2077
2078 pages++;
2079 status++;
2080 }
2081
d8ed45c5 2082 mmap_read_unlock(mm);
80bba129
BG
2083}
2084
5b1b561b
AB
2085static int get_compat_pages_array(const void __user *chunk_pages[],
2086 const void __user * __user *pages,
2087 unsigned long chunk_nr)
2088{
2089 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2090 compat_uptr_t p;
2091 int i;
2092
2093 for (i = 0; i < chunk_nr; i++) {
2094 if (get_user(p, pages32 + i))
2095 return -EFAULT;
2096 chunk_pages[i] = compat_ptr(p);
2097 }
2098
2099 return 0;
2100}
2101
80bba129
BG
2102/*
2103 * Determine the nodes of a user array of pages and store it in
2104 * a user array of status.
2105 */
2106static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2107 const void __user * __user *pages,
2108 int __user *status)
2109{
3eefb826 2110#define DO_PAGES_STAT_CHUNK_NR 16UL
80bba129
BG
2111 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2112 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
80bba129 2113
87b8d1ad 2114 while (nr_pages) {
3eefb826 2115 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
87b8d1ad 2116
5b1b561b
AB
2117 if (in_compat_syscall()) {
2118 if (get_compat_pages_array(chunk_pages, pages,
2119 chunk_nr))
2120 break;
2121 } else {
2122 if (copy_from_user(chunk_pages, pages,
2123 chunk_nr * sizeof(*chunk_pages)))
2124 break;
2125 }
80bba129
BG
2126
2127 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2128
87b8d1ad
PA
2129 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2130 break;
742755a1 2131
87b8d1ad
PA
2132 pages += chunk_nr;
2133 status += chunk_nr;
2134 nr_pages -= chunk_nr;
2135 }
2136 return nr_pages ? -EFAULT : 0;
742755a1
CL
2137}
2138
4dc200ce 2139static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
742755a1 2140{
742755a1 2141 struct task_struct *task;
742755a1 2142 struct mm_struct *mm;
742755a1 2143
4dc200ce
ML
2144 /*
2145 * There is no need to check if current process has the right to modify
2146 * the specified process when they are same.
2147 */
2148 if (!pid) {
2149 mmget(current->mm);
2150 *mem_nodes = cpuset_mems_allowed(current);
2151 return current->mm;
2152 }
742755a1
CL
2153
2154 /* Find the mm_struct */
a879bf58 2155 rcu_read_lock();
4dc200ce 2156 task = find_task_by_vpid(pid);
742755a1 2157 if (!task) {
a879bf58 2158 rcu_read_unlock();
4dc200ce 2159 return ERR_PTR(-ESRCH);
742755a1 2160 }
3268c63e 2161 get_task_struct(task);
742755a1
CL
2162
2163 /*
2164 * Check if this process has the right to modify the specified
197e7e52 2165 * process. Use the regular "ptrace_may_access()" checks.
742755a1 2166 */
197e7e52 2167 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 2168 rcu_read_unlock();
4dc200ce 2169 mm = ERR_PTR(-EPERM);
5e9a0f02 2170 goto out;
742755a1 2171 }
c69e8d9c 2172 rcu_read_unlock();
742755a1 2173
4dc200ce
ML
2174 mm = ERR_PTR(security_task_movememory(task));
2175 if (IS_ERR(mm))
5e9a0f02 2176 goto out;
4dc200ce 2177 *mem_nodes = cpuset_mems_allowed(task);
3268c63e 2178 mm = get_task_mm(task);
4dc200ce 2179out:
3268c63e 2180 put_task_struct(task);
6e8b09ea 2181 if (!mm)
4dc200ce
ML
2182 mm = ERR_PTR(-EINVAL);
2183 return mm;
2184}
2185
2186/*
2187 * Move a list of pages in the address space of the currently executing
2188 * process.
2189 */
2190static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2191 const void __user * __user *pages,
2192 const int __user *nodes,
2193 int __user *status, int flags)
2194{
2195 struct mm_struct *mm;
2196 int err;
2197 nodemask_t task_nodes;
2198
2199 /* Check flags */
2200 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
6e8b09ea
SL
2201 return -EINVAL;
2202
4dc200ce
ML
2203 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2204 return -EPERM;
2205
2206 mm = find_mm_struct(pid, &task_nodes);
2207 if (IS_ERR(mm))
2208 return PTR_ERR(mm);
2209
6e8b09ea
SL
2210 if (nodes)
2211 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2212 nodes, status, flags);
2213 else
2214 err = do_pages_stat(mm, nr_pages, pages, status);
742755a1 2215
742755a1
CL
2216 mmput(mm);
2217 return err;
2218}
742755a1 2219
7addf443
DB
2220SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2221 const void __user * __user *, pages,
2222 const int __user *, nodes,
2223 int __user *, status, int, flags)
2224{
2225 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2226}
2227
7039e1db
PZ
2228#ifdef CONFIG_NUMA_BALANCING
2229/*
2230 * Returns true if this is a safe migration target node for misplaced NUMA
bc53008e 2231 * pages. Currently it only checks the watermarks which is crude.
7039e1db
PZ
2232 */
2233static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
3abef4e6 2234 unsigned long nr_migrate_pages)
7039e1db
PZ
2235{
2236 int z;
599d0c95 2237
7039e1db
PZ
2238 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2239 struct zone *zone = pgdat->node_zones + z;
2240
bc53008e 2241 if (!managed_zone(zone))
7039e1db
PZ
2242 continue;
2243
7039e1db
PZ
2244 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
2245 if (!zone_watermark_ok(zone, 0,
2246 high_wmark_pages(zone) +
2247 nr_migrate_pages,
bfe9d006 2248 ZONE_MOVABLE, 0))
7039e1db
PZ
2249 continue;
2250 return true;
2251 }
2252 return false;
2253}
2254
2255static struct page *alloc_misplaced_dst_page(struct page *page,
666feb21 2256 unsigned long data)
7039e1db
PZ
2257{
2258 int nid = (int) data;
c185e494
MWO
2259 int order = compound_order(page);
2260 gfp_t gfp = __GFP_THISNODE;
2261 struct folio *new;
2262
2263 if (order > 0)
2264 gfp |= GFP_TRANSHUGE_LIGHT;
2265 else {
2266 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2267 __GFP_NOWARN;
2268 gfp &= ~__GFP_RECLAIM;
2269 }
2270 new = __folio_alloc_node(gfp, order, nid);
c5b5a3dd 2271
c185e494 2272 return &new->page;
c5b5a3dd
YS
2273}
2274
1c30e017 2275static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
b32967ff 2276{
2b9b624f 2277 int nr_pages = thp_nr_pages(page);
c574bbe9 2278 int order = compound_order(page);
a8f60772 2279
c574bbe9 2280 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
3abef4e6 2281
662aeea7
YS
2282 /* Do not migrate THP mapped by multiple processes */
2283 if (PageTransHuge(page) && total_mapcount(page) > 1)
2284 return 0;
2285
7039e1db 2286 /* Avoid migrating to a node that is nearly full */
c574bbe9
HY
2287 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2288 int z;
2289
2290 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2291 return 0;
2292 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
bc53008e 2293 if (managed_zone(pgdat->node_zones + z))
c574bbe9
HY
2294 break;
2295 }
2296 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
340ef390 2297 return 0;
c574bbe9 2298 }
7039e1db 2299
340ef390
HD
2300 if (isolate_lru_page(page))
2301 return 0;
7039e1db 2302
b75454e1 2303 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2b9b624f 2304 nr_pages);
340ef390 2305
149c33e1 2306 /*
340ef390
HD
2307 * Isolating the page has taken another reference, so the
2308 * caller's reference can be safely dropped without the page
2309 * disappearing underneath us during migration.
149c33e1
MG
2310 */
2311 put_page(page);
340ef390 2312 return 1;
b32967ff
MG
2313}
2314
2315/*
2316 * Attempt to migrate a misplaced page to the specified destination
2317 * node. Caller is expected to have an elevated reference count on
2318 * the page that will be dropped by this function before returning.
2319 */
1bc115d8
MG
2320int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2321 int node)
b32967ff
MG
2322{
2323 pg_data_t *pgdat = NODE_DATA(node);
340ef390 2324 int isolated;
b32967ff 2325 int nr_remaining;
e39bb6be 2326 unsigned int nr_succeeded;
b32967ff 2327 LIST_HEAD(migratepages);
b5916c02 2328 int nr_pages = thp_nr_pages(page);
c5b5a3dd 2329
b32967ff 2330 /*
1bc115d8
MG
2331 * Don't migrate file pages that are mapped in multiple processes
2332 * with execute permissions as they are probably shared libraries.
b32967ff 2333 */
7ee820ee
ML
2334 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2335 (vma->vm_flags & VM_EXEC))
b32967ff 2336 goto out;
b32967ff 2337
09a913a7
MG
2338 /*
2339 * Also do not migrate dirty pages as not all filesystems can move
2340 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
2341 */
9de4f22a 2342 if (page_is_file_lru(page) && PageDirty(page))
09a913a7
MG
2343 goto out;
2344
b32967ff
MG
2345 isolated = numamigrate_isolate_page(pgdat, page);
2346 if (!isolated)
2347 goto out;
2348
2349 list_add(&page->lru, &migratepages);
c185e494
MWO
2350 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2351 NULL, node, MIGRATE_ASYNC,
2352 MR_NUMA_MISPLACED, &nr_succeeded);
b32967ff 2353 if (nr_remaining) {
59c82b70
JK
2354 if (!list_empty(&migratepages)) {
2355 list_del(&page->lru);
c5fc5c3a
YS
2356 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2357 page_is_file_lru(page), -nr_pages);
59c82b70
JK
2358 putback_lru_page(page);
2359 }
b32967ff 2360 isolated = 0;
e39bb6be
HY
2361 }
2362 if (nr_succeeded) {
2363 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2364 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2365 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2366 nr_succeeded);
2367 }
7039e1db 2368 BUG_ON(!list_empty(&migratepages));
7039e1db 2369 return isolated;
340ef390
HD
2370
2371out:
2372 put_page(page);
2373 return 0;
7039e1db 2374}
220018d3 2375#endif /* CONFIG_NUMA_BALANCING */
91952440 2376#endif /* CONFIG_NUMA */