// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	if (unlikely(PageSlab(page)))
		goto out_putpage;
	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
	smp_rmb();
	/*
	 * Check the movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags, so
	 * unconditionally grabbing the lock ruins the page owner's side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
	smp_rmb();
	if (unlikely(PageSlab(page)))
		goto out_putpage;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mops = page_movable_ops(page);
	VM_BUG_ON_PAGE(!mops, page);

	if (!mops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use the PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

static void putback_movable_page(struct page *page)
{
	const struct movable_operations *mops = page_movable_ops(page);

	mops->putback_page(page);
	ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-lru movable page, so here we can use
		 * __PageMovable because an LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_lru(page), -thp_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (is_writable_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(*pvmw.pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*pvmw.pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*pvmw.pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugepage_add_anon_rmap(new, vma, pvmw.address,
						       rmap_flags);
			else
				page_dup_file_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (folio_test_anon(folio))
				page_add_anon_rmap(new, vma, pvmw.address,
						   rmap_flags);
			else
				page_add_file_rmap(new, vma, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptep, ptl);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);

	__migration_entry_wait(mm, ptep, ptl);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void __migration_entry_wait_huge(struct vm_area_struct *vma,
				 pte_t *ptep, spinlock_t *ptl)
{
	pte_t pte;

	hugetlb_vma_assert_locked(vma);
	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
	} else {
		/*
		 * If a migration entry existed, it is safe to release the vma
		 * lock here because the pgtable page won't be freed without
		 * the pgtable lock being released. See the comment right above
		 * the pgtable lock release in migration_entry_wait_on_locked().
		 */
		hugetlb_vma_unlock_read(vma);
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
	}
}

void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);

	__migration_entry_wait_huge(vma, pte, ptl);
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;
	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs are frozen and newpage is not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop the cache reference from the old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + folio_has_private(src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_get(dst);

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on an
	 * effectively unmapped and unreferenced first sub-page of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	/*
	 * For memory tiering mode, when migrating between slow and fast
	 * memory nodes, reset cpupid, because that is used to record
	 * page access time in the slow memory node.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);
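
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): a filesystem whose folios never carry private data can wire
 * the generic helper straight into its address_space_operations. The
 * "example_fs_aops" name is hypothetical; only the ->migrate_folio
 * member matters here. Kept under #if 0 so it reads as documentation.
 */
#if 0
static const struct address_space_operations example_fs_aops = {
	.migrate_folio	= migrate_folio,
};
#endif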

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
			enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks.
			 */
			struct buffer_head *failed_bh = bh;
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		set_bh_page(bh, &dst->page, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example, attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
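
/*
 * Usage sketch (illustrative, editorial addition): block device
 * mappings look buffer heads up directly, so they are the archetypal
 * user of the _norefs variant, e.g. (loosely modelled on def_blk_aops
 * in block/fops.c; the "..." elides unrelated callbacks):
 *
 *	static const struct address_space_operations def_blk_aops = {
 *		...
 *		.migrate_folio	= buffer_migrate_folio_norefs,
 *	};
 */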
#endif

int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);
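
/*
 * Illustrative sketch (editorial addition, assumed usage): a
 * filesystem that attaches private data to its folios but keeps no
 * buffer heads can point its ->migrate_folio at filemap_migrate_folio()
 * directly. The aops name below is hypothetical; kept under #if 0 so
 * it reads as documentation.
 */
#if 0
static const struct address_space_operations example_aops = {
	.migrate_folio	= filemap_migrate_folio,
};
#endif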

/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only write back folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_test_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}

/*
 * Move a page to a newly allocated page.
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(&src->page);

	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping->a_ops->migrate_folio)
			/*
			 * Most folios have a mapping and most filesystems
			 * provide a migrate_folio callback. Anonymous folios
			 * are part of swap space, which also has its own
			 * migrate_folio callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
								mode);
		else
			rc = fallback_migrate_folio(mapping, dst, src, mode);
	} else {
		const struct movable_operations *mops;

		/*
		 * In case of a non-lru page, it could be released after
		 * the isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
		if (!folio_test_movable(src)) {
			rc = MIGRATEPAGE_SUCCESS;
			folio_clear_isolated(src);
			goto out;
		}

		mops = page_movable_ops(&src->page);
		rc = mops->migrate_page(&dst->page, &src->page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
				!folio_test_isolated(src));
	}

	/*
	 * When successful, old pagecache src->mapping must be cleared before
	 * src is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(&src->page)) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			folio_clear_isolated(src);
		}

		/*
		 * Anonymous and movable src->mapping will be cleared by
		 * free_pages_prepare(), so don't reset it here; that keeps
		 * the type (e.g. PageAnon) working.
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;

		if (likely(!folio_is_zone_device(dst)))
			flush_dcache_folio(dst);
	}
out:
	return rc;
}
static int __unmap_and_move(struct folio *src, struct folio *dst,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool page_was_mapped = false;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(&src->page);

	if (!folio_trylock(src)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		folio_lock(src);
	}

	if (folio_test_writeback(src)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		folio_wait_writeback(src);
	}

	/*
	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock():
	 * file caches may use write_page() or lock_page() during migration, so
	 * we only need to care about anon pages here.
	 *
	 * Only folio_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to dst at this point. We used to have a BUG
	 * here if folio_trylock(dst) fails, but would like to allow for
	 * cases where there might be a race with the previous use of dst.
	 * This is much like races on the refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!folio_trylock(dst)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_folio(dst, src, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a src->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!src->mapping) {
		if (folio_test_private(src)) {
			try_to_free_buffers(src);
			goto out_unlock_both;
		}
	} else if (folio_mapped(src)) {
		/* Establish migration ptes */
		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
			       !folio_test_ksm(src) && !anon_vma, src);
		try_to_migrate(src, 0);
		page_was_mapped = true;
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	/*
	 * When successful, push dst to LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct dst->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		folio_add_lru(dst);
		if (page_was_mapped)
			lru_add_drain();
	}

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

out_unlock_both:
	folio_unlock(dst);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	folio_unlock(src);
out:
	/*
	 * If migration is successful, decrease the refcount of dst,
	 * which will not free the page because the new page owner increased
	 * the refcounter.
	 */
	if (rc == MIGRATEPAGE_SUCCESS)
		folio_put(dst);

	return rc;
}

/*
 * Obtain the lock on folio, remove all ptes and migrate the folio
 * to the newly allocated folio in dst.
 */
static int unmap_and_move(new_page_t get_new_page,
				free_page_t put_new_page,
				unsigned long private, struct folio *src,
				int force, enum migrate_mode mode,
				enum migrate_reason reason,
				struct list_head *ret)
{
	struct folio *dst;
	int rc = MIGRATEPAGE_SUCCESS;
	struct page *newpage = NULL;

	if (!thp_migration_supported() && folio_test_transhuge(src))
		return -ENOSYS;

	if (folio_ref_count(src) == 1) {
		/* Folio was freed from under us. So we are done. */
		folio_clear_active(src);
		folio_clear_unevictable(src);
		/* free_pages_prepare() will clear PG_isolated. */
		goto out;
	}

	newpage = get_new_page(&src->page, private);
	if (!newpage)
		return -ENOMEM;
	dst = page_folio(newpage);

	dst->private = NULL;
	rc = __unmap_and_move(src, dst, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(&dst->page, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A folio that has been migrated has all references
		 * removed and will be freed. A folio that has not been
		 * migrated will have kept its references and be restored.
		 */
		list_del(&src->lru);
	}

	/*
	 * If migration is successful, release the reference grabbed during
	 * isolation. Otherwise, restore the folio to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		/*
		 * Compaction can also migrate non-LRU folios which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * via __folio_test_movable().
		 */
		if (likely(!__folio_test_movable(src)))
			mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
					folio_is_file_lru(src), -folio_nr_pages(src));

		if (reason != MR_MEMORY_FAILURE)
			/*
			 * We release the folio in page_handle_poison.
			 */
			folio_put(src);
	} else {
		if (rc != -EAGAIN)
			list_add_tail(&src->lru, ret);

		if (put_new_page)
			put_new_page(&dst->page, private);
		else
			folio_put(dst);
	}

	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepages.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason,
				struct list_head *ret)
{
	struct folio *dst, *src = page_folio(hpage);
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	/*
	 * Migratability of hugepages depends on architectures and their size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage)))
		return -ENOSYS;

	if (folio_ref_count(src) == 1) {
		/* page was freed from under us. So we are done. */
		putback_active_hugepage(hpage);
		return MIGRATEPAGE_SUCCESS;
	}

	new_hpage = get_new_page(hpage, private);
	if (!new_hpage)
		return -ENOMEM;
	dst = page_folio(new_hpage);

	if (!folio_trylock(src)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		folio_lock(src);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * folio_mapping() set, the hugetlbfs specific move page routine will
	 * not be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (folio_test_anon(src))
		anon_vma = folio_get_anon_vma(src);

	if (unlikely(!folio_trylock(dst)))
		goto put_anon;

	if (folio_mapped(src)) {
		enum ttu_flags ttu = 0;

		if (!folio_test_anon(src)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take the
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_page_mapping_lock_write(hpage);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	folio_unlock(dst);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(src, dst, reason);
		put_new_page = NULL;
	}

out_unlock:
	folio_unlock(src);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		putback_active_hugepage(hpage);
	else if (rc != -EAGAIN)
		list_move_tail(&src->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	return rc;
}
static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
{
	int rc;

	folio_lock(folio);
	rc = split_folio_to_list(folio, split_folios);
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);

	return rc;
}
b20a3503 1416/*
eaec4e63 1417 * migrate_pages - migrate the folios specified in a list, to the free folios
c73e5c9c 1418 * supplied as the target for the page migration
b20a3503 1419 *
eaec4e63
HY
1420 * @from: The list of folios to be migrated.
1421 * @get_new_page: The function used to allocate free folios to be used
1422 * as the target of the folio migration.
1423 * @put_new_page: The function used to free target folios if migration
68711a74 1424 * fails, or NULL if no special handling is necessary.
c73e5c9c
SB
1425 * @private: Private data to be passed on to get_new_page()
1426 * @mode: The migration mode that specifies the constraints for
eaec4e63
HY
1427 * folio migration, if any.
1428 * @reason: The reason for folio migration.
1429 * @ret_succeeded: Set to the number of folios migrated successfully if
5ac95884 1430 * the caller passes a non-NULL pointer.
b20a3503 1431 *
eaec4e63
HY
1432 * The function returns after 10 attempts or if no folios are movable any more
1433 * because the list has become empty or no retryable folios exist any more.
1434 * It is caller's responsibility to call putback_movable_pages() to return folios
dd4ae78a 1435 * to the LRU or free list only if ret != 0.
b20a3503 1436 *
eaec4e63
HY
1437 * Returns the number of {normal folio, large folio, hugetlb} that were not
1438 * migrated, or an error code. The number of large folio splits will be
1439 * considered as the number of non-migrated large folio, no matter how many
1440 * split folios of the large folio are migrated successfully.
b20a3503 1441 */
9c620e2b 1442int migrate_pages(struct list_head *from, new_page_t get_new_page,
68711a74 1443 free_page_t put_new_page, unsigned long private,
5ac95884 1444 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
b20a3503 1445{
e24f0b8f 1446 int retry = 1;
eaec4e63 1447 int large_retry = 1;
1a5bae25 1448 int thp_retry = 1;
b20a3503 1449 int nr_failed = 0;
b5bade97 1450 int nr_failed_pages = 0;
077309bc 1451 int nr_retry_pages = 0;
5647bc29 1452 int nr_succeeded = 0;
1a5bae25 1453 int nr_thp_succeeded = 0;
eaec4e63 1454 int nr_large_failed = 0;
1a5bae25
AK
1455 int nr_thp_failed = 0;
1456 int nr_thp_split = 0;
b20a3503 1457 int pass = 0;
eaec4e63 1458 bool is_large = false;
1a5bae25 1459 bool is_thp = false;
eaec4e63
HY
1460 struct folio *folio, *folio2;
1461 int rc, nr_pages;
1462 LIST_HEAD(ret_folios);
1463 LIST_HEAD(split_folios);
b0b515bf 1464 bool nosplit = (reason == MR_NUMA_MISPLACED);
eaec4e63 1465 bool no_split_folio_counting = false;
b20a3503 1466
7bc1aec5
LM
1467 trace_mm_migrate_pages_start(mode, reason);
1468
eaec4e63
HY
1469split_folio_migration:
1470 for (pass = 0; pass < 10 && (retry || large_retry); pass++) {
e24f0b8f 1471 retry = 0;
eaec4e63 1472 large_retry = 0;
1a5bae25 1473 thp_retry = 0;
077309bc 1474 nr_retry_pages = 0;
b20a3503 1475
eaec4e63 1476 list_for_each_entry_safe(folio, folio2, from, lru) {
1a5bae25 1477 /*
eaec4e63
HY
1478 * Large folio statistics is based on the source large
1479 * folio. Capture required information that might get
1480 * lost during migration.
1a5bae25 1481 */
eaec4e63
HY
1482 is_large = folio_test_large(folio) && !folio_test_hugetlb(folio);
1483 is_thp = is_large && folio_test_pmd_mappable(folio);
1484 nr_pages = folio_nr_pages(folio);
e24f0b8f 1485 cond_resched();
2d1db3b1 1486
eaec4e63 1487 if (folio_test_hugetlb(folio))
31caf665 1488 rc = unmap_and_move_huge_page(get_new_page,
eaec4e63
HY
1489 put_new_page, private,
1490 &folio->page, pass > 2, mode,
1491 reason,
1492 &ret_folios);
31caf665 1493 else
68711a74 1494 rc = unmap_and_move(get_new_page, put_new_page,
eaec4e63
HY
1495 private, folio, pass > 2, mode,
1496 reason, &ret_folios);
dd4ae78a
YS
1497 /*
1498 * The rules are:
eaec4e63
HY
1499 * Success: non hugetlb folio will be freed, hugetlb
1500 * folio will be put back
dd4ae78a
YS
1501 * -EAGAIN: stay on the from list
1502 * -ENOMEM: stay on the from list
577be05c 1503 * -ENOSYS: stay on the from list
eaec4e63 1504 * Other errno: put on ret_folios list then splice to
dd4ae78a
YS
1505 * from list
1506 */
e24f0b8f 1507 switch(rc) {
d532e2e5 1508 /*
eaec4e63
HY
1509 * Large folio migration might be unsupported or
1510 * the allocation could've failed so we should retry
1511 * on the same folio with the large folio split
1512 * to normal folios.
d532e2e5 1513 *
eaec4e63 1514 * Split folios are put in split_folios, and
e6fa8a79
HY
1515 * we will migrate them after the rest of the
1516 * list is processed.
d532e2e5
YS
1517 */
1518 case -ENOSYS:
eaec4e63
HY
1519 /* Large folio migration is unsupported */
1520 if (is_large) {
1521 nr_large_failed++;
1522 nr_thp_failed += is_thp;
1523 if (!try_split_folio(folio, &split_folios)) {
1524 nr_thp_split += is_thp;
e6fa8a79 1525 break;
d532e2e5 1526 }
d532e2e5 1527 /* Hugetlb migration is unsupported */
eaec4e63 1528 } else if (!no_split_folio_counting) {
b5bade97 1529 nr_failed++;
f430893b
ML
1530 }
1531
eaec4e63
HY
1532 nr_failed_pages += nr_pages;
1533 list_move_tail(&folio->lru, &ret_folios);
d532e2e5 1534 break;
95a402c3 1535 case -ENOMEM:
94723aaf 1536 /*
d532e2e5 1537 * When memory is low, don't bother to try to migrate
eaec4e63 1538 * other folios, just exit.
94723aaf 1539 */
eaec4e63
HY
1540 if (is_large) {
1541 nr_large_failed++;
1542 nr_thp_failed += is_thp;
1543 /* Large folio NUMA faulting doesn't split to retry. */
fd4a7ac3 1544 if (!nosplit) {
eaec4e63 1545 int ret = try_split_folio(folio, &split_folios);
fd4a7ac3
BW
1546
1547 if (!ret) {
eaec4e63 1548 nr_thp_split += is_thp;
fd4a7ac3
BW
1549 break;
1550 } else if (reason == MR_LONGTERM_PIN &&
1551 ret == -EAGAIN) {
1552 /*
eaec4e63
HY
1553 * Try again to split large folio to
1554 * mitigate the failure of longterm pinning.
fd4a7ac3 1555 */
eaec4e63
HY
1556 large_retry++;
1557 thp_retry += is_thp;
1558 nr_retry_pages += nr_pages;
fd4a7ac3
BW
1559 break;
1560 }
94723aaf 1561 }
eaec4e63 1562 } else if (!no_split_folio_counting) {
f430893b 1563 nr_failed++;
1a5bae25 1564 }
b5bade97 1565
eaec4e63 1566 nr_failed_pages += nr_pages + nr_retry_pages;
69a041ff 1567 /*
eaec4e63
HY
1568 * There might be some split folios of fail-to-migrate large
1569 * folios left in split_folios list. Move them back to migration
69a041ff 1570 * list so that they could be put back to the right list by
eaec4e63 1571 * the caller otherwise the folio refcnt will be leaked.
69a041ff 1572 */
eaec4e63 1573 list_splice_init(&split_folios, from);
fbed53b4 1574 /* nr_failed isn't updated for not used */
eaec4e63 1575 nr_large_failed += large_retry;
69a041ff 1576 nr_thp_failed += thp_retry;
95a402c3 1577 goto out;
e24f0b8f 1578 case -EAGAIN:
eaec4e63
HY
1579 if (is_large) {
1580 large_retry++;
1581 thp_retry += is_thp;
1582 } else if (!no_split_folio_counting) {
f430893b 1583 retry++;
eaec4e63
HY
1584 }
1585 nr_retry_pages += nr_pages;
e24f0b8f 1586 break;
78bd5209 1587 case MIGRATEPAGE_SUCCESS:
eaec4e63
HY
1588 nr_succeeded += nr_pages;
1589 nr_thp_succeeded += is_thp;
e24f0b8f
CL
1590 break;
1591 default:
354a3363 1592 /*
d532e2e5 1593 * Permanent failure (-EBUSY, etc.):
eaec4e63
HY
 1594 * unlike the -EAGAIN case, the failed folio is
1595 * removed from migration folio list and not
354a3363
NH
1596 * retried in the next outer loop.
1597 */
eaec4e63
HY
1598 if (is_large) {
1599 nr_large_failed++;
1600 nr_thp_failed += is_thp;
1601 } else if (!no_split_folio_counting) {
b5bade97 1602 nr_failed++;
eaec4e63 1603 }
f430893b 1604
eaec4e63 1605 nr_failed_pages += nr_pages;
e24f0b8f 1606 break;
2d1db3b1 1607 }
b20a3503
CL
1608 }
1609 }
7047b5a4 1610 nr_failed += retry;
eaec4e63 1611 nr_large_failed += large_retry;
1a5bae25 1612 nr_thp_failed += thp_retry;
077309bc 1613 nr_failed_pages += nr_retry_pages;
b5bade97 1614 /*
eaec4e63
HY
 1615 * Try to migrate split folios of fail-to-migrate large folios, with
 1616 * no nr_failed counting in this round, since all split folios of a
 1617 * large folio are counted as one failure in the first round.
b5bade97 1618 */
eaec4e63 1619 if (!list_empty(&split_folios)) {
b5bade97 1620 /*
eaec4e63 1621 * Move non-migrated folios (after 10 retries) to ret_folios
b5bade97
BW
1622 * to avoid migrating them again.
1623 */
eaec4e63
HY
1624 list_splice_init(from, &ret_folios);
1625 list_splice_init(&split_folios, from);
1626 no_split_folio_counting = true;
b5bade97 1627 retry = 1;
eaec4e63 1628 goto split_folio_migration;
b5bade97
BW
1629 }
1630
eaec4e63 1631 rc = nr_failed + nr_large_failed;
95a402c3 1632out:
dd4ae78a 1633 /*
eaec4e63 1634 * Put the permanent-failure folios back to the migration list; they
dd4ae78a
YS
1635 * will be put back to the right list by the caller.
1636 */
eaec4e63 1637 list_splice(&ret_folios, from);
dd4ae78a 1638
03e5f82e 1639 /*
eaec4e63
HY
1640 * Return 0 in case all split folios of fail-to-migrate large folios
1641 * are migrated successfully.
03e5f82e
BW
1642 */
1643 if (list_empty(from))
1644 rc = 0;
1645
1a5bae25 1646 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
b5bade97 1647 count_vm_events(PGMIGRATE_FAIL, nr_failed_pages);
1a5bae25
AK
1648 count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1649 count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1650 count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
b5bade97 1651 trace_mm_migrate_pages(nr_succeeded, nr_failed_pages, nr_thp_succeeded,
1a5bae25 1652 nr_thp_failed, nr_thp_split, mode, reason);
7b2a2d4a 1653
5ac95884
YS
1654 if (ret_succeeded)
1655 *ret_succeeded = nr_succeeded;
1656
78bd5209 1657 return rc;
b20a3503 1658}
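/*
 * Editor's sketch (not part of the original source): a worked example of
 * the return contract above, using hypothetical numbers. If migrate_pages()
 * is handed 5 folios and 3 migrate successfully while 2 fail permanently,
 * rc is 2; the 2 failed folios are spliced back onto the "from" list via
 * ret_folios, and the caller is expected to put them back on the right
 * list (e.g. with putback_movable_pages()). rc is 0 only if "from" ends
 * up empty, i.e. every folio, including split ones, was migrated.
 */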
95a402c3 1659
19fc7bed 1660struct page *alloc_migration_target(struct page *page, unsigned long private)
b4b38223 1661{
ffe06786 1662 struct folio *folio = page_folio(page);
19fc7bed
JK
1663 struct migration_target_control *mtc;
1664 gfp_t gfp_mask;
b4b38223 1665 unsigned int order = 0;
e37d3e83 1666 struct folio *hugetlb_folio = NULL;
ffe06786 1667 struct folio *new_folio = NULL;
19fc7bed
JK
1668 int nid;
1669 int zidx;
1670
1671 mtc = (struct migration_target_control *)private;
1672 gfp_mask = mtc->gfp_mask;
1673 nid = mtc->nid;
1674 if (nid == NUMA_NO_NODE)
ffe06786 1675 nid = folio_nid(folio);
b4b38223 1676
ffe06786 1677 if (folio_test_hugetlb(folio)) {
e51da3a9 1678 struct hstate *h = folio_hstate(folio);
d92bbc27 1679
19fc7bed 1680 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
e37d3e83
SK
1681 hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
1682 mtc->nmask, gfp_mask);
1683 return &hugetlb_folio->page;
d92bbc27 1684 }
b4b38223 1685
ffe06786 1686 if (folio_test_large(folio)) {
9933a0c8
JK
1687 /*
1688 * clear __GFP_RECLAIM to make the migration callback
1689 * consistent with regular THP allocations.
1690 */
1691 gfp_mask &= ~__GFP_RECLAIM;
b4b38223 1692 gfp_mask |= GFP_TRANSHUGE;
ffe06786 1693 order = folio_order(folio);
b4b38223 1694 }
ffe06786 1695 zidx = zone_idx(folio_zone(folio));
19fc7bed 1696 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
b4b38223
JK
1697 gfp_mask |= __GFP_HIGHMEM;
1698
ffe06786 1699 new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
b4b38223 1700
ffe06786 1701 return &new_folio->page;
b4b38223
JK
1702}
1703
742755a1 1704#ifdef CONFIG_NUMA
742755a1 1705
a49bd4d7 1706static int store_status(int __user *status, int start, int value, int nr)
742755a1 1707{
a49bd4d7
MH
1708 while (nr-- > 0) {
1709 if (put_user(value, status + start))
1710 return -EFAULT;
1711 start++;
1712 }
1713
1714 return 0;
1715}
1716
1717static int do_move_pages_to_node(struct mm_struct *mm,
1718 struct list_head *pagelist, int node)
1719{
1720 int err;
a0976311
JK
1721 struct migration_target_control mtc = {
1722 .nid = node,
1723 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1724 };
a49bd4d7 1725
a0976311 1726 err = migrate_pages(pagelist, alloc_migration_target, NULL,
5ac95884 1727 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
a49bd4d7
MH
1728 if (err)
1729 putback_movable_pages(pagelist);
1730 return err;
742755a1
CL
1731}
1732
1733/*
a49bd4d7
MH
1734 * Resolves the given address to a struct page, isolates it from the LRU and
 1735 * adds it to the given pagelist.
e0153fc2
YS
1736 * Returns:
1737 * errno - if the page cannot be found/isolated
1738 * 0 - when it doesn't have to be migrated because it is already on the
1739 * target node
1740 * 1 - when it has been queued
742755a1 1741 */
a49bd4d7
MH
1742static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1743 int node, struct list_head *pagelist, bool migrate_all)
742755a1 1744{
a49bd4d7
MH
1745 struct vm_area_struct *vma;
1746 struct page *page;
742755a1 1747 int err;
742755a1 1748
d8ed45c5 1749 mmap_read_lock(mm);
a49bd4d7 1750 err = -EFAULT;
cb1c37b1
ML
1751 vma = vma_lookup(mm, addr);
1752 if (!vma || !vma_migratable(vma))
a49bd4d7 1753 goto out;
742755a1 1754
a49bd4d7 1755 /* FOLL_DUMP to ignore special (like zero) pages */
87d2762e 1756 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da 1757
a49bd4d7
MH
1758 err = PTR_ERR(page);
1759 if (IS_ERR(page))
1760 goto out;
89f5b7da 1761
a49bd4d7 1762 err = -ENOENT;
f7091ed6 1763 if (!page)
a49bd4d7 1764 goto out;
742755a1 1765
f7091ed6
HW
1766 if (is_zone_device_page(page))
1767 goto out_putpage;
1768
a49bd4d7
MH
1769 err = 0;
1770 if (page_to_nid(page) == node)
1771 goto out_putpage;
742755a1 1772
a49bd4d7
MH
1773 err = -EACCES;
1774 if (page_mapcount(page) > 1 && !migrate_all)
1775 goto out_putpage;
742755a1 1776
a49bd4d7
MH
1777 if (PageHuge(page)) {
1778 if (PageHead(page)) {
6aa3a920 1779 err = isolate_hugetlb(page_folio(page), pagelist);
7ce82f4c
ML
1780 if (!err)
1781 err = 1;
e632a938 1782 }
a49bd4d7
MH
1783 } else {
1784 struct page *head;
e632a938 1785
e8db67eb
NH
1786 head = compound_head(page);
1787 err = isolate_lru_page(head);
cf608ac1 1788 if (err)
a49bd4d7 1789 goto out_putpage;
742755a1 1790
e0153fc2 1791 err = 1;
a49bd4d7
MH
1792 list_add_tail(&head->lru, pagelist);
1793 mod_node_page_state(page_pgdat(head),
9de4f22a 1794 NR_ISOLATED_ANON + page_is_file_lru(head),
6c357848 1795 thp_nr_pages(head));
a49bd4d7
MH
1796 }
1797out_putpage:
1798 /*
1799 * Either remove the duplicate refcount from
1800 * isolate_lru_page() or drop the page ref if it was
1801 * not isolated.
1802 */
1803 put_page(page);
1804out:
d8ed45c5 1805 mmap_read_unlock(mm);
742755a1
CL
1806 return err;
1807}
1808
7ca8783a
WY
1809static int move_pages_and_store_status(struct mm_struct *mm, int node,
1810 struct list_head *pagelist, int __user *status,
1811 int start, int i, unsigned long nr_pages)
1812{
1813 int err;
1814
5d7ae891
WY
1815 if (list_empty(pagelist))
1816 return 0;
1817
7ca8783a
WY
1818 err = do_move_pages_to_node(mm, pagelist, node);
1819 if (err) {
1820 /*
1821 * Positive err means the number of failed
1822 * pages to migrate. Since we are going to
1823 * abort and return the number of non-migrated
ab9dd4f8 1824 * pages, we need to include the rest of the
7ca8783a
WY
1825 * nr_pages that have not been attempted as
1826 * well.
1827 */
1828 if (err > 0)
a7504ed1 1829 err += nr_pages - i;
7ca8783a
WY
1830 return err;
1831 }
1832 return store_status(status, start, node, i - start);
1833}
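/*
 * Editor's note (hypothetical numbers): with nr_pages = 8, start = 2 and
 * i = 5, a batch of 3 pages is attempted. If do_move_pages_to_node()
 * returns 2 (two pages failed to migrate), the function returns
 * 2 + (8 - 5) = 5: the two failures plus the three pages at indexes 5..7
 * that were never attempted, matching the "number of non-migrated pages"
 * contract described above.
 */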
1834
5e9a0f02
BG
1835/*
 1836 * Migrate an array of page addresses onto an array of nodes and fill
 1837 * in the corresponding array of status values.
1838 */
3268c63e 1839static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
5e9a0f02
BG
1840 unsigned long nr_pages,
1841 const void __user * __user *pages,
1842 const int __user *nodes,
1843 int __user *status, int flags)
1844{
a49bd4d7
MH
1845 int current_node = NUMA_NO_NODE;
1846 LIST_HEAD(pagelist);
1847 int start, i;
1848 int err = 0, err1;
35282a2d 1849
361a2a22 1850 lru_cache_disable();
35282a2d 1851
a49bd4d7
MH
1852 for (i = start = 0; i < nr_pages; i++) {
1853 const void __user *p;
1854 unsigned long addr;
1855 int node;
3140a227 1856
a49bd4d7
MH
1857 err = -EFAULT;
1858 if (get_user(p, pages + i))
1859 goto out_flush;
1860 if (get_user(node, nodes + i))
1861 goto out_flush;
057d3389 1862 addr = (unsigned long)untagged_addr(p);
a49bd4d7
MH
1863
1864 err = -ENODEV;
1865 if (node < 0 || node >= MAX_NUMNODES)
1866 goto out_flush;
1867 if (!node_state(node, N_MEMORY))
1868 goto out_flush;
5e9a0f02 1869
a49bd4d7
MH
1870 err = -EACCES;
1871 if (!node_isset(node, task_nodes))
1872 goto out_flush;
1873
1874 if (current_node == NUMA_NO_NODE) {
1875 current_node = node;
1876 start = i;
1877 } else if (node != current_node) {
7ca8783a
WY
1878 err = move_pages_and_store_status(mm, current_node,
1879 &pagelist, status, start, i, nr_pages);
a49bd4d7
MH
1880 if (err)
1881 goto out;
1882 start = i;
1883 current_node = node;
3140a227
BG
1884 }
1885
a49bd4d7
MH
1886 /*
1887 * Errors in the page lookup or isolation are not fatal and we simply
1888 * report them via status
1889 */
1890 err = add_page_for_migration(mm, addr, current_node,
1891 &pagelist, flags & MPOL_MF_MOVE_ALL);
e0153fc2 1892
d08221a0 1893 if (err > 0) {
e0153fc2
YS
1894 /* The page is successfully queued for migration */
1895 continue;
1896 }
3140a227 1897
65462462
JH
1898 /*
1899 * The move_pages() man page does not have an -EEXIST choice, so
1900 * use -EFAULT instead.
1901 */
1902 if (err == -EEXIST)
1903 err = -EFAULT;
1904
d08221a0
WY
1905 /*
1906 * If the page is already on the target node (!err), store the
1907 * node, otherwise, store the err.
1908 */
1909 err = store_status(status, i, err ? : current_node, 1);
a49bd4d7
MH
1910 if (err)
1911 goto out_flush;
5e9a0f02 1912
7ca8783a
WY
1913 err = move_pages_and_store_status(mm, current_node, &pagelist,
1914 status, start, i, nr_pages);
a7504ed1
HY
1915 if (err) {
1916 /* We have accounted for page i */
1917 if (err > 0)
1918 err--;
4afdacec 1919 goto out;
a7504ed1 1920 }
a49bd4d7 1921 current_node = NUMA_NO_NODE;
3140a227 1922 }
a49bd4d7
MH
1923out_flush:
1924 /* Make sure we do not overwrite the existing error */
7ca8783a
WY
1925 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1926 status, start, i, nr_pages);
dfe9aa23 1927 if (err >= 0)
a49bd4d7 1928 err = err1;
5e9a0f02 1929out:
361a2a22 1930 lru_cache_enable();
5e9a0f02
BG
1931 return err;
1932}
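/*
 * Editor's note (hypothetical input): given pages[] whose requested nodes
 * are {1, 1, 2}, indexes 0 and 1 are queued on the same pagelist; when
 * index 2 asks for node 2, the pending batch is first flushed to node 1
 * via move_pages_and_store_status() before a new batch is started for
 * node 2. The final batch is flushed at out_flush.
 */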
1933
742755a1 1934/*
2f007e74 1935 * Determine the nodes of an array of pages and store them in an array of status values.
742755a1 1936 */
80bba129
BG
1937static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1938 const void __user **pages, int *status)
742755a1 1939{
2f007e74 1940 unsigned long i;
2f007e74 1941
d8ed45c5 1942 mmap_read_lock(mm);
742755a1 1943
2f007e74 1944 for (i = 0; i < nr_pages; i++) {
80bba129 1945 unsigned long addr = (unsigned long)(*pages);
742755a1
CL
1946 struct vm_area_struct *vma;
1947 struct page *page;
c095adbc 1948 int err = -EFAULT;
2f007e74 1949
059b8b48
LH
1950 vma = vma_lookup(mm, addr);
1951 if (!vma)
742755a1
CL
1952 goto set_status;
1953
d899844e 1954 /* FOLL_DUMP to ignore special (like zero) pages */
16fd6b31 1955 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da
LT
1956
1957 err = PTR_ERR(page);
1958 if (IS_ERR(page))
1959 goto set_status;
1960
f7091ed6
HW
1961 err = -ENOENT;
1962 if (!page)
1963 goto set_status;
1964
1965 if (!is_zone_device_page(page))
4cd61484 1966 err = page_to_nid(page);
f7091ed6 1967
16fd6b31 1968 put_page(page);
742755a1 1969set_status:
80bba129
BG
1970 *status = err;
1971
1972 pages++;
1973 status++;
1974 }
1975
d8ed45c5 1976 mmap_read_unlock(mm);
80bba129
BG
1977}
1978
5b1b561b
AB
1979static int get_compat_pages_array(const void __user *chunk_pages[],
1980 const void __user * __user *pages,
1981 unsigned long chunk_nr)
1982{
1983 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
1984 compat_uptr_t p;
1985 int i;
1986
1987 for (i = 0; i < chunk_nr; i++) {
1988 if (get_user(p, pages32 + i))
1989 return -EFAULT;
1990 chunk_pages[i] = compat_ptr(p);
1991 }
1992
1993 return 0;
1994}
1995
80bba129
BG
1996/*
 1997 * Determine the nodes of a user array of pages and store them in
 1998 * a user array of status values.
1999 */
2000static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2001 const void __user * __user *pages,
2002 int __user *status)
2003{
3eefb826 2004#define DO_PAGES_STAT_CHUNK_NR 16UL
80bba129
BG
2005 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2006 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
80bba129 2007
87b8d1ad 2008 while (nr_pages) {
3eefb826 2009 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
87b8d1ad 2010
5b1b561b
AB
2011 if (in_compat_syscall()) {
2012 if (get_compat_pages_array(chunk_pages, pages,
2013 chunk_nr))
2014 break;
2015 } else {
2016 if (copy_from_user(chunk_pages, pages,
2017 chunk_nr * sizeof(*chunk_pages)))
2018 break;
2019 }
80bba129
BG
2020
2021 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2022
87b8d1ad
PA
2023 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2024 break;
742755a1 2025
87b8d1ad
PA
2026 pages += chunk_nr;
2027 status += chunk_nr;
2028 nr_pages -= chunk_nr;
2029 }
2030 return nr_pages ? -EFAULT : 0;
742755a1
CL
2031}
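/*
 * Editor's note: the chunking above processes at most
 * DO_PAGES_STAT_CHUNK_NR (16) entries per iteration, so e.g.
 * nr_pages = 40 is handled as chunks of 16, 16 and 8.
 */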
2032
4dc200ce 2033static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
742755a1 2034{
742755a1 2035 struct task_struct *task;
742755a1 2036 struct mm_struct *mm;
742755a1 2037
4dc200ce
ML
2038 /*
 2039 * There is no need to check if the current process has the right to modify
 2040 * the specified process when they are the same.
2041 */
2042 if (!pid) {
2043 mmget(current->mm);
2044 *mem_nodes = cpuset_mems_allowed(current);
2045 return current->mm;
2046 }
742755a1
CL
2047
2048 /* Find the mm_struct */
a879bf58 2049 rcu_read_lock();
4dc200ce 2050 task = find_task_by_vpid(pid);
742755a1 2051 if (!task) {
a879bf58 2052 rcu_read_unlock();
4dc200ce 2053 return ERR_PTR(-ESRCH);
742755a1 2054 }
3268c63e 2055 get_task_struct(task);
742755a1
CL
2056
2057 /*
2058 * Check if this process has the right to modify the specified
197e7e52 2059 * process. Use the regular "ptrace_may_access()" checks.
742755a1 2060 */
197e7e52 2061 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 2062 rcu_read_unlock();
4dc200ce 2063 mm = ERR_PTR(-EPERM);
5e9a0f02 2064 goto out;
742755a1 2065 }
c69e8d9c 2066 rcu_read_unlock();
742755a1 2067
4dc200ce
ML
2068 mm = ERR_PTR(security_task_movememory(task));
2069 if (IS_ERR(mm))
5e9a0f02 2070 goto out;
4dc200ce 2071 *mem_nodes = cpuset_mems_allowed(task);
3268c63e 2072 mm = get_task_mm(task);
4dc200ce 2073out:
3268c63e 2074 put_task_struct(task);
6e8b09ea 2075 if (!mm)
4dc200ce
ML
2076 mm = ERR_PTR(-EINVAL);
2077 return mm;
2078}
2079
2080/*
2081 * Move a list of pages in the address space of the currently executing
2082 * process.
2083 */
2084static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2085 const void __user * __user *pages,
2086 const int __user *nodes,
2087 int __user *status, int flags)
2088{
2089 struct mm_struct *mm;
2090 int err;
2091 nodemask_t task_nodes;
2092
2093 /* Check flags */
2094 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
6e8b09ea
SL
2095 return -EINVAL;
2096
4dc200ce
ML
2097 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2098 return -EPERM;
2099
2100 mm = find_mm_struct(pid, &task_nodes);
2101 if (IS_ERR(mm))
2102 return PTR_ERR(mm);
2103
6e8b09ea
SL
2104 if (nodes)
2105 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2106 nodes, status, flags);
2107 else
2108 err = do_pages_stat(mm, nr_pages, pages, status);
742755a1 2109
742755a1
CL
2110 mmput(mm);
2111 return err;
2112}
742755a1 2113
7addf443
DB
2114SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2115 const void __user * __user *, pages,
2116 const int __user *, nodes,
2117 int __user *, status, int, flags)
2118{
2119 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2120}
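/*
 * Editor's sketch (not part of migrate.c): minimal userspace usage of the
 * move_pages(2) syscall defined above, via libnuma's <numaif.h> wrapper.
 * It moves one page of the calling process to node 0 and reads back its
 * status. Hypothetical example; build with -lnuma, error handling is
 * deliberately minimal.
 */
#if 0	/* illustrative userspace code, not kernel code */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *buf = NULL;
	void *pages[1];
	int nodes[1] = { 0 };		/* desired target node */
	int status[1];

	if (posix_memalign(&buf, psz, psz))
		return 1;
	memset(buf, 0, psz);		/* fault the page in first */

	pages[0] = buf;
	/* pid 0 means "the calling process" */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0) {
		perror("move_pages");
		return 1;
	}
	/* status[0] is the node now holding the page, or a negative errno */
	printf("page is on node %d\n", status[0]);
	free(buf);
	return 0;
}
#endif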
2121
7039e1db
PZ
2122#ifdef CONFIG_NUMA_BALANCING
2123/*
2124 * Returns true if this is a safe migration target node for misplaced NUMA
bc53008e 2125 * pages. Currently it only checks the watermarks, which is crude.
7039e1db
PZ
2126 */
2127static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
3abef4e6 2128 unsigned long nr_migrate_pages)
7039e1db
PZ
2129{
2130 int z;
599d0c95 2131
7039e1db
PZ
2132 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2133 struct zone *zone = pgdat->node_zones + z;
2134
bc53008e 2135 if (!managed_zone(zone))
7039e1db
PZ
2136 continue;
2137
7039e1db
PZ
2138 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
2139 if (!zone_watermark_ok(zone, 0,
2140 high_wmark_pages(zone) +
2141 nr_migrate_pages,
bfe9d006 2142 ZONE_MOVABLE, 0))
7039e1db
PZ
2143 continue;
2144 return true;
2145 }
2146 return false;
2147}
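/*
 * Editor's note (hypothetical numbers): with high_wmark_pages(zone) = 1024
 * and nr_migrate_pages = 512, a zone qualifies only if the watermark check
 * passes for 1024 + 512 = 1536 free pages, i.e. the migration could
 * complete without pushing the zone below its high watermark (and thus
 * without waking kswapd).
 */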
2148
2149static struct page *alloc_misplaced_dst_page(struct page *page,
666feb21 2150 unsigned long data)
7039e1db
PZ
2151{
2152 int nid = (int) data;
c185e494
MWO
2153 int order = compound_order(page);
2154 gfp_t gfp = __GFP_THISNODE;
2155 struct folio *new;
2156
2157 if (order > 0)
2158 gfp |= GFP_TRANSHUGE_LIGHT;
2159 else {
2160 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2161 __GFP_NOWARN;
2162 gfp &= ~__GFP_RECLAIM;
2163 }
2164 new = __folio_alloc_node(gfp, order, nid);
c5b5a3dd 2165
c185e494 2166 return &new->page;
c5b5a3dd
YS
2167}
2168
1c30e017 2169static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
b32967ff 2170{
2b9b624f 2171 int nr_pages = thp_nr_pages(page);
c574bbe9 2172 int order = compound_order(page);
a8f60772 2173
c574bbe9 2174 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
3abef4e6 2175
662aeea7
YS
2176 /* Do not migrate THP mapped by multiple processes */
2177 if (PageTransHuge(page) && total_mapcount(page) > 1)
2178 return 0;
2179
7039e1db 2180 /* Avoid migrating to a node that is nearly full */
c574bbe9
HY
2181 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2182 int z;
2183
2184 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2185 return 0;
2186 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
bc53008e 2187 if (managed_zone(pgdat->node_zones + z))
c574bbe9
HY
2188 break;
2189 }
2190 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
340ef390 2191 return 0;
c574bbe9 2192 }
7039e1db 2193
340ef390
HD
2194 if (isolate_lru_page(page))
2195 return 0;
7039e1db 2196
b75454e1 2197 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2b9b624f 2198 nr_pages);
340ef390 2199
149c33e1 2200 /*
340ef390
HD
2201 * Isolating the page has taken another reference, so the
2202 * caller's reference can be safely dropped without the page
2203 * disappearing underneath us during migration.
149c33e1
MG
2204 */
2205 put_page(page);
340ef390 2206 return 1;
b32967ff
MG
2207}
2208
2209/*
2210 * Attempt to migrate a misplaced page to the specified destination
 2211 * node. The caller is expected to hold an elevated reference count on
 2212 * the page, which will be dropped by this function before returning.
2213 */
1bc115d8
MG
2214int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2215 int node)
b32967ff
MG
2216{
2217 pg_data_t *pgdat = NODE_DATA(node);
340ef390 2218 int isolated;
b32967ff 2219 int nr_remaining;
e39bb6be 2220 unsigned int nr_succeeded;
b32967ff 2221 LIST_HEAD(migratepages);
b5916c02 2222 int nr_pages = thp_nr_pages(page);
c5b5a3dd 2223
b32967ff 2224 /*
1bc115d8
MG
2225 * Don't migrate file pages that are mapped in multiple processes
2226 * with execute permissions as they are probably shared libraries.
b32967ff 2227 */
7ee820ee
ML
2228 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2229 (vma->vm_flags & VM_EXEC))
b32967ff 2230 goto out;
b32967ff 2231
09a913a7
MG
2232 /*
2233 * Also do not migrate dirty pages as not all filesystems can move
 2234 * dirty pages in MIGRATE_ASYNC mode, which is a waste of cycles.
2235 */
9de4f22a 2236 if (page_is_file_lru(page) && PageDirty(page))
09a913a7
MG
2237 goto out;
2238
b32967ff
MG
2239 isolated = numamigrate_isolate_page(pgdat, page);
2240 if (!isolated)
2241 goto out;
2242
2243 list_add(&page->lru, &migratepages);
c185e494
MWO
2244 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2245 NULL, node, MIGRATE_ASYNC,
2246 MR_NUMA_MISPLACED, &nr_succeeded);
b32967ff 2247 if (nr_remaining) {
59c82b70
JK
2248 if (!list_empty(&migratepages)) {
2249 list_del(&page->lru);
c5fc5c3a
YS
2250 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2251 page_is_file_lru(page), -nr_pages);
59c82b70
JK
2252 putback_lru_page(page);
2253 }
b32967ff 2254 isolated = 0;
e39bb6be
HY
2255 }
2256 if (nr_succeeded) {
2257 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2258 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2259 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2260 nr_succeeded);
2261 }
7039e1db 2262 BUG_ON(!list_empty(&migratepages));
7039e1db 2263 return isolated;
340ef390
HD
2264
2265out:
2266 put_page(page);
2267 return 0;
7039e1db 2268}
220018d3 2269#endif /* CONFIG_NUMA_BALANCING */
91952440 2270#endif /* CONFIG_NUMA */