// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (!folio)
		goto out;

	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
	smp_rmb();
	/*
	 * Check movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags so
	 * unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__folio_test_movable(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
	smp_rmb();
	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!folio_trylock(folio)))
		goto out_putfolio;

	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_no_isolated;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);

	if (!mops->isolate_page(&folio->page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);

	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}

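/*
 * Added illustrative note (not part of the upstream file): a compaction
 * style caller is expected to pair isolate_movable_page() with either a
 * later migration or putback_movable_pages(), roughly:
 *
 *	if (isolate_movable_page(page, mode))
 *		list_add(&page_folio(page)->lru, &migratepages);
 *	...
 *	putback_movable_pages(&migratepages);
 *
 * The list handling above is only a sketch; the real callers live in
 * mm/compaction.c and the memory offlining code.
 */
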
static void putback_movable_folio(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		/*
		 * We isolated non-lru movable folio so here we can use
		 * __PageMovable because LRU folio's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__folio_test_movable(folio))) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}

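/*
 * Added note (not upstream text): the NR_ISOLATED_ANON/NR_ISOLATED_FILE
 * adjustment above mirrors the increment made when the folio was isolated,
 * so a failed or abandoned migration leaves the isolation counters balanced.
 */
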
/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t old_pte;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		old_pte = ptep_get(pvmw.pte);
		if (pte_swp_soft_dirty(old_pte))
			pte = pte_mksoft_dirty(pte);

		entry = pte_to_swp_entry(old_pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (is_writable_migration_entry(entry))
			pte = pte_mkwrite(pte);
		else if (pte_swp_uffd_wp(old_pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(old_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(old_pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugepage_add_anon_rmap(new, vma, pvmw.address,
						       rmap_flags);
			else
				page_dup_file_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (folio_test_anon(folio))
				page_add_anon_rmap(new, vma, pvmw.address,
						   rmap_flags);
			else
				page_add_file_rmap(new, vma, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

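/*
 * Added note (not upstream text): the "locked" argument tells the rmap walk
 * that the caller already holds the rmap locks (e.g. the THP split path,
 * which holds the anon_vma lock); most callers pass false and let
 * rmap_walk() take the locks itself.
 */
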
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
			  unsigned long address)
{
	spinlock_t *ptl;
	pte_t *ptep;
	pte_t pte;
	swp_entry_t entry;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return;

	pte = ptep_get(ptep);
	pte_unmap(ptep);

	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptl);
	return;
out:
	spin_unlock(ptl);
}

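/*
 * Added note (not upstream text): a typical caller is the page fault path --
 * do_swap_page() finds a migration entry in the faulting pte and calls
 * migration_entry_wait() to block until the migrating folio is unlocked,
 * then returns so the fault is retried against the new pte.
 */
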
#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
	pte_t pte;

	hugetlb_vma_assert_locked(vma);
	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
	} else {
		/*
		 * If migration entry existed, safe to release vma lock
		 * here because the pgtable page won't be freed without the
		 * pgtable lock released. See comment right above pgtable
		 * lock release in migration_entry_wait_on_locked().
		 */
		hugetlb_vma_unlock_read(vma);
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
	}
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;
	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}

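/*
 * Added example (not upstream text): for a 4-page file folio with private
 * data attached, folio_expected_refs() returns 1 (the caller's reference)
 * + 4 (page cache references) + 1 (private) = 6; migration only proceeds
 * when the folio's refcount is exactly that.
 */
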
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

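/*
 * Added summary (not upstream text): folio_migrate_mapping() is the commit
 * point of migration -- it freezes the old folio's refcount at the expected
 * value, replaces the XArray entry with the new folio and transfers the
 * page cache reference, all under xas_lock_irq(). After a successful return
 * everyone looking up the mapping finds the new folio.
 */
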
/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
		struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + folio_has_private(src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_get(dst);

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on an
	 * effectively unmapped and unreferenced first sub-pages of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	/*
	 * For memory tiering mode, when migrate between slow and fast
	 * memory node, reset cpupid, because that is used to record
	 * page access time in slow memory node.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

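/*
 * Added note (not upstream text): folio_migrate_copy() copies both the data
 * and the folio state, whereas MIGRATE_SYNC_NO_COPY users (device migration
 * that performs the data copy itself, e.g. via DMA) call
 * folio_migrate_flags() directly, as migrate_folio_extra() below does.
 */
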
/************************************************************
 * Migration functions
 ***********************************************************/

int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);

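/*
 * Added example (not upstream text): a filesystem whose folios carry no
 * private data can typically just point its address_space_operations at
 * this helper, roughly:
 *
 *	const struct address_space_operations foo_aops = {
 *		...
 *		.migrate_folio	= migrate_folio,
 *	};
 *
 * "foo_aops" is a made-up name for illustration; see
 * filemap_migrate_folio() below for mappings that do attach private data.
 */
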
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
		enum migrate_mode mode)
{
	struct buffer_head *bh = head;
	struct buffer_head *failed_bh;

	do {
		if (!trylock_buffer(bh)) {
			if (mode == MIGRATE_ASYNC)
				goto unlock;
			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
				goto unlock;
			lock_buffer(bh);
		}

		bh = bh->b_this_page;
	} while (bh != head);

	return true;

unlock:
	/* We failed to lock the buffer and cannot stall. */
	failed_bh = bh;
	bh = head;
	while (bh != failed_bh) {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	}

	return false;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		set_bh_page(bh, &dst->page, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif

int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);

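/*
 * Added summary (not upstream text): the exported helpers above cover the
 * common cases -- buffer_migrate_folio() for buffer-head based filesystems,
 * buffer_migrate_folio_norefs() for block devices where buffer heads are
 * looked up directly, and filemap_migrate_folio() for mappings whose
 * ->private is not a buffer-head list (e.g. iomap based filesystems).
 */
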
/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only writeback folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_test_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(&src->page);

	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping->a_ops->migrate_folio)
			/*
			 * Most folios have a mapping and most filesystems
			 * provide a migrate_folio callback. Anonymous folios
			 * are part of swap space which also has its own
			 * migrate_folio callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
								mode);
		else
			rc = fallback_migrate_folio(mapping, dst, src, mode);
	} else {
		const struct movable_operations *mops;

		/*
		 * In case of non-lru page, it could be released after
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
		if (!folio_test_movable(src)) {
			rc = MIGRATEPAGE_SUCCESS;
			folio_clear_isolated(src);
			goto out;
		}

		mops = folio_movable_ops(src);
		rc = mops->migrate_page(&dst->page, &src->page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
				!folio_test_isolated(src));
	}

	/*
	 * When successful, old pagecache src->mapping must be cleared before
	 * src is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(&src->page)) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			folio_clear_isolated(src);
		}

		/*
		 * Anonymous and movable src->mapping will be cleared by
		 * free_pages_prepare so don't reset it here for keeping
		 * the type to work PageAnon, for example.
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;

		if (likely(!folio_is_zone_device(dst)))
			flush_dcache_folio(dst);
	}
out:
	return rc;
}

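/*
 * Added note (not upstream text): move_to_new_folio() only dispatches --
 * LRU folios go through the mapping's ->migrate_folio() callback (or the
 * writeout fallback), while non-LRU movable folios go through the
 * movable_operations ->migrate_page() callback registered by the owning
 * driver (e.g. zsmalloc or balloon compaction).
 */
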
/*
 * To record some information during migration, we use some unused
 * fields (mapping and private) of struct folio of the newly allocated
 * destination folio. This is safe because nobody is using them
 * except us.
 */
union migration_ptr {
	struct anon_vma *anon_vma;
	struct address_space *mapping;
};
static void __migrate_folio_record(struct folio *dst,
				   unsigned long page_was_mapped,
				   struct anon_vma *anon_vma)
{
	union migration_ptr ptr = { .anon_vma = anon_vma };
	dst->mapping = ptr.mapping;
	dst->private = (void *)page_was_mapped;
}

static void __migrate_folio_extract(struct folio *dst,
				   int *page_was_mappedp,
				   struct anon_vma **anon_vmap)
{
	union migration_ptr ptr = { .mapping = dst->mapping };
	*anon_vmap = ptr.anon_vma;
	*page_was_mappedp = (unsigned long)dst->private;
	dst->mapping = NULL;
	dst->private = NULL;
}

/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
				   int page_was_mapped,
				   struct anon_vma *anon_vma,
				   bool locked,
				   struct list_head *ret)
{
	if (page_was_mapped)
		remove_migration_ptes(src, src, false);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	if (locked)
		folio_unlock(src);
	if (ret)
		list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
				   free_folio_t put_new_folio, unsigned long private)
{
	if (locked)
		folio_unlock(dst);
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
			       enum migrate_reason reason)
{
	/*
	 * Compaction can migrate also non-LRU pages which are
	 * not accounted to NR_ISOLATED_*. They can be recognized
	 * as __PageMovable
	 */
	if (likely(!__folio_test_movable(src)))
		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
				    folio_is_file_lru(src), -folio_nr_pages(src));

	if (reason != MR_MEMORY_FAILURE)
		/* We release the page in page_handle_poison. */
		folio_put(src);
}

/* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, struct folio **dstp, enum migrate_mode mode,
		enum migrate_reason reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(&src->page);
	bool locked = false;
	bool dst_locked = false;

	if (folio_ref_count(src) == 1) {
		/* Folio was freed from under us. So we are done. */
		folio_clear_active(src);
		folio_clear_unevictable(src);
		/* free_pages_prepare() will clear PG_isolated. */
		list_del(&src->lru);
		migrate_folio_done(src, reason);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;
	*dstp = dst;

	dst->private = NULL;

	if (!folio_trylock(src)) {
		if (mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		/*
		 * In "light" mode, we can wait for transient locks (eg
		 * inserting a page into the page table), but it's not
		 * worth waiting for I/O.
		 */
		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
			goto out;

		folio_lock(src);
	}
	locked = true;

	if (folio_test_writeback(src)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much.
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out;
		}
		folio_wait_writeback(src);
	}

	/*
	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock().
	 * File caches may use writepage() or lock_page() in migration, so we
	 * only need to care about anon pages here.
	 *
	 * Only folio_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to dst at this point. We used to have a BUG
	 * here if folio_trylock(dst) fails, but would like to allow for
	 * cases where there might be a race with the previous use of dst.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!folio_trylock(dst)))
		goto out;
	dst_locked = true;

	if (unlikely(!is_lru)) {
		__migrate_folio_record(dst, page_was_mapped, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a src->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!src->mapping) {
		if (folio_test_private(src)) {
			try_to_free_buffers(src);
			goto out;
		}
	} else if (folio_mapped(src)) {
		/* Establish migration ptes */
		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
			       !folio_test_ksm(src) && !anon_vma, src);
		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
		page_was_mapped = 1;
	}

	if (!folio_mapped(src)) {
		__migrate_folio_record(dst, page_was_mapped, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

out:
	/*
	 * A folio that has not been unmapped will be restored to
	 * right list unless we want to retry.
	 */
	if (rc == -EAGAIN)
		ret = NULL;

	migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);

	return rc;
}

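/*
 * Added note (not upstream text): migrate_folio_unmap() and
 * migrate_folio_move() split the old unmap-and-move sequence into two
 * phases so that migrate_pages_batch() can unmap a whole batch first
 * (amortising TLB flushes via TTU_BATCH_FLUSH) and only then copy and
 * remap each folio. The state needed between the phases is stashed in the
 * destination folio by __migrate_folio_record()/__migrate_folio_extract().
 */
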
/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
			      struct folio *src, struct folio *dst,
			      enum migrate_mode mode, enum migrate_reason reason,
			      struct list_head *ret)
{
	int rc;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(&src->page);
	struct list_head *prev;

	__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
	prev = dst->lru.prev;
	list_del(&dst->lru);

	rc = move_to_new_folio(dst, src, mode);
	if (rc)
		goto out;

	if (unlikely(!is_lru))
		goto out_unlock_both;

	/*
	 * When successful, push dst to LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct dst->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	folio_add_lru(dst);
	if (page_was_mapped)
		lru_add_drain();

	if (page_was_mapped)
		remove_migration_ptes(src, dst, false);

out_unlock_both:
	folio_unlock(dst);
	set_page_owner_migrate_reason(&dst->page, reason);
	/*
	 * If migration is successful, decrease refcount of dst,
	 * which will not free the page because new page owner increased
	 * refcounter.
	 */
	folio_put(dst);

	/*
	 * A folio that has been migrated has all references removed
	 * and will be freed.
	 */
	list_del(&src->lru);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	folio_unlock(src);
	migrate_folio_done(src, reason);

	return rc;
out:
	/*
	 * A folio that has not been migrated will be restored to
	 * right list unless we want to retry.
	 */
	if (rc == -EAGAIN) {
		list_add(&dst->lru, prev);
		__migrate_folio_record(dst, page_was_mapped, anon_vma);
		return rc;
	}

	migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
	migrate_folio_undo_dst(dst, true, put_new_folio, private);

	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and writeback status of all subpages are counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then pte is replaced with migration swap entry and direct I/O code
 * will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, int force, enum migrate_mode mode,
		int reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	if (folio_ref_count(src) == 1) {
		/* page was freed from under us. So we are done. */
		folio_putback_active_hugetlb(src);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;

	if (!folio_trylock(src)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		folio_lock(src);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * folio_mapping() set, hugetlbfs specific move page routine will not
	 * be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (folio_test_anon(src))
		anon_vma = folio_get_anon_vma(src);

	if (unlikely(!folio_trylock(dst)))
		goto put_anon;

	if (folio_mapped(src)) {
		enum ttu_flags ttu = 0;

		if (!folio_test_anon(src)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_page_mapping_lock_write(&src->page);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	folio_unlock(dst);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(src, dst, reason);
		put_new_folio = NULL;
	}

out_unlock:
	folio_unlock(src);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		folio_putback_active_hugetlb(src);
	else if (rc != -EAGAIN)
		list_move_tail(&src->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_putback_active_hugetlb(dst);

	return rc;
}

static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
{
	int rc;

	folio_lock(folio);
	rc = split_folio_to_list(folio, split_folios);
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);

	return rc;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
#else
#define NR_MAX_BATCHED_MIGRATION	512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY	10
#define NR_MAX_MIGRATE_ASYNC_RETRY	3
#define NR_MAX_MIGRATE_SYNC_RETRY					\
	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)

struct migrate_pages_stats {
	int nr_succeeded;	/* Normal and large folios migrated successfully, in
				   units of base pages */
	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
				   units of base pages. Untried folios aren't counted */
	int nr_thp_succeeded;	/* THP migrated successfully */
	int nr_thp_failed;	/* THP failed to be migrated */
	int nr_thp_split;	/* THP split before migrating */
};

/*
 * Returns the number of hugetlb folios that were not migrated, or an error code
 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
 * any more because the list has become empty or no retryable hugetlb folios
 * exist any more. It is caller's responsibility to call putback_movable_pages()
 * only if ret != 0.
 */
static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
			    free_folio_t put_new_folio, unsigned long private,
			    enum migrate_mode mode, int reason,
			    struct migrate_pages_stats *stats,
			    struct list_head *ret_folios)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	struct folio *folio, *folio2;
	int rc, nr_pages;

	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
		retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			if (!folio_test_hugetlb(folio))
				continue;

			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * Migratability of hugepages depends on architectures and
			 * their size. This check is necessary because some callers
			 * of hugepage migration like soft offline and memory
			 * hotremove don't walk through page tables or check whether
			 * the hugepage is pmd-based or not before kicking migration.
			 */
			if (!hugepage_migration_supported(folio_hstate(folio))) {
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = unmap_and_move_huge_page(get_new_folio,
						      put_new_folio, private,
						      folio, pass > 2, mode,
						      reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: hugetlb folio will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, just exit.
				 */
				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				return -ENOMEM;
			case -EAGAIN:
				retry++;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed folio is
				 * removed from migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	/*
	 * nr_failed is number of hugetlb folios failed to be migrated. After
	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
	 * folios as failed.
	 */
	nr_failed += retry;
	stats->nr_failed_pages += nr_retry_pages;

	return nr_failed;
}

5dfab109
HY
1591/*
1592 * migrate_pages_batch() first unmaps folios in the from list as many as
1593 * possible, then move the unmapped folios.
fb3592c4
HY
1594 *
1595 * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a
1596 * lock or bit when we have locked more than one folio. Which may cause
1597 * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the
1598 * length of the from list must be <= 1.
5dfab109 1599 */
4e096ae1
MWO
1600static int migrate_pages_batch(struct list_head *from,
1601 new_folio_t get_new_folio, free_folio_t put_new_folio,
1602 unsigned long private, enum migrate_mode mode, int reason,
1603 struct list_head *ret_folios, struct list_head *split_folios,
1604 struct migrate_pages_stats *stats, int nr_pass)
b20a3503 1605{
a21d2133 1606 int retry = 1;
1a5bae25 1607 int thp_retry = 1;
b20a3503 1608 int nr_failed = 0;
077309bc 1609 int nr_retry_pages = 0;
b20a3503 1610 int pass = 0;
1a5bae25 1611 bool is_thp = false;
5dfab109 1612 struct folio *folio, *folio2, *dst = NULL, *dst2;
a21d2133 1613 int rc, rc_saved = 0, nr_pages;
5dfab109
HY
1614 LIST_HEAD(unmap_folios);
1615 LIST_HEAD(dst_folios);
b0b515bf 1616 bool nosplit = (reason == MR_NUMA_MISPLACED);
e5bfff8b 1617
fb3592c4
HY
1618 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1619 !list_empty(from) && !list_is_singular(from));
a21d2133 1620
124abced 1621 for (pass = 0; pass < nr_pass && retry; pass++) {
e24f0b8f 1622 retry = 0;
1a5bae25 1623 thp_retry = 0;
077309bc 1624 nr_retry_pages = 0;
b20a3503 1625
eaec4e63 1626 list_for_each_entry_safe(folio, folio2, from, lru) {
124abced 1627 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
eaec4e63 1628 nr_pages = folio_nr_pages(folio);
e5bfff8b 1629
e24f0b8f 1630 cond_resched();
2d1db3b1 1631
d532e2e5 1632 /*
eaec4e63 1633 * Large folio migration might be unsupported or
6f7d760e 1634 * the allocation might fail, so we should retry
eaec4e63
HY
1635 * on the same folio with the large folio split
1636 * to normal folios.
d532e2e5 1637 *
eaec4e63 1638 * Split folios are put in split_folios, and
e6fa8a79
HY
1639 * we will migrate them after the rest of the
1640 * list is processed.
d532e2e5 1641 */
6f7d760e 1642 if (!thp_migration_supported() && is_thp) {
124abced 1643 nr_failed++;
6f7d760e 1644 stats->nr_thp_failed++;
a21d2133 1645 if (!try_split_folio(folio, split_folios)) {
6f7d760e
HY
1646 stats->nr_thp_split++;
1647 continue;
f430893b 1648 }
6f7d760e
HY
1649 stats->nr_failed_pages += nr_pages;
1650 list_move_tail(&folio->lru, ret_folios);
1651 continue;
1652 }
f430893b 1653
4e096ae1
MWO
1654 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1655 private, folio, &dst, mode, reason,
1656 ret_folios);
dd4ae78a
YS
1657 /*
1658 * The rules are:
e5bfff8b 1659 * Success: folio will be freed
5dfab109
HY
1660 * Unmap: folio will be put on unmap_folios list,
1661 * dst folio put on dst_folios list
dd4ae78a
YS
1662 * -EAGAIN: stay on the from list
1663 * -ENOMEM: stay on the from list
42012e04 1664 * Other errno: put on ret_folios list
dd4ae78a 1665 */
e24f0b8f 1666 switch(rc) {
95a402c3 1667 case -ENOMEM:
94723aaf 1668 /*
d532e2e5 1669 * When memory is low, don't bother to try to migrate
5dfab109 1670 * other folios; move the unmapped folios, then exit.
94723aaf 1671 */
124abced
HY
1672 nr_failed++;
1673 stats->nr_thp_failed += is_thp;
1674 /* Large folio NUMA faulting doesn't split to retry. */
1675 if (folio_test_large(folio) && !nosplit) {
1676 int ret = try_split_folio(folio, split_folios);
1677
1678 if (!ret) {
1679 stats->nr_thp_split += is_thp;
1680 break;
1681 } else if (reason == MR_LONGTERM_PIN &&
1682 ret == -EAGAIN) {
1683 /*
 1684 * Try again to split the large folio to
1685 * mitigate the failure of longterm pinning.
1686 */
1687 retry++;
1688 thp_retry += is_thp;
1689 nr_retry_pages += nr_pages;
1690 /* Undo duplicated failure counting. */
1691 nr_failed--;
1692 stats->nr_thp_failed -= is_thp;
1693 break;
94723aaf 1694 }
1a5bae25 1695 }
b5bade97 1696
42012e04 1697 stats->nr_failed_pages += nr_pages + nr_retry_pages;
fbed53b4 1698 /* nr_failed isn't updated because it won't be used after this */
42012e04 1699 stats->nr_thp_failed += thp_retry;
5dfab109
HY
1700 rc_saved = rc;
1701 if (list_empty(&unmap_folios))
1702 goto out;
1703 else
1704 goto move;
e24f0b8f 1705 case -EAGAIN:
124abced
HY
1706 retry++;
1707 thp_retry += is_thp;
eaec4e63 1708 nr_retry_pages += nr_pages;
e24f0b8f 1709 break;
78bd5209 1710 case MIGRATEPAGE_SUCCESS:
42012e04
HY
1711 stats->nr_succeeded += nr_pages;
1712 stats->nr_thp_succeeded += is_thp;
e24f0b8f 1713 break;
5dfab109 1714 case MIGRATEPAGE_UNMAP:
5dfab109
HY
1715 list_move_tail(&folio->lru, &unmap_folios);
1716 list_add_tail(&dst->lru, &dst_folios);
e24f0b8f
CL
1717 break;
1718 default:
354a3363 1719 /*
d532e2e5 1720 * Permanent failure (-EBUSY, etc.):
eaec4e63
HY
1721 * unlike -EAGAIN case, the failed folio is
1722 * removed from migration folio list and not
354a3363
NH
1723 * retried in the next outer loop.
1724 */
124abced
HY
1725 nr_failed++;
1726 stats->nr_thp_failed += is_thp;
42012e04 1727 stats->nr_failed_pages += nr_pages;
e24f0b8f 1728 break;
2d1db3b1 1729 }
b20a3503
CL
1730 }
1731 }
7047b5a4 1732 nr_failed += retry;
42012e04
HY
1733 stats->nr_thp_failed += thp_retry;
1734 stats->nr_failed_pages += nr_retry_pages;
5dfab109 1735move:
7e12beb8
HY
1736 /* Flush TLBs for all unmapped folios */
1737 try_to_unmap_flush();
1738
5dfab109 1739 retry = 1;
124abced 1740 for (pass = 0; pass < nr_pass && retry; pass++) {
5dfab109 1741 retry = 0;
5dfab109
HY
1742 thp_retry = 0;
1743 nr_retry_pages = 0;
1744
1745 dst = list_first_entry(&dst_folios, struct folio, lru);
1746 dst2 = list_next_entry(dst, lru);
1747 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
124abced 1748 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
5dfab109
HY
1749 nr_pages = folio_nr_pages(folio);
1750
1751 cond_resched();
1752
4e096ae1 1753 rc = migrate_folio_move(put_new_folio, private,
5dfab109
HY
1754 folio, dst, mode,
1755 reason, ret_folios);
1756 /*
1757 * The rules are:
1758 * Success: folio will be freed
1759 * -EAGAIN: stay on the unmap_folios list
1760 * Other errno: put on ret_folios list
1761 */
1762 switch(rc) {
1763 case -EAGAIN:
124abced
HY
1764 retry++;
1765 thp_retry += is_thp;
5dfab109
HY
1766 nr_retry_pages += nr_pages;
1767 break;
1768 case MIGRATEPAGE_SUCCESS:
1769 stats->nr_succeeded += nr_pages;
1770 stats->nr_thp_succeeded += is_thp;
1771 break;
1772 default:
124abced
HY
1773 nr_failed++;
1774 stats->nr_thp_failed += is_thp;
5dfab109 1775 stats->nr_failed_pages += nr_pages;
e24f0b8f 1776 break;
2d1db3b1 1777 }
5dfab109
HY
1778 dst = dst2;
1779 dst2 = list_next_entry(dst, lru);
b20a3503
CL
1780 }
1781 }
7047b5a4 1782 nr_failed += retry;
5dfab109
HY
1783 stats->nr_thp_failed += thp_retry;
1784 stats->nr_failed_pages += nr_retry_pages;
1785
124abced 1786 rc = rc_saved ? : nr_failed;
5dfab109
HY
1787out:
1788 /* Cleanup remaining folios */
1789 dst = list_first_entry(&dst_folios, struct folio, lru);
1790 dst2 = list_next_entry(dst, lru);
1791 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1792 int page_was_mapped = 0;
1793 struct anon_vma *anon_vma = NULL;
1794
1795 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
1796 migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
ebe75e47 1797 true, ret_folios);
5dfab109 1798 list_del(&dst->lru);
4e096ae1 1799 migrate_folio_undo_dst(dst, true, put_new_folio, private);
5dfab109
HY
1800 dst = dst2;
1801 dst2 = list_next_entry(dst, lru);
1802 }
1803
42012e04
HY
1804 return rc;
1805}
1806
4e096ae1
MWO
1807static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1808 free_folio_t put_new_folio, unsigned long private,
1809 enum migrate_mode mode, int reason,
1810 struct list_head *ret_folios, struct list_head *split_folios,
1811 struct migrate_pages_stats *stats)
2ef7dbb2
HY
1812{
1813 int rc, nr_failed = 0;
1814 LIST_HEAD(folios);
1815 struct migrate_pages_stats astats;
1816
1817 memset(&astats, 0, sizeof(astats));
 1818 /* Try to migrate in batch with MIGRATE_ASYNC mode first */
4e096ae1 1819 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
2ef7dbb2
HY
1820 reason, &folios, split_folios, &astats,
1821 NR_MAX_MIGRATE_ASYNC_RETRY);
1822 stats->nr_succeeded += astats.nr_succeeded;
1823 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1824 stats->nr_thp_split += astats.nr_thp_split;
1825 if (rc < 0) {
1826 stats->nr_failed_pages += astats.nr_failed_pages;
1827 stats->nr_thp_failed += astats.nr_thp_failed;
1828 list_splice_tail(&folios, ret_folios);
1829 return rc;
1830 }
1831 stats->nr_thp_failed += astats.nr_thp_split;
1832 nr_failed += astats.nr_thp_split;
1833 /*
1834 * Fall back to migrate all failed folios one by one synchronously. All
1835 * failed folios except split THPs will be retried, so their failure
 1836 * isn't counted here.
1837 */
1838 list_splice_tail_init(&folios, from);
1839 while (!list_empty(from)) {
1840 list_move(from->next, &folios);
4e096ae1 1841 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2ef7dbb2
HY
1842 private, mode, reason, ret_folios,
1843 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1844 list_splice_tail_init(&folios, ret_folios);
1845 if (rc < 0)
1846 return rc;
1847 nr_failed += rc;
1848 }
1849
1850 return nr_failed;
1851}
1852
42012e04
HY
1853/*
1854 * migrate_pages - migrate the folios specified in a list, to the free folios
1855 * supplied as the target for the page migration
1856 *
1857 * @from: The list of folios to be migrated.
4e096ae1 1858 * @get_new_folio: The function used to allocate free folios to be used
42012e04 1859 * as the target of the folio migration.
4e096ae1 1860 * @put_new_folio: The function used to free target folios if migration
42012e04 1861 * fails, or NULL if no special handling is necessary.
4e096ae1 1862 * @private: Private data to be passed on to get_new_folio()
42012e04
HY
1863 * @mode: The migration mode that specifies the constraints for
1864 * folio migration, if any.
1865 * @reason: The reason for folio migration.
1866 * @ret_succeeded: Set to the number of folios migrated successfully if
1867 * the caller passes a non-NULL pointer.
1868 *
1869 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1870 * are movable any more because the list has become empty or no retryable folios
 1871 * exist any more. It is the caller's responsibility to call putback_movable_pages()
1872 * only if ret != 0.
1873 *
 1874 * Returns the number of {normal folios, large folios, hugetlb folios} that were
 1875 * not migrated, or an error code. The number of large folio splits will be
 1876 * counted as the number of non-migrated large folios, no matter how many
 1877 * split folios of the large folio are migrated successfully.
1878 */
4e096ae1
MWO
1879int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1880 free_folio_t put_new_folio, unsigned long private,
42012e04
HY
1881 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1882{
1883 int rc, rc_gather;
2ef7dbb2 1884 int nr_pages;
42012e04
HY
1885 struct folio *folio, *folio2;
1886 LIST_HEAD(folios);
1887 LIST_HEAD(ret_folios);
a21d2133 1888 LIST_HEAD(split_folios);
42012e04
HY
1889 struct migrate_pages_stats stats;
1890
1891 trace_mm_migrate_pages_start(mode, reason);
1892
1893 memset(&stats, 0, sizeof(stats));
1894
4e096ae1 1895 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
42012e04
HY
1896 mode, reason, &stats, &ret_folios);
1897 if (rc_gather < 0)
1898 goto out;
fb3592c4 1899
42012e04
HY
1900again:
1901 nr_pages = 0;
1902 list_for_each_entry_safe(folio, folio2, from, lru) {
1903 /* Retried hugetlb folios will be kept in list */
1904 if (folio_test_hugetlb(folio)) {
1905 list_move_tail(&folio->lru, &ret_folios);
1906 continue;
1907 }
1908
1909 nr_pages += folio_nr_pages(folio);
2ef7dbb2 1910 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
42012e04
HY
1911 break;
1912 }
2ef7dbb2 1913 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
fb3592c4 1914 list_cut_before(&folios, from, &folio2->lru);
42012e04
HY
1915 else
1916 list_splice_init(from, &folios);
2ef7dbb2 1917 if (mode == MIGRATE_ASYNC)
4e096ae1
MWO
1918 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1919 private, mode, reason, &ret_folios,
1920 &split_folios, &stats,
1921 NR_MAX_MIGRATE_PAGES_RETRY);
2ef7dbb2 1922 else
4e096ae1
MWO
1923 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1924 private, mode, reason, &ret_folios,
1925 &split_folios, &stats);
42012e04
HY
1926 list_splice_tail_init(&folios, &ret_folios);
1927 if (rc < 0) {
1928 rc_gather = rc;
a21d2133 1929 list_splice_tail(&split_folios, &ret_folios);
42012e04
HY
1930 goto out;
1931 }
a21d2133
HY
1932 if (!list_empty(&split_folios)) {
1933 /*
 1934 * Failure isn't counted since all split folios of a large folio
 1935 * are counted as 1 failure already. And we only try to migrate
 1936 * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
1937 */
4e096ae1
MWO
1938 migrate_pages_batch(&split_folios, get_new_folio,
1939 put_new_folio, private, MIGRATE_ASYNC, reason,
1940 &ret_folios, NULL, &stats, 1);
a21d2133
HY
1941 list_splice_tail_init(&split_folios, &ret_folios);
1942 }
42012e04
HY
1943 rc_gather += rc;
1944 if (!list_empty(from))
1945 goto again;
95a402c3 1946out:
dd4ae78a 1947 /*
eaec4e63 1948 * Put the permanently failed folios back on the migration list; they
dd4ae78a
YS
1949 * will be put back to the right list by the caller.
1950 */
eaec4e63 1951 list_splice(&ret_folios, from);
dd4ae78a 1952
03e5f82e 1953 /*
eaec4e63
HY
1954 * Return 0 in case all split folios of fail-to-migrate large folios
1955 * are migrated successfully.
03e5f82e
BW
1956 */
1957 if (list_empty(from))
42012e04 1958 rc_gather = 0;
03e5f82e 1959
5b855937
HY
1960 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
1961 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
1962 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
1963 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
1964 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
1965 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
1966 stats.nr_thp_succeeded, stats.nr_thp_failed,
1967 stats.nr_thp_split, mode, reason);
7b2a2d4a 1968
5ac95884 1969 if (ret_succeeded)
5b855937 1970 *ret_succeeded = stats.nr_succeeded;
5ac95884 1971
42012e04 1972 return rc_gather;
b20a3503 1973}
95a402c3 1974
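/*
 * Illustrative sketch only (not part of mm/migrate.c): how a typical
 * in-kernel caller is expected to drive migrate_pages().  The function
 * name example_migrate_list(), the list example_folios and the node
 * example_nid are hypothetical; the pattern of pairing
 * alloc_migration_target() with a struct migration_target_control and
 * calling putback_movable_pages() on a non-zero return follows the
 * existing callers elsewhere in the tree.
 */
static int example_migrate_list(struct list_head *example_folios, int example_nid)
{
	struct migration_target_control mtc = {
		.nid = example_nid,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};
	unsigned int nr_succeeded = 0;
	int ret;

	ret = migrate_pages(example_folios, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC,
			    MR_MEMORY_HOTPLUG, &nr_succeeded);
	if (ret)
		/* Folios left on the list must be put back by the caller. */
		putback_movable_pages(example_folios);
	return ret;
}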
4e096ae1 1975struct folio *alloc_migration_target(struct folio *src, unsigned long private)
b4b38223 1976{
19fc7bed
JK
1977 struct migration_target_control *mtc;
1978 gfp_t gfp_mask;
b4b38223 1979 unsigned int order = 0;
19fc7bed
JK
1980 int nid;
1981 int zidx;
1982
1983 mtc = (struct migration_target_control *)private;
1984 gfp_mask = mtc->gfp_mask;
1985 nid = mtc->nid;
1986 if (nid == NUMA_NO_NODE)
4e096ae1 1987 nid = folio_nid(src);
b4b38223 1988
4e096ae1
MWO
1989 if (folio_test_hugetlb(src)) {
1990 struct hstate *h = folio_hstate(src);
d92bbc27 1991
19fc7bed 1992 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
4e096ae1 1993 return alloc_hugetlb_folio_nodemask(h, nid,
e37d3e83 1994 mtc->nmask, gfp_mask);
d92bbc27 1995 }
b4b38223 1996
4e096ae1 1997 if (folio_test_large(src)) {
9933a0c8
JK
1998 /*
1999 * clear __GFP_RECLAIM to make the migration callback
2000 * consistent with regular THP allocations.
2001 */
2002 gfp_mask &= ~__GFP_RECLAIM;
b4b38223 2003 gfp_mask |= GFP_TRANSHUGE;
4e096ae1 2004 order = folio_order(src);
b4b38223 2005 }
4e096ae1 2006 zidx = zone_idx(folio_zone(src));
19fc7bed 2007 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
b4b38223
JK
2008 gfp_mask |= __GFP_HIGHMEM;
2009
4e096ae1 2010 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
b4b38223
JK
2011}
2012
742755a1 2013#ifdef CONFIG_NUMA
742755a1 2014
a49bd4d7 2015static int store_status(int __user *status, int start, int value, int nr)
742755a1 2016{
a49bd4d7
MH
2017 while (nr-- > 0) {
2018 if (put_user(value, status + start))
2019 return -EFAULT;
2020 start++;
2021 }
2022
2023 return 0;
2024}
2025
2026static int do_move_pages_to_node(struct mm_struct *mm,
2027 struct list_head *pagelist, int node)
2028{
2029 int err;
a0976311
JK
2030 struct migration_target_control mtc = {
2031 .nid = node,
2032 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2033 };
a49bd4d7 2034
a0976311 2035 err = migrate_pages(pagelist, alloc_migration_target, NULL,
5ac95884 2036 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
a49bd4d7
MH
2037 if (err)
2038 putback_movable_pages(pagelist);
2039 return err;
742755a1
CL
2040}
2041
2042/*
a49bd4d7
MH
2043 * Resolves the given address to a struct page, isolates it from the LRU and
 2044 * puts it on the given pagelist.
e0153fc2
YS
2045 * Returns:
2046 * errno - if the page cannot be found/isolated
2047 * 0 - when it doesn't have to be migrated because it is already on the
2048 * target node
2049 * 1 - when it has been queued
742755a1 2050 */
428e106a 2051static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
a49bd4d7 2052 int node, struct list_head *pagelist, bool migrate_all)
742755a1 2053{
a49bd4d7 2054 struct vm_area_struct *vma;
428e106a 2055 unsigned long addr;
a49bd4d7 2056 struct page *page;
742755a1 2057 int err;
9747b9e9 2058 bool isolated;
742755a1 2059
d8ed45c5 2060 mmap_read_lock(mm);
428e106a
KS
2061 addr = (unsigned long)untagged_addr_remote(mm, p);
2062
a49bd4d7 2063 err = -EFAULT;
cb1c37b1
ML
2064 vma = vma_lookup(mm, addr);
2065 if (!vma || !vma_migratable(vma))
a49bd4d7 2066 goto out;
742755a1 2067
a49bd4d7 2068 /* FOLL_DUMP to ignore special (like zero) pages */
87d2762e 2069 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da 2070
a49bd4d7
MH
2071 err = PTR_ERR(page);
2072 if (IS_ERR(page))
2073 goto out;
89f5b7da 2074
a49bd4d7 2075 err = -ENOENT;
f7091ed6 2076 if (!page)
a49bd4d7 2077 goto out;
742755a1 2078
f7091ed6
HW
2079 if (is_zone_device_page(page))
2080 goto out_putpage;
2081
a49bd4d7
MH
2082 err = 0;
2083 if (page_to_nid(page) == node)
2084 goto out_putpage;
742755a1 2085
a49bd4d7
MH
2086 err = -EACCES;
2087 if (page_mapcount(page) > 1 && !migrate_all)
2088 goto out_putpage;
742755a1 2089
a49bd4d7
MH
2090 if (PageHuge(page)) {
2091 if (PageHead(page)) {
9747b9e9
BW
2092 isolated = isolate_hugetlb(page_folio(page), pagelist);
2093 err = isolated ? 1 : -EBUSY;
e632a938 2094 }
a49bd4d7
MH
2095 } else {
2096 struct page *head;
e632a938 2097
e8db67eb 2098 head = compound_head(page);
f7f9c00d
BW
2099 isolated = isolate_lru_page(head);
2100 if (!isolated) {
2101 err = -EBUSY;
a49bd4d7 2102 goto out_putpage;
f7f9c00d 2103 }
742755a1 2104
e0153fc2 2105 err = 1;
a49bd4d7
MH
2106 list_add_tail(&head->lru, pagelist);
2107 mod_node_page_state(page_pgdat(head),
9de4f22a 2108 NR_ISOLATED_ANON + page_is_file_lru(head),
6c357848 2109 thp_nr_pages(head));
a49bd4d7
MH
2110 }
2111out_putpage:
2112 /*
2113 * Either remove the duplicate refcount from
2114 * isolate_lru_page() or drop the page ref if it was
2115 * not isolated.
2116 */
2117 put_page(page);
2118out:
d8ed45c5 2119 mmap_read_unlock(mm);
742755a1
CL
2120 return err;
2121}
2122
7ca8783a
WY
2123static int move_pages_and_store_status(struct mm_struct *mm, int node,
2124 struct list_head *pagelist, int __user *status,
2125 int start, int i, unsigned long nr_pages)
2126{
2127 int err;
2128
5d7ae891
WY
2129 if (list_empty(pagelist))
2130 return 0;
2131
7ca8783a
WY
2132 err = do_move_pages_to_node(mm, pagelist, node);
2133 if (err) {
2134 /*
 2135 * Positive err means the number of pages that
 2136 * failed to migrate. Since we are going to
 2137 * abort and return the number of non-migrated
ab9dd4f8 2138 * pages, we need to include the rest of the
7ca8783a
WY
2139 * nr_pages that have not been attempted as
2140 * well.
2141 */
2142 if (err > 0)
a7504ed1 2143 err += nr_pages - i;
7ca8783a
WY
2144 return err;
2145 }
2146 return store_status(status, start, node, i - start);
2147}
2148
5e9a0f02
BG
2149/*
 2150 * Migrate an array of page addresses onto an array of nodes and fill
2151 * the corresponding array of status.
2152 */
3268c63e 2153static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
5e9a0f02
BG
2154 unsigned long nr_pages,
2155 const void __user * __user *pages,
2156 const int __user *nodes,
2157 int __user *status, int flags)
2158{
a49bd4d7
MH
2159 int current_node = NUMA_NO_NODE;
2160 LIST_HEAD(pagelist);
2161 int start, i;
2162 int err = 0, err1;
35282a2d 2163
361a2a22 2164 lru_cache_disable();
35282a2d 2165
a49bd4d7
MH
2166 for (i = start = 0; i < nr_pages; i++) {
2167 const void __user *p;
a49bd4d7 2168 int node;
3140a227 2169
a49bd4d7
MH
2170 err = -EFAULT;
2171 if (get_user(p, pages + i))
2172 goto out_flush;
2173 if (get_user(node, nodes + i))
2174 goto out_flush;
a49bd4d7
MH
2175
2176 err = -ENODEV;
2177 if (node < 0 || node >= MAX_NUMNODES)
2178 goto out_flush;
2179 if (!node_state(node, N_MEMORY))
2180 goto out_flush;
5e9a0f02 2181
a49bd4d7
MH
2182 err = -EACCES;
2183 if (!node_isset(node, task_nodes))
2184 goto out_flush;
2185
2186 if (current_node == NUMA_NO_NODE) {
2187 current_node = node;
2188 start = i;
2189 } else if (node != current_node) {
7ca8783a
WY
2190 err = move_pages_and_store_status(mm, current_node,
2191 &pagelist, status, start, i, nr_pages);
a49bd4d7
MH
2192 if (err)
2193 goto out;
2194 start = i;
2195 current_node = node;
3140a227
BG
2196 }
2197
a49bd4d7
MH
2198 /*
2199 * Errors in the page lookup or isolation are not fatal and we simply
 2200 * report them via status.
2201 */
428e106a
KS
2202 err = add_page_for_migration(mm, p, current_node, &pagelist,
2203 flags & MPOL_MF_MOVE_ALL);
e0153fc2 2204
d08221a0 2205 if (err > 0) {
e0153fc2
YS
2206 /* The page is successfully queued for migration */
2207 continue;
2208 }
3140a227 2209
65462462
JH
2210 /*
2211 * The move_pages() man page does not have an -EEXIST choice, so
2212 * use -EFAULT instead.
2213 */
2214 if (err == -EEXIST)
2215 err = -EFAULT;
2216
d08221a0
WY
2217 /*
2218 * If the page is already on the target node (!err), store the
2219 * node, otherwise, store the err.
2220 */
2221 err = store_status(status, i, err ? : current_node, 1);
a49bd4d7
MH
2222 if (err)
2223 goto out_flush;
5e9a0f02 2224
7ca8783a
WY
2225 err = move_pages_and_store_status(mm, current_node, &pagelist,
2226 status, start, i, nr_pages);
a7504ed1
HY
2227 if (err) {
2228 /* We have accounted for page i */
2229 if (err > 0)
2230 err--;
4afdacec 2231 goto out;
a7504ed1 2232 }
a49bd4d7 2233 current_node = NUMA_NO_NODE;
3140a227 2234 }
a49bd4d7
MH
2235out_flush:
2236 /* Make sure we do not overwrite the existing error */
7ca8783a
WY
2237 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
2238 status, start, i, nr_pages);
dfe9aa23 2239 if (err >= 0)
a49bd4d7 2240 err = err1;
5e9a0f02 2241out:
361a2a22 2242 lru_cache_enable();
5e9a0f02
BG
2243 return err;
2244}
2245
742755a1 2246/*
2f007e74 2247 * Determine the nodes of an array of pages and store them in an array of status.
742755a1 2248 */
80bba129
BG
2249static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2250 const void __user **pages, int *status)
742755a1 2251{
2f007e74 2252 unsigned long i;
2f007e74 2253
d8ed45c5 2254 mmap_read_lock(mm);
742755a1 2255
2f007e74 2256 for (i = 0; i < nr_pages; i++) {
80bba129 2257 unsigned long addr = (unsigned long)(*pages);
742755a1
CL
2258 struct vm_area_struct *vma;
2259 struct page *page;
c095adbc 2260 int err = -EFAULT;
2f007e74 2261
059b8b48
LH
2262 vma = vma_lookup(mm, addr);
2263 if (!vma)
742755a1
CL
2264 goto set_status;
2265
d899844e 2266 /* FOLL_DUMP to ignore special (like zero) pages */
16fd6b31 2267 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da
LT
2268
2269 err = PTR_ERR(page);
2270 if (IS_ERR(page))
2271 goto set_status;
2272
f7091ed6
HW
2273 err = -ENOENT;
2274 if (!page)
2275 goto set_status;
2276
2277 if (!is_zone_device_page(page))
4cd61484 2278 err = page_to_nid(page);
f7091ed6 2279
16fd6b31 2280 put_page(page);
742755a1 2281set_status:
80bba129
BG
2282 *status = err;
2283
2284 pages++;
2285 status++;
2286 }
2287
d8ed45c5 2288 mmap_read_unlock(mm);
80bba129
BG
2289}
2290
5b1b561b
AB
2291static int get_compat_pages_array(const void __user *chunk_pages[],
2292 const void __user * __user *pages,
2293 unsigned long chunk_nr)
2294{
2295 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2296 compat_uptr_t p;
2297 int i;
2298
2299 for (i = 0; i < chunk_nr; i++) {
2300 if (get_user(p, pages32 + i))
2301 return -EFAULT;
2302 chunk_pages[i] = compat_ptr(p);
2303 }
2304
2305 return 0;
2306}
2307
80bba129
BG
2308/*
 2309 * Determine the nodes of a user array of pages and store them in
2310 * a user array of status.
2311 */
2312static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2313 const void __user * __user *pages,
2314 int __user *status)
2315{
3eefb826 2316#define DO_PAGES_STAT_CHUNK_NR 16UL
80bba129
BG
2317 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2318 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
80bba129 2319
87b8d1ad 2320 while (nr_pages) {
3eefb826 2321 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
87b8d1ad 2322
5b1b561b
AB
2323 if (in_compat_syscall()) {
2324 if (get_compat_pages_array(chunk_pages, pages,
2325 chunk_nr))
2326 break;
2327 } else {
2328 if (copy_from_user(chunk_pages, pages,
2329 chunk_nr * sizeof(*chunk_pages)))
2330 break;
2331 }
80bba129
BG
2332
2333 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2334
87b8d1ad
PA
2335 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2336 break;
742755a1 2337
87b8d1ad
PA
2338 pages += chunk_nr;
2339 status += chunk_nr;
2340 nr_pages -= chunk_nr;
2341 }
2342 return nr_pages ? -EFAULT : 0;
742755a1
CL
2343}
2344
4dc200ce 2345static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
742755a1 2346{
742755a1 2347 struct task_struct *task;
742755a1 2348 struct mm_struct *mm;
742755a1 2349
4dc200ce
ML
2350 /*
 2351 * There is no need to check if the current process has the right to modify
 2352 * the specified process when they are the same.
2353 */
2354 if (!pid) {
2355 mmget(current->mm);
2356 *mem_nodes = cpuset_mems_allowed(current);
2357 return current->mm;
2358 }
742755a1
CL
2359
2360 /* Find the mm_struct */
a879bf58 2361 rcu_read_lock();
4dc200ce 2362 task = find_task_by_vpid(pid);
742755a1 2363 if (!task) {
a879bf58 2364 rcu_read_unlock();
4dc200ce 2365 return ERR_PTR(-ESRCH);
742755a1 2366 }
3268c63e 2367 get_task_struct(task);
742755a1
CL
2368
2369 /*
2370 * Check if this process has the right to modify the specified
197e7e52 2371 * process. Use the regular "ptrace_may_access()" checks.
742755a1 2372 */
197e7e52 2373 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 2374 rcu_read_unlock();
4dc200ce 2375 mm = ERR_PTR(-EPERM);
5e9a0f02 2376 goto out;
742755a1 2377 }
c69e8d9c 2378 rcu_read_unlock();
742755a1 2379
4dc200ce
ML
2380 mm = ERR_PTR(security_task_movememory(task));
2381 if (IS_ERR(mm))
5e9a0f02 2382 goto out;
4dc200ce 2383 *mem_nodes = cpuset_mems_allowed(task);
3268c63e 2384 mm = get_task_mm(task);
4dc200ce 2385out:
3268c63e 2386 put_task_struct(task);
6e8b09ea 2387 if (!mm)
4dc200ce
ML
2388 mm = ERR_PTR(-EINVAL);
2389 return mm;
2390}
2391
2392/*
2393 * Move a list of pages in the address space of the currently executing
2394 * process.
2395 */
2396static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2397 const void __user * __user *pages,
2398 const int __user *nodes,
2399 int __user *status, int flags)
2400{
2401 struct mm_struct *mm;
2402 int err;
2403 nodemask_t task_nodes;
2404
2405 /* Check flags */
2406 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
6e8b09ea
SL
2407 return -EINVAL;
2408
4dc200ce
ML
2409 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2410 return -EPERM;
2411
2412 mm = find_mm_struct(pid, &task_nodes);
2413 if (IS_ERR(mm))
2414 return PTR_ERR(mm);
2415
6e8b09ea
SL
2416 if (nodes)
2417 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2418 nodes, status, flags);
2419 else
2420 err = do_pages_stat(mm, nr_pages, pages, status);
742755a1 2421
742755a1
CL
2422 mmput(mm);
2423 return err;
2424}
742755a1 2425
7addf443
DB
2426SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2427 const void __user * __user *, pages,
2428 const int __user *, nodes,
2429 int __user *, status, int, flags)
2430{
2431 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2432}
2433
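/*
 * Illustrative userspace sketch only (not part of the kernel): exercising
 * the move_pages(2) syscall defined above through libnuma's <numaif.h>
 * wrapper (link with -lnuma).  Passing nodes == NULL merely queries the
 * node each page currently resides on; passing a nodes array requests
 * migration, and the per-page result (a node id, or a negative errno)
 * is written back into status.
 */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *page = aligned_alloc(4096, 4096);
	void *pages[1] = { page };
	int nodes[1] = { 0 };		/* ask to move the page to node 0 */
	int status[1] = { -1 };

	((char *)page)[0] = 1;		/* touch it so a page is actually mapped */

	if (move_pages(0 /* current process */, 1, pages, nodes, status,
		       MPOL_MF_MOVE))
		perror("move_pages");
	printf("status[0] = %d (node id, or -errno on per-page failure)\n",
	       status[0]);
	free(page);
	return 0;
}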
7039e1db
PZ
2434#ifdef CONFIG_NUMA_BALANCING
2435/*
2436 * Returns true if this is a safe migration target node for misplaced NUMA
bc53008e 2437 * pages. Currently it only checks the watermarks, which is crude.
7039e1db
PZ
2438 */
2439static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
3abef4e6 2440 unsigned long nr_migrate_pages)
7039e1db
PZ
2441{
2442 int z;
599d0c95 2443
7039e1db
PZ
2444 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2445 struct zone *zone = pgdat->node_zones + z;
2446
bc53008e 2447 if (!managed_zone(zone))
7039e1db
PZ
2448 continue;
2449
7039e1db
PZ
2450 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
2451 if (!zone_watermark_ok(zone, 0,
2452 high_wmark_pages(zone) +
2453 nr_migrate_pages,
bfe9d006 2454 ZONE_MOVABLE, 0))
7039e1db
PZ
2455 continue;
2456 return true;
2457 }
2458 return false;
2459}
2460
4e096ae1 2461static struct folio *alloc_misplaced_dst_folio(struct folio *src,
666feb21 2462 unsigned long data)
7039e1db
PZ
2463{
2464 int nid = (int) data;
4e096ae1 2465 int order = folio_order(src);
c185e494 2466 gfp_t gfp = __GFP_THISNODE;
c185e494
MWO
2467
2468 if (order > 0)
2469 gfp |= GFP_TRANSHUGE_LIGHT;
2470 else {
2471 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2472 __GFP_NOWARN;
2473 gfp &= ~__GFP_RECLAIM;
2474 }
4e096ae1 2475 return __folio_alloc_node(gfp, order, nid);
c5b5a3dd
YS
2476}
2477
1c30e017 2478static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
b32967ff 2479{
2b9b624f 2480 int nr_pages = thp_nr_pages(page);
c574bbe9 2481 int order = compound_order(page);
a8f60772 2482
c574bbe9 2483 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
3abef4e6 2484
662aeea7
YS
2485 /* Do not migrate THP mapped by multiple processes */
2486 if (PageTransHuge(page) && total_mapcount(page) > 1)
2487 return 0;
2488
7039e1db 2489 /* Avoid migrating to a node that is nearly full */
c574bbe9
HY
2490 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2491 int z;
2492
2493 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2494 return 0;
2495 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
bc53008e 2496 if (managed_zone(pgdat->node_zones + z))
c574bbe9
HY
2497 break;
2498 }
2499 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
340ef390 2500 return 0;
c574bbe9 2501 }
7039e1db 2502
f7f9c00d 2503 if (!isolate_lru_page(page))
340ef390 2504 return 0;
7039e1db 2505
b75454e1 2506 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2b9b624f 2507 nr_pages);
340ef390 2508
149c33e1 2509 /*
340ef390
HD
2510 * Isolating the page has taken another reference, so the
2511 * caller's reference can be safely dropped without the page
2512 * disappearing underneath us during migration.
149c33e1
MG
2513 */
2514 put_page(page);
340ef390 2515 return 1;
b32967ff
MG
2516}
2517
2518/*
2519 * Attempt to migrate a misplaced page to the specified destination
2520 * node. Caller is expected to have an elevated reference count on
 2521 * the page; that reference will be dropped by this function before returning.
2522 */
1bc115d8
MG
2523int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2524 int node)
b32967ff
MG
2525{
2526 pg_data_t *pgdat = NODE_DATA(node);
340ef390 2527 int isolated;
b32967ff 2528 int nr_remaining;
e39bb6be 2529 unsigned int nr_succeeded;
b32967ff 2530 LIST_HEAD(migratepages);
b5916c02 2531 int nr_pages = thp_nr_pages(page);
c5b5a3dd 2532
b32967ff 2533 /*
1bc115d8
MG
2534 * Don't migrate file pages that are mapped in multiple processes
2535 * with execute permissions as they are probably shared libraries.
b32967ff 2536 */
7ee820ee
ML
2537 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2538 (vma->vm_flags & VM_EXEC))
b32967ff 2539 goto out;
b32967ff 2540
09a913a7
MG
2541 /*
2542 * Also do not migrate dirty pages as not all filesystems can move
 2543 * dirty pages in MIGRATE_ASYNC mode, which is a waste of cycles.
2544 */
9de4f22a 2545 if (page_is_file_lru(page) && PageDirty(page))
09a913a7
MG
2546 goto out;
2547
b32967ff
MG
2548 isolated = numamigrate_isolate_page(pgdat, page);
2549 if (!isolated)
2550 goto out;
2551
2552 list_add(&page->lru, &migratepages);
4e096ae1 2553 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
c185e494
MWO
2554 NULL, node, MIGRATE_ASYNC,
2555 MR_NUMA_MISPLACED, &nr_succeeded);
b32967ff 2556 if (nr_remaining) {
59c82b70
JK
2557 if (!list_empty(&migratepages)) {
2558 list_del(&page->lru);
c5fc5c3a
YS
2559 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2560 page_is_file_lru(page), -nr_pages);
59c82b70
JK
2561 putback_lru_page(page);
2562 }
b32967ff 2563 isolated = 0;
e39bb6be
HY
2564 }
2565 if (nr_succeeded) {
2566 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2567 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2568 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2569 nr_succeeded);
2570 }
7039e1db 2571 BUG_ON(!list_empty(&migratepages));
7039e1db 2572 return isolated;
340ef390
HD
2573
2574out:
2575 put_page(page);
2576 return 0;
7039e1db 2577}
220018d3 2578#endif /* CONFIG_NUMA_BALANCING */
91952440 2579#endif /* CONFIG_NUMA */