// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

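/*
 * isolate_movable_page() - try to isolate a non-LRU movable page for
 * migration.  Returns true when the page is a movable-ops page, we managed
 * to lock it, and its driver's ->isolate_page() callback accepted it;
 * false otherwise (freed, slab, already isolated, or driver refusal).
 */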
bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job
	 * the put_page() at the end of this block will take care of
	 * release this page, thus avoiding a nasty leakage.
	 */
	if (!folio)
		goto out;

	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
	smp_rmb();
	/*
	 * Check movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags so
	 * unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__folio_test_movable(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
	smp_rmb();
	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against the releasing a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * lets be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!folio_trylock(folio)))
		goto out_putfolio;

	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_no_isolated;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);

	if (!mops->isolate_page(&folio->page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use the isolated flag */
	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);

	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}

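/*
 * Illustrative sketch (not part of this file): a driver that wants its pages
 * handled by the non-LRU movable path provides a struct movable_operations
 * and marks each page with __SetPageMovable().  The my_* callbacks below are
 * hypothetical; see include/linux/migrate.h and in-tree users such as
 * mm/balloon_compaction.c and mm/zsmalloc.c for real implementations:
 *
 *	static const struct movable_operations my_mops = {
 *		.isolate_page	= my_isolate_page,
 *		.migrate_page	= my_migrate_page,
 *		.putback_page	= my_putback_page,
 *	};
 *	...
 *	__SetPageMovable(page, &my_mops);
 */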
static void putback_movable_folio(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		/*
		 * We isolated non-lru movable folio so here we can use
		 * __folio_test_movable because LRU folio's mapping cannot
		 * have PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__folio_test_movable(folio))) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t old_pte;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		old_pte = ptep_get(pvmw.pte);

		entry = pte_to_swp_entry(old_pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (pte_swp_soft_dirty(old_pte))
			pte = pte_mksoft_dirty(pte);
		else
			pte = pte_clear_soft_dirty(pte);

		if (is_writable_migration_entry(entry))
			pte = pte_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(old_pte))
			pte = pte_mkuffd_wp(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(old_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(old_pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			struct hstate *h = hstate_vma(vma);
			unsigned int shift = huge_page_shift(h);
			unsigned long psize = huge_page_size(h);

			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
						      rmap_flags);
			else
				hugetlb_add_file_rmap(folio);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
					psize);
		} else
#endif
		{
			if (folio_test_anon(folio))
				folio_add_anon_rmap_pte(folio, new, vma,
							pvmw.address, rmap_flags);
			else
				folio_add_file_rmap_pte(folio, new, vma);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
			  unsigned long address)
{
	spinlock_t *ptl;
	pte_t *ptep;
	pte_t pte;
	swp_entry_t entry;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		return;

	pte = ptep_get(ptep);
	pte_unmap(ptep);

	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptl);
	return;
out:
	spin_unlock(ptl);
}

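/*
 * Note: the page fault path is the main caller of the wait helpers above and
 * below -- when do_swap_page() (or its hugetlb counterpart) finds a migration
 * entry, it waits here for the migration to finish and then retries the
 * fault against the new page.
 */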
#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
	pte_t pte;

	hugetlb_vma_assert_locked(vma);
	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
	} else {
		/*
		 * If migration entry existed, safe to release vma lock
		 * here because the pgtable page won't be freed without the
		 * pgtable lock released. See comment right above pgtable
		 * lock release in migration_entry_wait_on_locked().
		 */
		hugetlb_vma_unlock_read(vma);
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
	}
}
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;
	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}

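/*
 * Worked example: for a 4-page file folio with buffer heads attached,
 * folio_expected_refs() == 1 + 4 + 1 = 6 -- one reference held by the
 * caller that isolated the folio, four page cache references, and one for
 * the attached private data.
 */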
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);
	long entries, i;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
		entries = nr;
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
		entries = 1;
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	/* Swap cache still stores N entries instead of a high-order entry */
	for (i = 0; i < entries; i++) {
		xas_store(&xas, newfolio);
		xas_next(&xas);
	}

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);

			if (folio_test_pmd_mappable(folio)) {
				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
			}
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = folio_expected_refs(mapping, src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_ref_add(dst, folio_nr_pages(dst));

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on an
	 * effectively unmapped and unreferenced first sub-pages of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = folio_xchg_last_cpupid(folio, -1);
	/*
	 * For memory tiering mode, when migrate between slow and fast
	 * memory node, reset cpupid, because that is used to record
	 * page access time in slow memory node.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(folio_nid(folio));
		bool t_toptier = node_is_toptier(folio_nid(newfolio));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	folio_xchg_last_cpupid(newfolio, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 * Migration functions
 ***********************************************************/

int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);

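/*
 * Example wiring (illustrative, not part of this file): an address_space
 * whose folios carry no private data can use migrate_folio() directly as
 * its ->migrate_folio callback, e.g. shmem does.  The my_aops name below is
 * purely for illustration:
 *
 *	static const struct address_space_operations my_aops = {
 *		...
 *		.migrate_folio	= migrate_folio,
 *	};
 */
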
#ifdef CONFIG_BUFFER_HEAD
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;
	struct buffer_head *failed_bh;

	do {
		if (!trylock_buffer(bh)) {
			if (mode == MIGRATE_ASYNC)
				goto unlock;
			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
				goto unlock;
			lock_buffer(bh);
		}

		bh = bh->b_this_page;
	} while (bh != head);

	return true;

unlock:
	/* We failed to lock the buffer and cannot stall. */
	failed_bh = bh;
	bh = head;
	while (bh != failed_bh) {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	}

	return false;
}

static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->i_private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->i_private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		folio_set_bh(bh, dst, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->i_private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif /* CONFIG_BUFFER_HEAD */

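/*
 * Note: the block device page cache is the typical user of the _norefs
 * variant, since buffer heads of block device folios are looked up and
 * referenced directly through the buffer cache rather than only under the
 * folio lock.
 */
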
int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);

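/*
 * Example wiring (illustrative): filesystems whose private data can simply
 * be carried over to the new folio, such as iomap-based filesystems, set
 * ->migrate_folio = filemap_migrate_folio in their
 * address_space_operations.
 */
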
/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only writeback folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (!filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}

CL
943/*
944 * Move a page to a newly allocated page
945 * The page is locked and all ptes have been successfully removed.
946 *
947 * The new page will have replaced the old page if this function
948 * is successful.
894bc310
LS
949 *
950 * Return value:
951 * < 0 - error code
78bd5209 952 * MIGRATEPAGE_SUCCESS - success
e24f0b8f 953 */
e7e3ffeb 954static int move_to_new_folio(struct folio *dst, struct folio *src,
5c3f9a67 955 enum migrate_mode mode)
e24f0b8f 956{
bda807d4 957 int rc = -EAGAIN;
7e2a5e5a 958 bool is_lru = !__folio_test_movable(src);
e24f0b8f 959
e7e3ffeb
MWO
960 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
961 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
e24f0b8f 962
bda807d4 963 if (likely(is_lru)) {
68f2736a
MWO
964 struct address_space *mapping = folio_mapping(src);
965
bda807d4 966 if (!mapping)
54184650 967 rc = migrate_folio(mapping, dst, src, mode);
0003e2a4
SC
968 else if (mapping_unmovable(mapping))
969 rc = -EOPNOTSUPP;
5490da4f 970 else if (mapping->a_ops->migrate_folio)
bda807d4 971 /*
5490da4f
MWO
972 * Most folios have a mapping and most filesystems
973 * provide a migrate_folio callback. Anonymous folios
bda807d4 974 * are part of swap space which also has its own
5490da4f 975 * migrate_folio callback. This is the most common path
bda807d4
MK
976 * for page migration.
977 */
5490da4f
MWO
978 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
979 mode);
bda807d4 980 else
8faa8ef5 981 rc = fallback_migrate_folio(mapping, dst, src, mode);
bda807d4 982 } else {
68f2736a
MWO
983 const struct movable_operations *mops;
984
e24f0b8f 985 /*
bda807d4
MK
986 * In case of non-lru page, it could be released after
987 * isolation step. In that case, we shouldn't try migration.
e24f0b8f 988 */
e7e3ffeb
MWO
989 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
990 if (!folio_test_movable(src)) {
bda807d4 991 rc = MIGRATEPAGE_SUCCESS;
e7e3ffeb 992 folio_clear_isolated(src);
bda807d4
MK
993 goto out;
994 }
995
da707a6d 996 mops = folio_movable_ops(src);
68f2736a 997 rc = mops->migrate_page(&dst->page, &src->page, mode);
bda807d4 998 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
e7e3ffeb 999 !folio_test_isolated(src));
bda807d4 1000 }
e24f0b8f 1001
5c3f9a67 1002 /*
e7e3ffeb
MWO
1003 * When successful, old pagecache src->mapping must be cleared before
1004 * src is freed; but stats require that PageAnon be left as PageAnon.
5c3f9a67
HD
1005 */
1006 if (rc == MIGRATEPAGE_SUCCESS) {
7e2a5e5a 1007 if (__folio_test_movable(src)) {
e7e3ffeb 1008 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
bda807d4
MK
1009
1010 /*
1011 * We clear PG_movable under page_lock so any compactor
1012 * cannot try to migrate this page.
1013 */
e7e3ffeb 1014 folio_clear_isolated(src);
bda807d4
MK
1015 }
1016
1017 /*
e7e3ffeb 1018 * Anonymous and movable src->mapping will be cleared by
bda807d4
MK
1019 * free_pages_prepare so don't reset it here for keeping
1020 * the type to work PageAnon, for example.
1021 */
e7e3ffeb
MWO
1022 if (!folio_mapping_flags(src))
1023 src->mapping = NULL;
d2b2c6dd 1024
e7e3ffeb
MWO
1025 if (likely(!folio_is_zone_device(dst)))
1026 flush_dcache_folio(dst);
3fe2011f 1027 }
bda807d4 1028out:
e24f0b8f
CL
1029 return rc;
1030}
1031
/*
 * To record some information during migration, we use unused private
 * field of struct folio of the newly allocated destination folio.
 * This is safe because nobody is using it except us.
 */
enum {
	PAGE_WAS_MAPPED = BIT(0),
	PAGE_WAS_MLOCKED = BIT(1),
	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
};

static void __migrate_folio_record(struct folio *dst,
				   int old_page_state,
				   struct anon_vma *anon_vma)
{
	dst->private = (void *)anon_vma + old_page_state;
}

static void __migrate_folio_extract(struct folio *dst,
				   int *old_page_state,
				   struct anon_vma **anon_vmap)
{
	unsigned long private = (unsigned long)dst->private;

	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
	*old_page_state = private & PAGE_OLD_STATES;
	dst->private = NULL;
}

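/*
 * The anon_vma pointer and the PAGE_WAS_* bits share dst->private: anon_vma
 * structures are sufficiently aligned that the two low bits of the pointer
 * are always zero, so __migrate_folio_record() can stash the old page state
 * there, e.g.
 *
 *	dst->private == (void *)anon_vma | PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED
 *
 * and __migrate_folio_extract() masks the pieces apart again.
 */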
/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
				   int page_was_mapped,
				   struct anon_vma *anon_vma,
				   bool locked,
				   struct list_head *ret)
{
	if (page_was_mapped)
		remove_migration_ptes(src, src, false);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	if (locked)
		folio_unlock(src);
	if (ret)
		list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
		free_folio_t put_new_folio, unsigned long private)
{
	if (locked)
		folio_unlock(dst);
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
			       enum migrate_reason reason)
{
	/*
	 * Compaction can migrate also non-LRU pages which are
	 * not accounted to NR_ISOLATED_*. They can be recognized
	 * as __folio_test_movable
	 */
	if (likely(!__folio_test_movable(src)))
		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
				    folio_is_file_lru(src), -folio_nr_pages(src));

	if (reason != MR_MEMORY_FAILURE)
		/* We release the page in page_handle_poison. */
		folio_put(src);
}

/* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, struct folio **dstp, enum migrate_mode mode,
		enum migrate_reason reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int old_page_state = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__folio_test_movable(src);
	bool locked = false;
	bool dst_locked = false;

	if (folio_ref_count(src) == 1) {
		/* Folio was freed from under us. So we are done. */
		folio_clear_active(src);
		folio_clear_unevictable(src);
		/* free_pages_prepare() will clear PG_isolated. */
		list_del(&src->lru);
		migrate_folio_done(src, reason);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;
	*dstp = dst;

	dst->private = NULL;

	if (!folio_trylock(src)) {
		if (mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readahead). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		/*
		 * In "light" mode, we can wait for transient locks (eg
		 * inserting a page into the page table), but it's not
		 * worth waiting for I/O.
		 */
		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
			goto out;

		folio_lock(src);
	}
	locked = true;
	if (folio_test_mlocked(src))
		old_page_state |= PAGE_WAS_MLOCKED;

	if (folio_test_writeback(src)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out;
		}
		folio_wait_writeback(src);
	}

	/*
	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock()
	 * File Caches may use write_page() or lock_page() in migration, then,
	 * just care Anon page here.
	 *
	 * Only folio_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (folio_test_anon(src) && !folio_test_ksm(src))
		anon_vma = folio_get_anon_vma(src);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to dst at this point. We used to have a BUG
	 * here if folio_trylock(dst) fails, but would like to allow for
	 * cases where there might be a race with the previous use of dst.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!folio_trylock(dst)))
		goto out;
	dst_locked = true;

	if (unlikely(!is_lru)) {
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a src->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_cleanup_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!src->mapping) {
		if (folio_test_private(src)) {
			try_to_free_buffers(src);
			goto out;
		}
	} else if (folio_mapped(src)) {
		/* Establish migration ptes */
		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
			       !folio_test_ksm(src) && !anon_vma, src);
		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
		old_page_state |= PAGE_WAS_MAPPED;
	}

	if (!folio_mapped(src)) {
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return MIGRATEPAGE_UNMAP;
	}

out:
	/*
	 * A folio that has not been unmapped will be restored to
	 * right list unless we want to retry.
	 */
	if (rc == -EAGAIN)
		ret = NULL;

	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
			       anon_vma, locked, ret);
	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);

	return rc;
}

/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
			      struct folio *src, struct folio *dst,
			      enum migrate_mode mode, enum migrate_reason reason,
			      struct list_head *ret)
{
	int rc;
	int old_page_state = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__folio_test_movable(src);
	struct list_head *prev;

	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
	prev = dst->lru.prev;
	list_del(&dst->lru);

	rc = move_to_new_folio(dst, src, mode);
	if (rc)
		goto out;

	if (unlikely(!is_lru))
		goto out_unlock_both;

	/*
	 * When successful, push dst to LRU immediately: so that if it
	 * turns out to be an mlocked page, remove_migration_ptes() will
	 * automatically build up the correct dst->mlock_count for it.
	 *
	 * We would like to do something similar for the old page, when
	 * unsuccessful, and other cases when a page has been temporarily
	 * isolated from the unevictable LRU: but this case is the easiest.
	 */
	folio_add_lru(dst);
	if (old_page_state & PAGE_WAS_MLOCKED)
		lru_add_drain();

	if (old_page_state & PAGE_WAS_MAPPED)
		remove_migration_ptes(src, dst, false);

out_unlock_both:
	folio_unlock(dst);
	set_page_owner_migrate_reason(&dst->page, reason);
	/*
	 * If migration is successful, decrease refcount of dst,
	 * which will not free the page because new page owner increased
	 * refcounter.
	 */
	folio_put(dst);

	/*
	 * A folio that has been migrated has all references removed
	 * and will be freed.
	 */
	list_del(&src->lru);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	folio_unlock(src);
	migrate_folio_done(src, reason);

	return rc;
out:
	/*
	 * A folio that has not been migrated will be restored to
	 * right list unless we want to retry.
	 */
	if (rc == -EAGAIN) {
		list_add(&dst->lru, prev);
		__migrate_folio_record(dst, old_page_state, anon_vma);
		return rc;
	}

	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
			       anon_vma, true, ret);
	migrate_folio_undo_dst(dst, true, put_new_folio, private);

	return rc;
}

/*
 * Counterpart of unmap_and_move_page() for hugepage migration.
 *
 * This function doesn't wait the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and writeback status of all subpages are counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then pte is replaced with migration swap entry and direct I/O code
 * will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, int force, enum migrate_mode mode,
		int reason, struct list_head *ret)
{
	struct folio *dst;
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;

	if (folio_ref_count(src) == 1) {
		/* page was freed from under us. So we are done. */
		folio_putback_active_hugetlb(src);
		return MIGRATEPAGE_SUCCESS;
	}

	dst = get_new_folio(src, private);
	if (!dst)
		return -ENOMEM;

	if (!folio_trylock(src)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		folio_lock(src);
	}

	/*
	 * Check for pages which are in the process of being freed. Without
	 * folio_mapping() set, hugetlbfs specific move page routine will not
	 * be called and we could leak usage counts for subpools.
	 */
	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (folio_test_anon(src))
		anon_vma = folio_get_anon_vma(src);

	if (unlikely(!folio_trylock(dst)))
		goto put_anon;

	if (folio_mapped(src)) {
		enum ttu_flags ttu = 0;

		if (!folio_test_anon(src)) {
			/*
			 * In shared mappings, try_to_unmap could potentially
			 * call huge_pmd_unshare. Because of this, take
			 * semaphore in write mode here and set TTU_RMAP_LOCKED
			 * to let lower levels know we have taken the lock.
			 */
			mapping = hugetlb_folio_mapping_lock_write(src);
			if (unlikely(!mapping))
				goto unlock_put_anon;

			ttu = TTU_RMAP_LOCKED;
		}

		try_to_migrate(src, ttu);
		page_was_mapped = 1;

		if (ttu & TTU_RMAP_LOCKED)
			i_mmap_unlock_write(mapping);
	}

	if (!folio_mapped(src))
		rc = move_to_new_folio(dst, src, mode);

	if (page_was_mapped)
		remove_migration_ptes(src,
			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);

unlock_put_anon:
	folio_unlock(dst);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(src, dst, reason);
		put_new_folio = NULL;
	}

out_unlock:
	folio_unlock(src);
out:
	if (rc == MIGRATEPAGE_SUCCESS)
		folio_putback_active_hugetlb(src);
	else if (rc != -EAGAIN)
		list_move_tail(&src->lru, ret);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it. Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_folio)
		put_new_folio(dst, private);
	else
		folio_putback_active_hugetlb(dst);

	return rc;
}

static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
{
	int rc;

	folio_lock(folio);
	rc = split_folio_to_list(folio, split_folios);
	folio_unlock(folio);
	if (!rc)
		list_move_tail(&folio->lru, split_folios);

	return rc;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
#else
#define NR_MAX_BATCHED_MIGRATION	512
#endif
#define NR_MAX_MIGRATE_PAGES_RETRY	10
#define NR_MAX_MIGRATE_ASYNC_RETRY	3
#define NR_MAX_MIGRATE_SYNC_RETRY					\
	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)

struct migrate_pages_stats {
	int nr_succeeded;	/* Normal and large folios migrated successfully, in
				   units of base pages */
	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
				   units of base pages. Untried folios aren't counted */
	int nr_thp_succeeded;	/* THP migrated successfully */
	int nr_thp_failed;	/* THP failed to be migrated */
	int nr_thp_split;	/* THP split before migrating */
	int nr_split;		/* Large folio (include THP) split before migrating */
};

/*
 * Returns the number of hugetlb folios that were not migrated, or an error code
 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
 * any more because the list has become empty or no retryable hugetlb folios
 * exist any more. It is caller's responsibility to call putback_movable_pages()
 * only if ret != 0.
 */
static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
			    free_folio_t put_new_folio, unsigned long private,
			    enum migrate_mode mode, int reason,
			    struct migrate_pages_stats *stats,
			    struct list_head *ret_folios)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_retry_pages = 0;
	int pass = 0;
	struct folio *folio, *folio2;
	int rc, nr_pages;

	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
		retry = 0;
		nr_retry_pages = 0;

		list_for_each_entry_safe(folio, folio2, from, lru) {
			if (!folio_test_hugetlb(folio))
				continue;

			nr_pages = folio_nr_pages(folio);

			cond_resched();

			/*
			 * Migratability of hugepages depends on architectures and
			 * their size. This check is necessary because some callers
			 * of hugepage migration like soft offline and memory
			 * hotremove don't walk through page tables or check whether
			 * the hugepage is pmd-based or not before kicking migration.
			 */
			if (!hugepage_migration_supported(folio_hstate(folio))) {
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				list_move_tail(&folio->lru, ret_folios);
				continue;
			}

			rc = unmap_and_move_huge_page(get_new_folio,
						      put_new_folio, private,
						      folio, pass > 2, mode,
						      reason, ret_folios);
			/*
			 * The rules are:
			 *	Success: hugetlb folio will be put back
			 *	-EAGAIN: stay on the from list
			 *	-ENOMEM: stay on the from list
			 *	Other errno: put on ret_folios list
			 */
			switch(rc) {
			case -ENOMEM:
				/*
				 * When memory is low, don't bother to try to migrate
				 * other folios, just exit.
				 */
				stats->nr_failed_pages += nr_pages + nr_retry_pages;
				return -ENOMEM;
			case -EAGAIN:
				retry++;
				nr_retry_pages += nr_pages;
				break;
			case MIGRATEPAGE_SUCCESS:
				stats->nr_succeeded += nr_pages;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, etc.):
				 * unlike -EAGAIN case, the failed folio is
				 * removed from migration folio list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				stats->nr_failed_pages += nr_pages;
				break;
			}
		}
	}
	/*
	 * nr_failed is number of hugetlb folios failed to be migrated. After
	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
	 * folios as failed.
	 */
	nr_failed += retry;
	stats->nr_failed_pages += nr_retry_pages;

	return nr_failed;
}

5dfab109
HY
1612/*
 1613 * migrate_pages_batch() first unmaps as many folios in the from list as
 1614 * possible, then moves the unmapped folios.
fb3592c4
HY
1615 *
 1616 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
 1617 * lock or bit while more than one folio is locked, which may cause a
 1618 * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
 1619 * length of the from list must be <= 1.
5dfab109 1620 */
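/*
 * Editorial sketch of the flow implemented below: folios are first unmapped
 * and collected on a local unmap_folios list (their freshly allocated targets
 * on dst_folios), the deferred TLB flush is issued once via
 * try_to_unmap_flush(), and only then are the unmapped folios moved to their
 * targets with migrate_folio_move().
 */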
4e096ae1
MWO
1621static int migrate_pages_batch(struct list_head *from,
1622 new_folio_t get_new_folio, free_folio_t put_new_folio,
1623 unsigned long private, enum migrate_mode mode, int reason,
1624 struct list_head *ret_folios, struct list_head *split_folios,
1625 struct migrate_pages_stats *stats, int nr_pass)
b20a3503 1626{
a21d2133 1627 int retry = 1;
1a5bae25 1628 int thp_retry = 1;
b20a3503 1629 int nr_failed = 0;
077309bc 1630 int nr_retry_pages = 0;
b20a3503 1631 int pass = 0;
1a5bae25 1632 bool is_thp = false;
a259945e 1633 bool is_large = false;
5dfab109 1634 struct folio *folio, *folio2, *dst = NULL, *dst2;
a21d2133 1635 int rc, rc_saved = 0, nr_pages;
5dfab109
HY
1636 LIST_HEAD(unmap_folios);
1637 LIST_HEAD(dst_folios);
b0b515bf 1638 bool nosplit = (reason == MR_NUMA_MISPLACED);
e5bfff8b 1639
fb3592c4
HY
1640 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1641 !list_empty(from) && !list_is_singular(from));
a21d2133 1642
124abced 1643 for (pass = 0; pass < nr_pass && retry; pass++) {
e24f0b8f 1644 retry = 0;
1a5bae25 1645 thp_retry = 0;
077309bc 1646 nr_retry_pages = 0;
b20a3503 1647
eaec4e63 1648 list_for_each_entry_safe(folio, folio2, from, lru) {
a259945e
ZY
1649 is_large = folio_test_large(folio);
1650 is_thp = is_large && folio_test_pmd_mappable(folio);
eaec4e63 1651 nr_pages = folio_nr_pages(folio);
e5bfff8b 1652
e24f0b8f 1653 cond_resched();
2d1db3b1 1654
7262f208
ZY
1655 /*
1656 * The rare folio on the deferred split list should
8e279f97
HD
 1657 * be split now. It should not count as a failure,
 1658 * but nr_failed is incremented anyway because, without
 1659 * doing so, migrate_pages() may report success with
 1660 * (split but unmigrated) pages still on its fromlist,
 1661 * whereas it always reports success when its fromlist is empty.
c6408250
ZY
 1662 * stats->nr_thp_failed should be increased too,
 1663 * otherwise the stats would become inconsistent when
 1664 * migrate_pages_batch() is called via migrate_pages()
 1665 * with MIGRATE_SYNC and MIGRATE_ASYNC.
8e279f97 1666 *
7262f208
ZY
 1667 * Only check list membership without removing the folio
 1668 * from the list, since the folio can be on a
 1669 * deferred_split_scan() local list and removing it could
 1670 * corrupt that local list. The folio split process below
 1671 * can handle it with the help of folio_ref_freeze().
1672 *
1673 * nr_pages > 2 is needed to avoid checking order-1
1674 * page cache folios. They exist, in contrast to
1675 * non-existent order-1 anonymous folios, and do not
1676 * use _deferred_list.
1677 */
1678 if (nr_pages > 2 &&
1679 !list_empty(&folio->_deferred_list)) {
1680 if (try_split_folio(folio, split_folios) == 0) {
8e279f97 1681 nr_failed++;
c6408250 1682 stats->nr_thp_failed += is_thp;
7262f208
ZY
1683 stats->nr_thp_split += is_thp;
1684 stats->nr_split++;
1685 continue;
1686 }
1687 }
1688
d532e2e5 1689 /*
eaec4e63 1690 * Large folio migration might be unsupported or
6f7d760e 1691 * the allocation might fail, so we should retry
eaec4e63
HY
1692 * on the same folio with the large folio split
1693 * to normal folios.
d532e2e5 1694 *
eaec4e63 1695 * Split folios are put in split_folios, and
e6fa8a79
HY
1696 * we will migrate them after the rest of the
1697 * list is processed.
d532e2e5 1698 */
6f7d760e 1699 if (!thp_migration_supported() && is_thp) {
124abced 1700 nr_failed++;
6f7d760e 1701 stats->nr_thp_failed++;
a21d2133 1702 if (!try_split_folio(folio, split_folios)) {
6f7d760e 1703 stats->nr_thp_split++;
a259945e 1704 stats->nr_split++;
6f7d760e 1705 continue;
f430893b 1706 }
6f7d760e
HY
1707 stats->nr_failed_pages += nr_pages;
1708 list_move_tail(&folio->lru, ret_folios);
1709 continue;
1710 }
f430893b 1711
4e096ae1
MWO
1712 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1713 private, folio, &dst, mode, reason,
1714 ret_folios);
dd4ae78a
YS
1715 /*
1716 * The rules are:
e5bfff8b 1717 * Success: folio will be freed
5dfab109
HY
1718 * Unmap: folio will be put on unmap_folios list,
1719 * dst folio put on dst_folios list
dd4ae78a
YS
1720 * -EAGAIN: stay on the from list
1721 * -ENOMEM: stay on the from list
42012e04 1722 * Other errno: put on ret_folios list
dd4ae78a 1723 */
e24f0b8f 1724 switch(rc) {
95a402c3 1725 case -ENOMEM:
94723aaf 1726 /*
d532e2e5 1727 * When memory is low, don't bother to try to migrate
5dfab109 1728 * other folios, move unmapped folios, then exit.
94723aaf 1729 */
124abced
HY
1730 nr_failed++;
1731 stats->nr_thp_failed += is_thp;
1732 /* Large folio NUMA faulting doesn't split to retry. */
a259945e 1733 if (is_large && !nosplit) {
124abced
HY
1734 int ret = try_split_folio(folio, split_folios);
1735
1736 if (!ret) {
1737 stats->nr_thp_split += is_thp;
49cac03a 1738 stats->nr_split++;
124abced
HY
1739 break;
1740 } else if (reason == MR_LONGTERM_PIN &&
1741 ret == -EAGAIN) {
1742 /*
 1743 * Try again to split the large folio to
1744 * mitigate the failure of longterm pinning.
1745 */
1746 retry++;
1747 thp_retry += is_thp;
1748 nr_retry_pages += nr_pages;
1749 /* Undo duplicated failure counting. */
1750 nr_failed--;
1751 stats->nr_thp_failed -= is_thp;
1752 break;
94723aaf 1753 }
1a5bae25 1754 }
b5bade97 1755
42012e04 1756 stats->nr_failed_pages += nr_pages + nr_retry_pages;
fbed53b4 1757 /* nr_failed isn't updated because it won't be used (rc_saved is returned) */
42012e04 1758 stats->nr_thp_failed += thp_retry;
5dfab109
HY
1759 rc_saved = rc;
1760 if (list_empty(&unmap_folios))
1761 goto out;
1762 else
1763 goto move;
e24f0b8f 1764 case -EAGAIN:
124abced
HY
1765 retry++;
1766 thp_retry += is_thp;
eaec4e63 1767 nr_retry_pages += nr_pages;
e24f0b8f 1768 break;
78bd5209 1769 case MIGRATEPAGE_SUCCESS:
42012e04
HY
1770 stats->nr_succeeded += nr_pages;
1771 stats->nr_thp_succeeded += is_thp;
e24f0b8f 1772 break;
5dfab109 1773 case MIGRATEPAGE_UNMAP:
5dfab109
HY
1774 list_move_tail(&folio->lru, &unmap_folios);
1775 list_add_tail(&dst->lru, &dst_folios);
e24f0b8f
CL
1776 break;
1777 default:
354a3363 1778 /*
d532e2e5 1779 * Permanent failure (-EBUSY, etc.):
eaec4e63
HY
 1780 * unlike the -EAGAIN case, the failed folio is
 1781 * removed from the migration folio list and not
354a3363
NH
1782 * retried in the next outer loop.
1783 */
124abced
HY
1784 nr_failed++;
1785 stats->nr_thp_failed += is_thp;
42012e04 1786 stats->nr_failed_pages += nr_pages;
e24f0b8f 1787 break;
2d1db3b1 1788 }
b20a3503
CL
1789 }
1790 }
7047b5a4 1791 nr_failed += retry;
42012e04
HY
1792 stats->nr_thp_failed += thp_retry;
1793 stats->nr_failed_pages += nr_retry_pages;
5dfab109 1794move:
7e12beb8
HY
1795 /* Flush TLBs for all unmapped folios */
1796 try_to_unmap_flush();
1797
5dfab109 1798 retry = 1;
124abced 1799 for (pass = 0; pass < nr_pass && retry; pass++) {
5dfab109 1800 retry = 0;
5dfab109
HY
1801 thp_retry = 0;
1802 nr_retry_pages = 0;
1803
1804 dst = list_first_entry(&dst_folios, struct folio, lru);
1805 dst2 = list_next_entry(dst, lru);
1806 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
124abced 1807 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
5dfab109
HY
1808 nr_pages = folio_nr_pages(folio);
1809
1810 cond_resched();
1811
4e096ae1 1812 rc = migrate_folio_move(put_new_folio, private,
5dfab109
HY
1813 folio, dst, mode,
1814 reason, ret_folios);
1815 /*
1816 * The rules are:
1817 * Success: folio will be freed
1818 * -EAGAIN: stay on the unmap_folios list
1819 * Other errno: put on ret_folios list
1820 */
1821 switch(rc) {
1822 case -EAGAIN:
124abced
HY
1823 retry++;
1824 thp_retry += is_thp;
5dfab109
HY
1825 nr_retry_pages += nr_pages;
1826 break;
1827 case MIGRATEPAGE_SUCCESS:
1828 stats->nr_succeeded += nr_pages;
1829 stats->nr_thp_succeeded += is_thp;
1830 break;
1831 default:
124abced
HY
1832 nr_failed++;
1833 stats->nr_thp_failed += is_thp;
5dfab109 1834 stats->nr_failed_pages += nr_pages;
e24f0b8f 1835 break;
2d1db3b1 1836 }
5dfab109
HY
1837 dst = dst2;
1838 dst2 = list_next_entry(dst, lru);
b20a3503
CL
1839 }
1840 }
7047b5a4 1841 nr_failed += retry;
5dfab109
HY
1842 stats->nr_thp_failed += thp_retry;
1843 stats->nr_failed_pages += nr_retry_pages;
1844
124abced 1845 rc = rc_saved ? : nr_failed;
5dfab109
HY
1846out:
1847 /* Cleanup remaining folios */
1848 dst = list_first_entry(&dst_folios, struct folio, lru);
1849 dst2 = list_next_entry(dst, lru);
1850 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
eebb3dab 1851 int old_page_state = 0;
5dfab109
HY
1852 struct anon_vma *anon_vma = NULL;
1853
eebb3dab
BW
1854 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1855 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1856 anon_vma, true, ret_folios);
5dfab109 1857 list_del(&dst->lru);
4e096ae1 1858 migrate_folio_undo_dst(dst, true, put_new_folio, private);
5dfab109
HY
1859 dst = dst2;
1860 dst2 = list_next_entry(dst, lru);
1861 }
1862
42012e04
HY
1863 return rc;
1864}
1865
4e096ae1
MWO
1866static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1867 free_folio_t put_new_folio, unsigned long private,
1868 enum migrate_mode mode, int reason,
1869 struct list_head *ret_folios, struct list_head *split_folios,
1870 struct migrate_pages_stats *stats)
2ef7dbb2
HY
1871{
1872 int rc, nr_failed = 0;
1873 LIST_HEAD(folios);
1874 struct migrate_pages_stats astats;
1875
1876 memset(&astats, 0, sizeof(astats));
 1877 /* Try to migrate in batch with MIGRATE_ASYNC mode first */
4e096ae1 1878 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
2ef7dbb2
HY
1879 reason, &folios, split_folios, &astats,
1880 NR_MAX_MIGRATE_ASYNC_RETRY);
1881 stats->nr_succeeded += astats.nr_succeeded;
1882 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1883 stats->nr_thp_split += astats.nr_thp_split;
a259945e 1884 stats->nr_split += astats.nr_split;
2ef7dbb2
HY
1885 if (rc < 0) {
1886 stats->nr_failed_pages += astats.nr_failed_pages;
1887 stats->nr_thp_failed += astats.nr_thp_failed;
1888 list_splice_tail(&folios, ret_folios);
1889 return rc;
1890 }
1891 stats->nr_thp_failed += astats.nr_thp_split;
a259945e
ZY
1892 /*
1893 * Do not count rc, as pages will be retried below.
1894 * Count nr_split only, since it includes nr_thp_split.
1895 */
1896 nr_failed += astats.nr_split;
2ef7dbb2
HY
1897 /*
 1898 * Fall back to migrating all failed folios one by one synchronously. All
 1899 * failed folios except split THPs will be retried, so their failure
 1900 * isn't counted here.
1901 */
1902 list_splice_tail_init(&folios, from);
1903 while (!list_empty(from)) {
1904 list_move(from->next, &folios);
4e096ae1 1905 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2ef7dbb2
HY
1906 private, mode, reason, ret_folios,
1907 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1908 list_splice_tail_init(&folios, ret_folios);
1909 if (rc < 0)
1910 return rc;
1911 nr_failed += rc;
1912 }
1913
1914 return nr_failed;
1915}
1916
42012e04
HY
1917/*
1918 * migrate_pages - migrate the folios specified in a list, to the free folios
1919 * supplied as the target for the page migration
1920 *
1921 * @from: The list of folios to be migrated.
4e096ae1 1922 * @get_new_folio: The function used to allocate free folios to be used
42012e04 1923 * as the target of the folio migration.
4e096ae1 1924 * @put_new_folio: The function used to free target folios if migration
42012e04 1925 * fails, or NULL if no special handling is necessary.
4e096ae1 1926 * @private: Private data to be passed on to get_new_folio()
42012e04
HY
1927 * @mode: The migration mode that specifies the constraints for
1928 * folio migration, if any.
1929 * @reason: The reason for folio migration.
1930 * @ret_succeeded: Set to the number of folios migrated successfully if
1931 * the caller passes a non-NULL pointer.
1932 *
 1933 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
 1934 * are movable any more because the list has become empty or no retryable folios
 1935 * remain. It is the caller's responsibility to call putback_movable_pages()
 1936 * only if ret != 0.
1937 *
 1938 * Returns the number of {normal, large, hugetlb} folios that were not
 1939 * migrated, or an error code. Each large folio that was split is counted
 1940 * as one non-migrated large folio, no matter how many of its split folios
 1941 * are migrated successfully.
1942 */
4e096ae1
MWO
1943int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1944 free_folio_t put_new_folio, unsigned long private,
42012e04
HY
1945 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1946{
1947 int rc, rc_gather;
2ef7dbb2 1948 int nr_pages;
42012e04
HY
1949 struct folio *folio, *folio2;
1950 LIST_HEAD(folios);
1951 LIST_HEAD(ret_folios);
a21d2133 1952 LIST_HEAD(split_folios);
42012e04
HY
1953 struct migrate_pages_stats stats;
1954
1955 trace_mm_migrate_pages_start(mode, reason);
1956
1957 memset(&stats, 0, sizeof(stats));
1958
4e096ae1 1959 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
42012e04
HY
1960 mode, reason, &stats, &ret_folios);
1961 if (rc_gather < 0)
1962 goto out;
fb3592c4 1963
42012e04
HY
1964again:
1965 nr_pages = 0;
1966 list_for_each_entry_safe(folio, folio2, from, lru) {
1967 /* Retried hugetlb folios will be kept in list */
1968 if (folio_test_hugetlb(folio)) {
1969 list_move_tail(&folio->lru, &ret_folios);
1970 continue;
1971 }
1972
1973 nr_pages += folio_nr_pages(folio);
2ef7dbb2 1974 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
42012e04
HY
1975 break;
1976 }
2ef7dbb2 1977 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
fb3592c4 1978 list_cut_before(&folios, from, &folio2->lru);
42012e04
HY
1979 else
1980 list_splice_init(from, &folios);
2ef7dbb2 1981 if (mode == MIGRATE_ASYNC)
4e096ae1
MWO
1982 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1983 private, mode, reason, &ret_folios,
1984 &split_folios, &stats,
1985 NR_MAX_MIGRATE_PAGES_RETRY);
2ef7dbb2 1986 else
4e096ae1
MWO
1987 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1988 private, mode, reason, &ret_folios,
1989 &split_folios, &stats);
42012e04
HY
1990 list_splice_tail_init(&folios, &ret_folios);
1991 if (rc < 0) {
1992 rc_gather = rc;
a21d2133 1993 list_splice_tail(&split_folios, &ret_folios);
42012e04
HY
1994 goto out;
1995 }
a21d2133
HY
1996 if (!list_empty(&split_folios)) {
1997 /*
 1998 * Failure isn't counted since all split folios of a large folio
 1999 * are already counted as 1 failure. And we only try to migrate
 2000 * with minimal effort: force MIGRATE_ASYNC mode and retry once.
2001 */
4e096ae1
MWO
2002 migrate_pages_batch(&split_folios, get_new_folio,
2003 put_new_folio, private, MIGRATE_ASYNC, reason,
2004 &ret_folios, NULL, &stats, 1);
a21d2133
HY
2005 list_splice_tail_init(&split_folios, &ret_folios);
2006 }
42012e04
HY
2007 rc_gather += rc;
2008 if (!list_empty(from))
2009 goto again;
95a402c3 2010out:
dd4ae78a 2011 /*
eaec4e63 2012 * Put the permanently failed folios back on the migration list; they
dd4ae78a
YS
 2013 * will be put back on the right list by the caller.
2014 */
eaec4e63 2015 list_splice(&ret_folios, from);
dd4ae78a 2016
03e5f82e 2017 /*
eaec4e63
HY
 2018 * Return 0 if all split folios of the fail-to-migrate large folios
 2019 * were migrated successfully.
03e5f82e
BW
2020 */
2021 if (list_empty(from))
42012e04 2022 rc_gather = 0;
03e5f82e 2023
5b855937
HY
2024 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2025 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2026 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2027 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2028 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2029 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2030 stats.nr_thp_succeeded, stats.nr_thp_failed,
49cac03a
ZY
2031 stats.nr_thp_split, stats.nr_split, mode,
2032 reason);
7b2a2d4a 2033
5ac95884 2034 if (ret_succeeded)
5b855937 2035 *ret_succeeded = stats.nr_succeeded;
5ac95884 2036
42012e04 2037 return rc_gather;
b20a3503 2038}
95a402c3 2039
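/*
 * A minimal sketch of the calling convention (editorial, modeled on
 * do_move_pages_to_node() below; "folio_list" and "target_nid" are
 * hypothetical):
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *		.reason = MR_SYSCALL,
 *	};
 *	int err = migrate_pages(&folio_list, alloc_migration_target, NULL,
 *				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *				NULL);
 *	if (err)
 *		putback_movable_pages(&folio_list);
 */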
4e096ae1 2040struct folio *alloc_migration_target(struct folio *src, unsigned long private)
b4b38223 2041{
19fc7bed
JK
2042 struct migration_target_control *mtc;
2043 gfp_t gfp_mask;
b4b38223 2044 unsigned int order = 0;
19fc7bed
JK
2045 int nid;
2046 int zidx;
2047
2048 mtc = (struct migration_target_control *)private;
2049 gfp_mask = mtc->gfp_mask;
2050 nid = mtc->nid;
2051 if (nid == NUMA_NO_NODE)
4e096ae1 2052 nid = folio_nid(src);
b4b38223 2053
4e096ae1
MWO
2054 if (folio_test_hugetlb(src)) {
2055 struct hstate *h = folio_hstate(src);
d92bbc27 2056
19fc7bed 2057 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
4e096ae1 2058 return alloc_hugetlb_folio_nodemask(h, nid,
42d0c3fb
BW
2059 mtc->nmask, gfp_mask,
2060 htlb_allow_alloc_fallback(mtc->reason));
d92bbc27 2061 }
b4b38223 2062
4e096ae1 2063 if (folio_test_large(src)) {
9933a0c8
JK
2064 /*
2065 * clear __GFP_RECLAIM to make the migration callback
2066 * consistent with regular THP allocations.
2067 */
2068 gfp_mask &= ~__GFP_RECLAIM;
b4b38223 2069 gfp_mask |= GFP_TRANSHUGE;
4e096ae1 2070 order = folio_order(src);
b4b38223 2071 }
4e096ae1 2072 zidx = zone_idx(folio_zone(src));
19fc7bed 2073 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
b4b38223
JK
2074 gfp_mask |= __GFP_HIGHMEM;
2075
4e096ae1 2076 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
b4b38223
JK
2077}
2078
742755a1 2079#ifdef CONFIG_NUMA
742755a1 2080
a49bd4d7 2081static int store_status(int __user *status, int start, int value, int nr)
742755a1 2082{
a49bd4d7
MH
2083 while (nr-- > 0) {
2084 if (put_user(value, status + start))
2085 return -EFAULT;
2086 start++;
2087 }
2088
2089 return 0;
2090}
2091
ec47e250 2092static int do_move_pages_to_node(struct list_head *pagelist, int node)
a49bd4d7
MH
2093{
2094 int err;
a0976311
JK
2095 struct migration_target_control mtc = {
2096 .nid = node,
2097 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
e42dfe4e 2098 .reason = MR_SYSCALL,
a0976311 2099 };
a49bd4d7 2100
a0976311 2101 err = migrate_pages(pagelist, alloc_migration_target, NULL,
5ac95884 2102 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
a49bd4d7
MH
2103 if (err)
2104 putback_movable_pages(pagelist);
2105 return err;
742755a1
CL
2106}
2107
2108/*
a49bd4d7
MH
2109 * Resolves the given address to a struct page, isolates it from the LRU and
 2110 * puts it on the given pagelist.
e0153fc2
YS
2111 * Returns:
2112 * errno - if the page cannot be found/isolated
2113 * 0 - when it doesn't have to be migrated because it is already on the
2114 * target node
2115 * 1 - when it has been queued
742755a1 2116 */
428e106a 2117static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
a49bd4d7 2118 int node, struct list_head *pagelist, bool migrate_all)
742755a1 2119{
a49bd4d7 2120 struct vm_area_struct *vma;
428e106a 2121 unsigned long addr;
a49bd4d7 2122 struct page *page;
d64cfccb 2123 struct folio *folio;
742755a1 2124 int err;
742755a1 2125
d8ed45c5 2126 mmap_read_lock(mm);
428e106a
KS
2127 addr = (unsigned long)untagged_addr_remote(mm, p);
2128
a49bd4d7 2129 err = -EFAULT;
cb1c37b1
ML
2130 vma = vma_lookup(mm, addr);
2131 if (!vma || !vma_migratable(vma))
a49bd4d7 2132 goto out;
742755a1 2133
a49bd4d7 2134 /* FOLL_DUMP to ignore special (like zero) pages */
87d2762e 2135 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da 2136
a49bd4d7
MH
2137 err = PTR_ERR(page);
2138 if (IS_ERR(page))
2139 goto out;
89f5b7da 2140
a49bd4d7 2141 err = -ENOENT;
f7091ed6 2142 if (!page)
a49bd4d7 2143 goto out;
742755a1 2144
d64cfccb
KW
2145 folio = page_folio(page);
2146 if (folio_is_zone_device(folio))
2147 goto out_putfolio;
f7091ed6 2148
a49bd4d7 2149 err = 0;
d64cfccb
KW
2150 if (folio_nid(folio) == node)
2151 goto out_putfolio;
742755a1 2152
a49bd4d7 2153 err = -EACCES;
31ce0d7e 2154 if (folio_likely_mapped_shared(folio) && !migrate_all)
d64cfccb 2155 goto out_putfolio;
742755a1 2156
fa1df3f6 2157 err = -EBUSY;
d64cfccb 2158 if (folio_test_hugetlb(folio)) {
fa1df3f6
KW
2159 if (isolate_hugetlb(folio, pagelist))
2160 err = 1;
a49bd4d7 2161 } else {
fa1df3f6 2162 if (!folio_isolate_lru(folio))
d64cfccb 2163 goto out_putfolio;
742755a1 2164
e0153fc2 2165 err = 1;
d64cfccb
KW
2166 list_add_tail(&folio->lru, pagelist);
2167 node_stat_mod_folio(folio,
2168 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2169 folio_nr_pages(folio));
a49bd4d7 2170 }
d64cfccb 2171out_putfolio:
a49bd4d7 2172 /*
d64cfccb
KW
2173 * Either remove the duplicate refcount from folio_isolate_lru()
2174 * or drop the folio ref if it was not isolated.
a49bd4d7 2175 */
d64cfccb 2176 folio_put(folio);
a49bd4d7 2177out:
d8ed45c5 2178 mmap_read_unlock(mm);
742755a1
CL
2179 return err;
2180}
2181
ec47e250 2182static int move_pages_and_store_status(int node,
7ca8783a
WY
2183 struct list_head *pagelist, int __user *status,
2184 int start, int i, unsigned long nr_pages)
2185{
2186 int err;
2187
5d7ae891
WY
2188 if (list_empty(pagelist))
2189 return 0;
2190
ec47e250 2191 err = do_move_pages_to_node(pagelist, node);
7ca8783a
WY
2192 if (err) {
2193 /*
 2194 * A positive err is the number of pages that failed
 2195 * to migrate. Since we are going to abort and return
 2196 * the number of non-migrated pages, we need to include
ab9dd4f8 2197 * the rest of the nr_pages that have not been
7ca8783a
WY
 2198 * attempted as well (e.g. if 2 of the first 6 pages
 2199 * failed and 4 were never tried, report 6).
2200 */
2201 if (err > 0)
a7504ed1 2202 err += nr_pages - i;
7ca8783a
WY
2203 return err;
2204 }
2205 return store_status(status, start, node, i - start);
2206}
2207
5e9a0f02
BG
2208/*
 2209 * Migrate an array of page addresses onto an array of nodes and fill
 2210 * the corresponding status array.
2211 */
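/*
 * Editorial note on the strategy below: addresses that request the same
 * destination node are queued on a local pagelist and only submitted to
 * migration (with their status stored) once the requested node changes, an
 * error occurs, or the input array is exhausted, so consecutive same-node
 * requests are migrated as one batch.
 */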
3268c63e 2212static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
5e9a0f02
BG
2213 unsigned long nr_pages,
2214 const void __user * __user *pages,
2215 const int __user *nodes,
2216 int __user *status, int flags)
2217{
229e2253 2218 compat_uptr_t __user *compat_pages = (void __user *)pages;
a49bd4d7
MH
2219 int current_node = NUMA_NO_NODE;
2220 LIST_HEAD(pagelist);
2221 int start, i;
2222 int err = 0, err1;
35282a2d 2223
361a2a22 2224 lru_cache_disable();
35282a2d 2225
a49bd4d7
MH
2226 for (i = start = 0; i < nr_pages; i++) {
2227 const void __user *p;
a49bd4d7 2228 int node;
3140a227 2229
a49bd4d7 2230 err = -EFAULT;
229e2253
GP
2231 if (in_compat_syscall()) {
2232 compat_uptr_t cp;
2233
2234 if (get_user(cp, compat_pages + i))
2235 goto out_flush;
2236
2237 p = compat_ptr(cp);
2238 } else {
2239 if (get_user(p, pages + i))
2240 goto out_flush;
2241 }
a49bd4d7
MH
2242 if (get_user(node, nodes + i))
2243 goto out_flush;
a49bd4d7
MH
2244
2245 err = -ENODEV;
2246 if (node < 0 || node >= MAX_NUMNODES)
2247 goto out_flush;
2248 if (!node_state(node, N_MEMORY))
2249 goto out_flush;
5e9a0f02 2250
a49bd4d7
MH
2251 err = -EACCES;
2252 if (!node_isset(node, task_nodes))
2253 goto out_flush;
2254
2255 if (current_node == NUMA_NO_NODE) {
2256 current_node = node;
2257 start = i;
2258 } else if (node != current_node) {
ec47e250 2259 err = move_pages_and_store_status(current_node,
7ca8783a 2260 &pagelist, status, start, i, nr_pages);
a49bd4d7
MH
2261 if (err)
2262 goto out;
2263 start = i;
2264 current_node = node;
3140a227
BG
2265 }
2266
a49bd4d7
MH
2267 /*
2268 * Errors in the page lookup or isolation are not fatal and we simply
 2269 * report them via status.
2270 */
428e106a
KS
2271 err = add_page_for_migration(mm, p, current_node, &pagelist,
2272 flags & MPOL_MF_MOVE_ALL);
e0153fc2 2273
d08221a0 2274 if (err > 0) {
e0153fc2
YS
2275 /* The page is successfully queued for migration */
2276 continue;
2277 }
3140a227 2278
65462462
JH
2279 /*
 2280 * The move_pages() man page does not list -EEXIST as a possible status, so
2281 * use -EFAULT instead.
2282 */
2283 if (err == -EEXIST)
2284 err = -EFAULT;
2285
d08221a0
WY
2286 /*
2287 * If the page is already on the target node (!err), store the
2288 * node, otherwise, store the err.
2289 */
2290 err = store_status(status, i, err ? : current_node, 1);
a49bd4d7
MH
2291 if (err)
2292 goto out_flush;
5e9a0f02 2293
ec47e250 2294 err = move_pages_and_store_status(current_node, &pagelist,
7ca8783a 2295 status, start, i, nr_pages);
a7504ed1
HY
2296 if (err) {
2297 /* We have accounted for page i */
2298 if (err > 0)
2299 err--;
4afdacec 2300 goto out;
a7504ed1 2301 }
a49bd4d7 2302 current_node = NUMA_NO_NODE;
3140a227 2303 }
a49bd4d7
MH
2304out_flush:
2305 /* Make sure we do not overwrite the existing error */
ec47e250 2306 err1 = move_pages_and_store_status(current_node, &pagelist,
7ca8783a 2307 status, start, i, nr_pages);
dfe9aa23 2308 if (err >= 0)
a49bd4d7 2309 err = err1;
5e9a0f02 2310out:
361a2a22 2311 lru_cache_enable();
5e9a0f02
BG
2312 return err;
2313}
2314
742755a1 2315/*
2f007e74 2316 * Determine the nodes of an array of pages and store them in a status array.
742755a1 2317 */
80bba129
BG
2318static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2319 const void __user **pages, int *status)
742755a1 2320{
2f007e74 2321 unsigned long i;
2f007e74 2322
d8ed45c5 2323 mmap_read_lock(mm);
742755a1 2324
2f007e74 2325 for (i = 0; i < nr_pages; i++) {
80bba129 2326 unsigned long addr = (unsigned long)(*pages);
742755a1
CL
2327 struct vm_area_struct *vma;
2328 struct page *page;
c095adbc 2329 int err = -EFAULT;
2f007e74 2330
059b8b48
LH
2331 vma = vma_lookup(mm, addr);
2332 if (!vma)
742755a1
CL
2333 goto set_status;
2334
d899844e 2335 /* FOLL_DUMP to ignore special (like zero) pages */
16fd6b31 2336 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da
LT
2337
2338 err = PTR_ERR(page);
2339 if (IS_ERR(page))
2340 goto set_status;
2341
f7091ed6
HW
2342 err = -ENOENT;
2343 if (!page)
2344 goto set_status;
2345
2346 if (!is_zone_device_page(page))
4cd61484 2347 err = page_to_nid(page);
f7091ed6 2348
16fd6b31 2349 put_page(page);
742755a1 2350set_status:
80bba129
BG
2351 *status = err;
2352
2353 pages++;
2354 status++;
2355 }
2356
d8ed45c5 2357 mmap_read_unlock(mm);
80bba129
BG
2358}
2359
5b1b561b
AB
2360static int get_compat_pages_array(const void __user *chunk_pages[],
2361 const void __user * __user *pages,
2362 unsigned long chunk_nr)
2363{
2364 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2365 compat_uptr_t p;
2366 int i;
2367
2368 for (i = 0; i < chunk_nr; i++) {
2369 if (get_user(p, pages32 + i))
2370 return -EFAULT;
2371 chunk_pages[i] = compat_ptr(p);
2372 }
2373
2374 return 0;
2375}
2376
80bba129
BG
2377/*
 2378 * Determine the nodes of a user array of pages and store them in
 2379 * a user status array.
2380 */
2381static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2382 const void __user * __user *pages,
2383 int __user *status)
2384{
3eefb826 2385#define DO_PAGES_STAT_CHUNK_NR 16UL
80bba129
BG
2386 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2387 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
80bba129 2388
87b8d1ad 2389 while (nr_pages) {
3eefb826 2390 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
87b8d1ad 2391
5b1b561b
AB
2392 if (in_compat_syscall()) {
2393 if (get_compat_pages_array(chunk_pages, pages,
2394 chunk_nr))
2395 break;
2396 } else {
2397 if (copy_from_user(chunk_pages, pages,
2398 chunk_nr * sizeof(*chunk_pages)))
2399 break;
2400 }
80bba129
BG
2401
2402 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2403
87b8d1ad
PA
2404 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2405 break;
742755a1 2406
87b8d1ad
PA
2407 pages += chunk_nr;
2408 status += chunk_nr;
2409 nr_pages -= chunk_nr;
2410 }
2411 return nr_pages ? -EFAULT : 0;
742755a1
CL
2412}
2413
4dc200ce 2414static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
742755a1 2415{
742755a1 2416 struct task_struct *task;
742755a1 2417 struct mm_struct *mm;
742755a1 2418
4dc200ce
ML
2419 /*
 2420 * There is no need to check whether the current process has the right to
 2421 * modify the specified process when they are the same.
2422 */
2423 if (!pid) {
2424 mmget(current->mm);
2425 *mem_nodes = cpuset_mems_allowed(current);
2426 return current->mm;
2427 }
742755a1
CL
2428
2429 /* Find the mm_struct */
a879bf58 2430 rcu_read_lock();
4dc200ce 2431 task = find_task_by_vpid(pid);
742755a1 2432 if (!task) {
a879bf58 2433 rcu_read_unlock();
4dc200ce 2434 return ERR_PTR(-ESRCH);
742755a1 2435 }
3268c63e 2436 get_task_struct(task);
742755a1
CL
2437
2438 /*
2439 * Check if this process has the right to modify the specified
197e7e52 2440 * process. Use the regular "ptrace_may_access()" checks.
742755a1 2441 */
197e7e52 2442 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 2443 rcu_read_unlock();
4dc200ce 2444 mm = ERR_PTR(-EPERM);
5e9a0f02 2445 goto out;
742755a1 2446 }
c69e8d9c 2447 rcu_read_unlock();
742755a1 2448
4dc200ce
ML
2449 mm = ERR_PTR(security_task_movememory(task));
2450 if (IS_ERR(mm))
5e9a0f02 2451 goto out;
4dc200ce 2452 *mem_nodes = cpuset_mems_allowed(task);
3268c63e 2453 mm = get_task_mm(task);
4dc200ce 2454out:
3268c63e 2455 put_task_struct(task);
6e8b09ea 2456 if (!mm)
4dc200ce
ML
2457 mm = ERR_PTR(-EINVAL);
2458 return mm;
2459}
2460
2461/*
2462 * Move a list of pages in the address space of the currently executing
2463 * process.
2464 */
2465static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2466 const void __user * __user *pages,
2467 const int __user *nodes,
2468 int __user *status, int flags)
2469{
2470 struct mm_struct *mm;
2471 int err;
2472 nodemask_t task_nodes;
2473
2474 /* Check flags */
2475 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
6e8b09ea
SL
2476 return -EINVAL;
2477
4dc200ce
ML
2478 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2479 return -EPERM;
2480
2481 mm = find_mm_struct(pid, &task_nodes);
2482 if (IS_ERR(mm))
2483 return PTR_ERR(mm);
2484
6e8b09ea
SL
2485 if (nodes)
2486 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2487 nodes, status, flags);
2488 else
2489 err = do_pages_stat(mm, nr_pages, pages, status);
742755a1 2490
742755a1
CL
2491 mmput(mm);
2492 return err;
2493}
742755a1 2494
7addf443
DB
2495SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2496 const void __user * __user *, pages,
2497 const int __user *, nodes,
2498 int __user *, status, int, flags)
2499{
2500 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2501}
2502
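/*
 * Userspace view of this syscall (an editorial sketch, not part of this
 * file): the move_pages() wrapper is declared in libnuma's <numaif.h> (link
 * with -lnuma). Passing a nodes array requests migration; passing NULL for
 * nodes only queries, via status, the node each page currently resides on.
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { addr };		// hypothetical page-aligned address
 *	int nodes[1] = { 1 };			// request a move to node 1
 *	int status[1];
 *
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	// rc < 0: error; rc > 0: pages that could not be moved (see
 *	// do_pages_move() above); status[0] is the destination node on
 *	// success or a negative errno for that page.
 *
 *	move_pages(0, 1, pages, NULL, status, 0);	// query-only
 */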
7039e1db
PZ
2503#ifdef CONFIG_NUMA_BALANCING
2504/*
2505 * Returns true if this is a safe migration target node for misplaced NUMA
bc53008e 2506 * pages. Currently it only checks the watermarks which is crude.
7039e1db
PZ
2507 */
2508static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
3abef4e6 2509 unsigned long nr_migrate_pages)
7039e1db
PZ
2510{
2511 int z;
599d0c95 2512
7039e1db
PZ
2513 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2514 struct zone *zone = pgdat->node_zones + z;
2515
bc53008e 2516 if (!managed_zone(zone))
7039e1db
PZ
2517 continue;
2518
7039e1db
PZ
2519 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
2520 if (!zone_watermark_ok(zone, 0,
2521 high_wmark_pages(zone) +
2522 nr_migrate_pages,
bfe9d006 2523 ZONE_MOVABLE, 0))
7039e1db
PZ
2524 continue;
2525 return true;
2526 }
2527 return false;
2528}
2529
4e096ae1 2530static struct folio *alloc_misplaced_dst_folio(struct folio *src,
666feb21 2531 unsigned long data)
7039e1db
PZ
2532{
2533 int nid = (int) data;
4e096ae1 2534 int order = folio_order(src);
c185e494 2535 gfp_t gfp = __GFP_THISNODE;
c185e494
MWO
2536
2537 if (order > 0)
2538 gfp |= GFP_TRANSHUGE_LIGHT;
2539 else {
2540 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2541 __GFP_NOWARN;
2542 gfp &= ~__GFP_RECLAIM;
2543 }
4e096ae1 2544 return __folio_alloc_node(gfp, order, nid);
c5b5a3dd
YS
2545}
2546
2ac9e99f 2547static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
b32967ff 2548{
2ac9e99f 2549 int nr_pages = folio_nr_pages(folio);
a8f60772 2550
7039e1db 2551 /* Avoid migrating to a node that is nearly full */
c574bbe9
HY
2552 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2553 int z;
2554
2555 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2556 return 0;
2557 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
bc53008e 2558 if (managed_zone(pgdat->node_zones + z))
c574bbe9
HY
2559 break;
2560 }
2774f256
BP
2561
2562 /*
2563 * If there are no managed zones, it should not proceed
2564 * further.
2565 */
2566 if (z < 0)
2567 return 0;
2568
2ac9e99f
KW
2569 wakeup_kswapd(pgdat->node_zones + z, 0,
2570 folio_order(folio), ZONE_MOVABLE);
340ef390 2571 return 0;
c574bbe9 2572 }
7039e1db 2573
2ac9e99f 2574 if (!folio_isolate_lru(folio))
340ef390 2575 return 0;
7039e1db 2576
2ac9e99f 2577 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2b9b624f 2578 nr_pages);
340ef390 2579
149c33e1 2580 /*
2ac9e99f
KW
2581 * Isolating the folio has taken another reference, so the
2582 * caller's reference can be safely dropped without the folio
340ef390 2583 * disappearing underneath us during migration.
149c33e1 2584 */
2ac9e99f 2585 folio_put(folio);
340ef390 2586 return 1;
b32967ff
MG
2587}
2588
2589/*
73eab3ca 2590 * Attempt to migrate a misplaced folio to the specified destination
b32967ff 2591 * node. The caller is expected to have an elevated reference count on
73eab3ca 2592 * the folio that will be dropped by this function before returning.
b32967ff 2593 */
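/*
 * (Editorial note: the expected callers are the NUMA hinting fault paths,
 * e.g. do_numa_page() and do_huge_pmd_numa_page(), which pass in the folio
 * found at the faulting address together with the preferred target node.)
 */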
73eab3ca
KW
2594int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2595 int node)
b32967ff
MG
2596{
2597 pg_data_t *pgdat = NODE_DATA(node);
340ef390 2598 int isolated;
b32967ff 2599 int nr_remaining;
e39bb6be 2600 unsigned int nr_succeeded;
b32967ff 2601 LIST_HEAD(migratepages);
73eab3ca 2602 int nr_pages = folio_nr_pages(folio);
c5b5a3dd 2603
b32967ff 2604 /*
73eab3ca 2605 * Don't migrate file folios that are mapped in multiple processes
1bc115d8 2606 * with execute permissions as they are probably shared libraries.
ebb34f78
DH
2607 *
2608 * See folio_likely_mapped_shared() on possible imprecision when we
2609 * cannot easily detect if a folio is shared.
b32967ff 2610 */
ebb34f78 2611 if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) &&
7ee820ee 2612 (vma->vm_flags & VM_EXEC))
b32967ff 2613 goto out;
b32967ff 2614
09a913a7 2615 /*
73eab3ca
KW
2616 * Also do not migrate dirty folios as not all filesystems can move
 2617 * dirty folios in MIGRATE_ASYNC mode, which would be a waste of cycles.
09a913a7 2618 */
73eab3ca 2619 if (folio_is_file_lru(folio) && folio_test_dirty(folio))
09a913a7
MG
2620 goto out;
2621
73eab3ca 2622 isolated = numamigrate_isolate_folio(pgdat, folio);
b32967ff
MG
2623 if (!isolated)
2624 goto out;
2625
73eab3ca 2626 list_add(&folio->lru, &migratepages);
4e096ae1 2627 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
c185e494
MWO
2628 NULL, node, MIGRATE_ASYNC,
2629 MR_NUMA_MISPLACED, &nr_succeeded);
b32967ff 2630 if (nr_remaining) {
59c82b70 2631 if (!list_empty(&migratepages)) {
73eab3ca
KW
2632 list_del(&folio->lru);
2633 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
2634 folio_is_file_lru(folio), -nr_pages);
2635 folio_putback_lru(folio);
59c82b70 2636 }
b32967ff 2637 isolated = 0;
e39bb6be
HY
2638 }
2639 if (nr_succeeded) {
2640 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
73eab3ca 2641 if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
e39bb6be
HY
2642 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2643 nr_succeeded);
2644 }
7039e1db 2645 BUG_ON(!list_empty(&migratepages));
7039e1db 2646 return isolated;
340ef390
HD
2647
2648out:
73eab3ca 2649 folio_put(folio);
340ef390 2650 return 0;
7039e1db 2651}
220018d3 2652#endif /* CONFIG_NUMA_BALANCING */
91952440 2653#endif /* CONFIG_NUMA */