b2441318 1// SPDX-License-Identifier: GPL-2.0
b20a3503 2/*
14e0f9bc 3 * Memory Migration functionality - linux/mm/migrate.c
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
cde53535 13 * Christoph Lameter
14 */
15
16#include <linux/migrate.h>
b95f1b31 17#include <linux/export.h>
b20a3503 18#include <linux/swap.h>
0697212a 19#include <linux/swapops.h>
b20a3503 20#include <linux/pagemap.h>
e23ca00b 21#include <linux/buffer_head.h>
b20a3503 22#include <linux/mm_inline.h>
b488893a 23#include <linux/nsproxy.h>
e9995ef9 24#include <linux/ksm.h>
25#include <linux/rmap.h>
26#include <linux/topology.h>
27#include <linux/cpu.h>
28#include <linux/cpuset.h>
04e62a29 29#include <linux/writeback.h>
30#include <linux/mempolicy.h>
31#include <linux/vmalloc.h>
86c3a764 32#include <linux/security.h>
42cb14b1 33#include <linux/backing-dev.h>
bda807d4 34#include <linux/compaction.h>
4f5ca265 35#include <linux/syscalls.h>
7addf443 36#include <linux/compat.h>
290408d4 37#include <linux/hugetlb.h>
8e6ac7fa 38#include <linux/hugetlb_cgroup.h>
5a0e3ad6 39#include <linux/gfp.h>
df6ad698 40#include <linux/pfn_t.h>
a5430dda 41#include <linux/memremap.h>
8315ada7 42#include <linux/userfaultfd_k.h>
bf6bddf1 43#include <linux/balloon_compaction.h>
33c3fc71 44#include <linux/page_idle.h>
d435edca 45#include <linux/page_owner.h>
6e84f315 46#include <linux/sched/mm.h>
197e7e52 47#include <linux/ptrace.h>
34290e2c 48#include <linux/oom.h>
884a6e5d 49#include <linux/memory.h>
ac16ec83 50#include <linux/random.h>
c574bbe9 51#include <linux/sched/sysctl.h>
467b171a 52#include <linux/memory-tiers.h>
b20a3503 53
54#include <asm/tlbflush.h>
55
56#include <trace/events/migrate.h>
57
58#include "internal.h"
59
cd775580 60bool isolate_movable_page(struct page *page, isolate_mode_t mode)
bda807d4 61{
19979497 62 struct folio *folio = folio_get_nontail_page(page);
68f2736a 63 const struct movable_operations *mops;
64
65 /*
 66 * Avoid burning cycles with pages that are still under __free_pages(),
 67 * or that just got freed under us.
 68 *
 69 * In case we 'win' a race for a movable page that is being freed under us
 70 * and raise its refcount, preventing __free_pages() from doing its job,
 71 * the put_page() at the end of this block will take care of
 72 * releasing this page, thus avoiding a nasty leakage.
73 */
19979497 74 if (!folio)
75 goto out;
76
77 if (unlikely(folio_test_slab(folio)))
78 goto out_putfolio;
79 /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
80 smp_rmb();
bda807d4 81 /*
82 * Check movable flag before taking the page lock because
83 * we use non-atomic bitops on newly allocated page flags so
84 * unconditionally grabbing the lock ruins page's owner side.
bda807d4 85 */
86 if (unlikely(!__folio_test_movable(folio)))
87 goto out_putfolio;
88 /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
89 smp_rmb();
90 if (unlikely(folio_test_slab(folio)))
91 goto out_putfolio;
8b881763 92
93 /*
94 * As movable pages are not isolated from LRU lists, concurrent
95 * compaction threads can race against page migration functions
 96 * as well as race against a page that is being released.
97 *
98 * In order to avoid having an already isolated movable page
99 * being (wrongly) re-isolated while it is under migration,
100 * or to avoid attempting to isolate pages being released,
 101 * let's be sure we have the page lock
102 * before proceeding with the movable page isolation steps.
103 */
104 if (unlikely(!folio_trylock(folio)))
105 goto out_putfolio;
bda807d4 106
19979497 107 if (!folio_test_movable(folio) || folio_test_isolated(folio))
108 goto out_no_isolated;
109
110 mops = folio_movable_ops(folio);
111 VM_BUG_ON_FOLIO(!mops, folio);
bda807d4 112
19979497 113 if (!mops->isolate_page(&folio->page, mode))
114 goto out_no_isolated;
115
116 /* Driver shouldn't use PG_isolated bit of page->flags */
117 WARN_ON_ONCE(folio_test_isolated(folio));
118 folio_set_isolated(folio);
119 folio_unlock(folio);
bda807d4 120
cd775580 121 return true;
122
123out_no_isolated:
124 folio_unlock(folio);
125out_putfolio:
126 folio_put(folio);
bda807d4 127out:
cd775580 128 return false;
129}
130
280d724a 131static void putback_movable_folio(struct folio *folio)
bda807d4 132{
280d724a 133 const struct movable_operations *mops = folio_movable_ops(folio);
bda807d4 134
135 mops->putback_page(&folio->page);
136 folio_clear_isolated(folio);
137}
138
139/*
140 * Put previously isolated pages back onto the appropriate lists
141 * from where they were once taken off for compaction/migration.
142 *
143 * This function shall be used whenever the isolated pageset has been
144 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
7ce82f4c 145 * and isolate_hugetlb().
146 */
147void putback_movable_pages(struct list_head *l)
148{
149 struct folio *folio;
150 struct folio *folio2;
5733c7d1 151
152 list_for_each_entry_safe(folio, folio2, l, lru) {
153 if (unlikely(folio_test_hugetlb(folio))) {
154 folio_putback_active_hugetlb(folio);
155 continue;
156 }
280d724a 157 list_del(&folio->lru);
bda807d4 158 /*
159 * We isolated non-lru movable folio so here we can use
160 * __PageMovable because LRU folio's mapping cannot have
161 * PAGE_MAPPING_MOVABLE.
162 */
163 if (unlikely(__folio_test_movable(folio))) {
164 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
165 folio_lock(folio);
166 if (folio_test_movable(folio))
167 putback_movable_folio(folio);
bda807d4 168 else
169 folio_clear_isolated(folio);
170 folio_unlock(folio);
171 folio_put(folio);
bda807d4 172 } else {
173 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
174 folio_is_file_lru(folio), -folio_nr_pages(folio));
175 folio_putback_lru(folio);
bda807d4 176 }
b20a3503 177 }
178}
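
/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * memory offlining or compaction is expected to pair isolate_movable_page()
 * with putback_movable_pages().  The demo_* name is hypothetical and error
 * handling is reduced to the minimum.
 */
static __maybe_unused void demo_isolate_then_putback(struct page *page)
{
	LIST_HEAD(movable_pages);

	/* Takes a reference and sets PG_isolated on success. */
	if (!isolate_movable_page(page, ISOLATE_UNEVICTABLE))
		return;

	list_add(&page_folio(page)->lru, &movable_pages);

	/*
	 * Normally the list would be handed to migrate_pages(); if migration
	 * is not attempted (or fails), every isolated page must go back.
	 */
	putback_movable_pages(&movable_pages);
}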
179
180/*
181 * Restore a potential migration pte to a working pte entry
182 */
183static bool remove_migration_pte(struct folio *folio,
184 struct vm_area_struct *vma, unsigned long addr, void *old)
0697212a 185{
4eecb8b9 186 DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
0697212a 187
3fe87967 188 while (page_vma_mapped_walk(&pvmw)) {
6c287605 189 rmap_t rmap_flags = RMAP_NONE;
c33c7948 190 pte_t old_pte;
191 pte_t pte;
192 swp_entry_t entry;
193 struct page *new;
194 unsigned long idx = 0;
195
196 /* pgoff is invalid for ksm pages, but they are never large */
197 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
198 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
199 new = folio_page(folio, idx);
0697212a 200
201#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202 /* PMD-mapped THP migration entry */
203 if (!pvmw.pte) {
204 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205 !folio_test_pmd_mappable(folio), folio);
206 remove_migration_pmd(&pvmw, new);
207 continue;
208 }
209#endif
210
4eecb8b9 211 folio_get(folio);
2e346877 212 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
213 old_pte = ptep_get(pvmw.pte);
214 if (pte_swp_soft_dirty(old_pte))
3fe87967 215 pte = pte_mksoft_dirty(pte);
0697212a 216
c33c7948 217 entry = pte_to_swp_entry(old_pte);
218 if (!is_migration_entry_young(entry))
219 pte = pte_mkold(pte);
220 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
221 pte = pte_mkdirty(pte);
4dd845b5 222 if (is_writable_migration_entry(entry))
161e393c 223 pte = pte_mkwrite(pte, vma);
c33c7948 224 else if (pte_swp_uffd_wp(old_pte))
f45ec5ff 225 pte = pte_mkuffd_wp(pte);
d3cb8bf6 226
227 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
228 rmap_flags |= RMAP_EXCLUSIVE;
229
6128763f 230 if (unlikely(is_device_private_page(new))) {
231 if (pte_write(pte))
232 entry = make_writable_device_private_entry(
233 page_to_pfn(new));
234 else
235 entry = make_readable_device_private_entry(
236 page_to_pfn(new));
6128763f 237 pte = swp_entry_to_pte(entry);
c33c7948 238 if (pte_swp_soft_dirty(old_pte))
3d321bf8 239 pte = pte_swp_mksoft_dirty(pte);
c33c7948 240 if (pte_swp_uffd_wp(old_pte))
6128763f 241 pte = pte_swp_mkuffd_wp(pte);
d2b2c6dd 242 }
a5430dda 243
3ef8fd7f 244#ifdef CONFIG_HUGETLB_PAGE
4eecb8b9 245 if (folio_test_hugetlb(folio)) {
246 struct hstate *h = hstate_vma(vma);
247 unsigned int shift = huge_page_shift(h);
248 unsigned long psize = huge_page_size(h);
79c1c594 249
79c1c594 250 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
4eecb8b9 251 if (folio_test_anon(folio))
28c5209d 252 hugepage_add_anon_rmap(new, vma, pvmw.address,
6c287605 253 rmap_flags);
3fe87967 254 else
fb3d824d 255 page_dup_file_rmap(new, true);
256 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
257 psize);
258 } else
259#endif
260 {
4eecb8b9 261 if (folio_test_anon(folio))
f1e2db12 262 page_add_anon_rmap(new, vma, pvmw.address,
6c287605 263 rmap_flags);
383321ab 264 else
cea86fe2 265 page_add_file_rmap(new, vma, false);
1eba86c0 266 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
383321ab 267 }
b7435507 268 if (vma->vm_flags & VM_LOCKED)
96f97c43 269 mlock_drain_local();
e125fe40 270
271 trace_remove_migration_pte(pvmw.address, pte_val(pte),
272 compound_order(new));
273
274 /* No need to invalidate - it was non-present before */
275 update_mmu_cache(vma, pvmw.address, pvmw.pte);
276 }
51afb12b 277
e4b82222 278 return true;
279}
280
281/*
282 * Get rid of all migration entries and replace them by
283 * references to the indicated page.
284 */
4eecb8b9 285void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
04e62a29 286{
287 struct rmap_walk_control rwc = {
288 .rmap_one = remove_migration_pte,
4eecb8b9 289 .arg = src,
290 };
291
e388466d 292 if (locked)
2f031c6f 293 rmap_walk_locked(dst, &rwc);
e388466d 294 else
2f031c6f 295 rmap_walk(dst, &rwc);
296}
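
/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * try_to_migrate() with remove_migration_ptes().  On success the migration
 * entries are rewritten to point at @dst, on failure back at @src, which is
 * exactly what the migration core further down in this file does.
 */
static __maybe_unused void demo_fixup_migration_ptes(struct folio *src,
		struct folio *dst, bool migrated)
{
	remove_migration_ptes(src, migrated ? dst : src, false);
}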
297
298/*
299 * Something used the pte of a page under migration. We need to
300 * get to the page and wait until migration is finished.
301 * When we return from this function the fault will be retried.
0697212a 302 */
303void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
304 unsigned long address)
0697212a 305{
306 spinlock_t *ptl;
307 pte_t *ptep;
30dad309 308 pte_t pte;
0697212a 309 swp_entry_t entry;
0697212a 310
0cb8fd4d 311 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
312 if (!ptep)
313 return;
314
c33c7948 315 pte = ptep_get(ptep);
316 pte_unmap(ptep);
317
318 if (!is_swap_pte(pte))
319 goto out;
320
321 entry = pte_to_swp_entry(pte);
322 if (!is_migration_entry(entry))
323 goto out;
324
0cb8fd4d 325 migration_entry_wait_on_locked(entry, ptl);
326 return;
327out:
0cb8fd4d 328 spin_unlock(ptl);
329}
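
/*
 * Illustrative sketch (not part of the original file): how a page-fault path
 * typically reaches migration_entry_wait(), mirroring the structure of
 * do_swap_page().  The vm_fault fields used here are the standard ones; the
 * demo_* name is hypothetical.
 */
static __maybe_unused void demo_wait_for_migration(struct vm_fault *vmf)
{
	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);

	if (is_migration_entry(entry))
		migration_entry_wait(vmf->vma->vm_mm, vmf->pmd, vmf->address);
}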
330
ad1ac596 331#ifdef CONFIG_HUGETLB_PAGE
332/*
333 * The vma read lock must be held upon entry. Holding that lock prevents either
334 * the pte or the ptl from being freed.
335 *
336 * This function will release the vma lock before returning.
337 */
0cb8fd4d 338void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
30dad309 339{
0cb8fd4d 340 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
341 pte_t pte;
342
fcd48540 343 hugetlb_vma_assert_locked(vma);
344 spin_lock(ptl);
345 pte = huge_ptep_get(ptep);
346
fcd48540 347 if (unlikely(!is_hugetlb_entry_migration(pte))) {
ad1ac596 348 spin_unlock(ptl);
349 hugetlb_vma_unlock_read(vma);
350 } else {
351 /*
352 * If migration entry existed, safe to release vma lock
353 * here because the pgtable page won't be freed without the
354 * pgtable lock released. See comment right above pgtable
355 * lock release in migration_entry_wait_on_locked().
356 */
357 hugetlb_vma_unlock_read(vma);
0cb8fd4d 358 migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
fcd48540 359 }
30dad309 360}
361#endif
362
363#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
364void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
365{
366 spinlock_t *ptl;
367
368 ptl = pmd_lock(mm, pmd);
369 if (!is_pmd_migration_entry(*pmd))
370 goto unlock;
0cb8fd4d 371 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
372 return;
373unlock:
374 spin_unlock(ptl);
375}
376#endif
377
378static int folio_expected_refs(struct address_space *mapping,
379 struct folio *folio)
0b3901b3 380{
381 int refs = 1;
382 if (!mapping)
383 return refs;
0b3901b3 384
385 refs += folio_nr_pages(folio);
386 if (folio_test_private(folio))
387 refs++;
388
389 return refs;
390}
391
b20a3503 392/*
c3fcf8a5 393 * Replace the page in the mapping.
394 *
395 * The number of remaining references must be:
396 * 1 for anonymous pages without a mapping
397 * 2 for pages with a mapping
266cf658 398 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
b20a3503 399 */
400int folio_migrate_mapping(struct address_space *mapping,
401 struct folio *newfolio, struct folio *folio, int extra_count)
b20a3503 402{
3417013e 403 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
404 struct zone *oldzone, *newzone;
405 int dirty;
108ca835 406 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
3417013e 407 long nr = folio_nr_pages(folio);
8763cb45 408
6c5240ae 409 if (!mapping) {
0e8c7d0f 410 /* Anonymous page without mapping */
3417013e 411 if (folio_ref_count(folio) != expected_count)
6c5240ae 412 return -EAGAIN;
413
414 /* No turning back from here */
415 newfolio->index = folio->index;
416 newfolio->mapping = folio->mapping;
417 if (folio_test_swapbacked(folio))
418 __folio_set_swapbacked(newfolio);
cf4b769a 419
78bd5209 420 return MIGRATEPAGE_SUCCESS;
421 }
422
423 oldzone = folio_zone(folio);
424 newzone = folio_zone(newfolio);
42cb14b1 425
89eb946a 426 xas_lock_irq(&xas);
3417013e 427 if (!folio_ref_freeze(folio, expected_count)) {
89eb946a 428 xas_unlock_irq(&xas);
429 return -EAGAIN;
430 }
431
b20a3503 432 /*
3417013e 433 * Now we know that no one else is looking at the folio:
cf4b769a 434 * no turning back from here.
b20a3503 435 */
436 newfolio->index = folio->index;
437 newfolio->mapping = folio->mapping;
438 folio_ref_add(newfolio, nr); /* add cache reference */
439 if (folio_test_swapbacked(folio)) {
440 __folio_set_swapbacked(newfolio);
441 if (folio_test_swapcache(folio)) {
442 folio_set_swapcache(newfolio);
443 newfolio->private = folio_get_private(folio);
444 }
445 } else {
3417013e 446 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
447 }
448
42cb14b1 449 /* Move dirty while page refs frozen and newpage not yet exposed */
3417013e 450 dirty = folio_test_dirty(folio);
42cb14b1 451 if (dirty) {
452 folio_clear_dirty(folio);
453 folio_set_dirty(newfolio);
454 }
455
3417013e 456 xas_store(&xas, newfolio);
457
458 /*
459 * Drop cache reference from old page by unfreezing
460 * to one less reference.
461 * We know this isn't the last reference.
462 */
3417013e 463 folio_ref_unfreeze(folio, expected_count - nr);
7cf9c2c7 464
89eb946a 465 xas_unlock(&xas);
466 /* Leave irq disabled to prevent preemption while updating stats */
467
468 /*
469 * If moved to a different zone then also account
470 * the page for that zone. Other VM counters will be
471 * taken care of when we establish references to the
472 * new page and drop references to the old page.
473 *
474 * Note that anonymous pages are accounted for
4b9d0fab 475 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
476 * are mapped to swap space.
477 */
42cb14b1 478 if (newzone != oldzone) {
479 struct lruvec *old_lruvec, *new_lruvec;
480 struct mem_cgroup *memcg;
481
3417013e 482 memcg = folio_memcg(folio);
483 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
484 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
485
486 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
487 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
3417013e 488 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
489 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
490 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
491
492 if (folio_test_pmd_mappable(folio)) {
493 __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
494 __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
495 }
42cb14b1 496 }
b6038942 497#ifdef CONFIG_SWAP
3417013e 498 if (folio_test_swapcache(folio)) {
499 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
500 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
501 }
502#endif
f56753ac 503 if (dirty && mapping_can_writeback(mapping)) {
504 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
505 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
506 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
507 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
42cb14b1 508 }
4b02108a 509 }
42cb14b1 510 local_irq_enable();
b20a3503 511
78bd5209 512 return MIGRATEPAGE_SUCCESS;
b20a3503 513}
3417013e 514EXPORT_SYMBOL(folio_migrate_mapping);
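
/*
 * Illustrative sketch (not part of the original file): what the "remaining
 * references" rule from the comment above works out to for an order-0 folio.
 * demo_* is hypothetical; the constants follow folio_expected_refs().
 */
static __maybe_unused int demo_expected_refs_order0(struct folio *folio)
{
	if (!folio->mapping)
		return 1;		/* isolation reference only */
	if (folio_test_private(folio))
		return 3;		/* + page cache + private data */
	return 2;			/* + page cache */
}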
b20a3503 515
516/*
517 * The expected number of remaining references is the same as that
3417013e 518 * of folio_migrate_mapping().
519 */
520int migrate_huge_page_move_mapping(struct address_space *mapping,
b890ec2a 521 struct folio *dst, struct folio *src)
290408d4 522{
b890ec2a 523 XA_STATE(xas, &mapping->i_pages, folio_index(src));
290408d4 524 int expected_count;
290408d4 525
89eb946a 526 xas_lock_irq(&xas);
527 expected_count = 2 + folio_has_private(src);
528 if (!folio_ref_freeze(src, expected_count)) {
89eb946a 529 xas_unlock_irq(&xas);
530 return -EAGAIN;
531 }
532
533 dst->index = src->index;
534 dst->mapping = src->mapping;
6a93ca8f 535
b890ec2a 536 folio_get(dst);
290408d4 537
b890ec2a 538 xas_store(&xas, dst);
290408d4 539
b890ec2a 540 folio_ref_unfreeze(src, expected_count - 1);
290408d4 541
89eb946a 542 xas_unlock_irq(&xas);
6a93ca8f 543
78bd5209 544 return MIGRATEPAGE_SUCCESS;
545}
546
b20a3503 547/*
19138349 548 * Copy the flags and some other ancillary information
b20a3503 549 */
19138349 550void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
b20a3503 551{
552 int cpupid;
553
554 if (folio_test_error(folio))
555 folio_set_error(newfolio);
556 if (folio_test_referenced(folio))
557 folio_set_referenced(newfolio);
558 if (folio_test_uptodate(folio))
559 folio_mark_uptodate(newfolio);
560 if (folio_test_clear_active(folio)) {
561 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
562 folio_set_active(newfolio);
563 } else if (folio_test_clear_unevictable(folio))
564 folio_set_unevictable(newfolio);
565 if (folio_test_workingset(folio))
566 folio_set_workingset(newfolio);
567 if (folio_test_checked(folio))
568 folio_set_checked(newfolio);
569 /*
570 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
571 * migration entries. We can still have PG_anon_exclusive set on an
572 * effectively unmapped and unreferenced first sub-pages of an
573 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
574 */
575 if (folio_test_mappedtodisk(folio))
576 folio_set_mappedtodisk(newfolio);
b20a3503 577
3417013e 578 /* Move dirty on pages not done by folio_migrate_mapping() */
579 if (folio_test_dirty(folio))
580 folio_set_dirty(newfolio);
b20a3503 581
582 if (folio_test_young(folio))
583 folio_set_young(newfolio);
584 if (folio_test_idle(folio))
585 folio_set_idle(newfolio);
33c3fc71 586
587 /*
588 * Copy NUMA information to the new page, to prevent over-eager
589 * future migrations of this same page.
590 */
19138349 591 cpupid = page_cpupid_xchg_last(&folio->page, -1);
592 /*
 593 * For memory tiering mode, when migrating between slow and fast
 594 * memory nodes, reset cpupid, because it is used to record
 595 * page access time in the slow memory node.
596 */
597 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
598 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
599 bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
600
601 if (f_toptier != t_toptier)
602 cpupid = -1;
603 }
19138349 604 page_cpupid_xchg_last(&newfolio->page, cpupid);
7851a45c 605
19138349 606 folio_migrate_ksm(newfolio, folio);
607 /*
608 * Please do not reorder this without considering how mm/ksm.c's
609 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
610 */
611 if (folio_test_swapcache(folio))
612 folio_clear_swapcache(folio);
613 folio_clear_private(folio);
614
615 /* page->private contains hugetlb specific flags */
616 if (!folio_test_hugetlb(folio))
617 folio->private = NULL;
618
619 /*
620 * If any waiters have accumulated on the new page then
621 * wake them up.
622 */
623 if (folio_test_writeback(newfolio))
624 folio_end_writeback(newfolio);
d435edca 625
626 /*
627 * PG_readahead shares the same bit with PG_reclaim. The above
628 * end_page_writeback() may clear PG_readahead mistakenly, so set the
629 * bit after that.
630 */
631 if (folio_test_readahead(folio))
632 folio_set_readahead(newfolio);
6aeff241 633
19138349 634 folio_copy_owner(newfolio, folio);
74485cf2 635
19138349 636 if (!folio_test_hugetlb(folio))
d21bba2b 637 mem_cgroup_migrate(folio, newfolio);
b20a3503 638}
19138349 639EXPORT_SYMBOL(folio_migrate_flags);
2916ecc0 640
715cbfd6 641void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
2916ecc0 642{
643 folio_copy(newfolio, folio);
644 folio_migrate_flags(newfolio, folio);
2916ecc0 645}
715cbfd6 646EXPORT_SYMBOL(folio_migrate_copy);
b20a3503 647
648/************************************************************
649 * Migration functions
650 ***********************************************************/
651
652int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
653 struct folio *src, enum migrate_mode mode, int extra_count)
654{
655 int rc;
656
657 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
658
659 rc = folio_migrate_mapping(mapping, dst, src, extra_count);
660
661 if (rc != MIGRATEPAGE_SUCCESS)
662 return rc;
663
664 if (mode != MIGRATE_SYNC_NO_COPY)
665 folio_migrate_copy(dst, src);
666 else
667 folio_migrate_flags(dst, src);
668 return MIGRATEPAGE_SUCCESS;
669}
670
671/**
672 * migrate_folio() - Simple folio migration.
673 * @mapping: The address_space containing the folio.
674 * @dst: The folio to migrate the data to.
675 * @src: The folio containing the current data.
676 * @mode: How to migrate the page.
677 *
678 * Common logic to directly migrate a single LRU folio suitable for
679 * folios that do not use PagePrivate/PagePrivate2.
b20a3503 680 *
54184650 681 * Folios are locked upon entry and exit.
b20a3503 682 */
683int migrate_folio(struct address_space *mapping, struct folio *dst,
684 struct folio *src, enum migrate_mode mode)
b20a3503 685{
16ce101d 686 return migrate_folio_extra(mapping, dst, src, mode, 0);
b20a3503 687}
54184650 688EXPORT_SYMBOL(migrate_folio);
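
/*
 * Illustrative sketch (not part of the original file): a filesystem whose
 * folios carry no private data can wire migrate_folio() straight into its
 * address_space_operations, as several in-tree filesystems do.  The
 * demo_aops name is hypothetical.
 */
static __maybe_unused const struct address_space_operations demo_aops = {
	.migrate_folio	= migrate_folio,
};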
b20a3503 689
925c86a1 690#ifdef CONFIG_BUFFER_HEAD
691/* Returns true if all buffers are successfully locked */
692static bool buffer_migrate_lock_buffers(struct buffer_head *head,
693 enum migrate_mode mode)
694{
695 struct buffer_head *bh = head;
4bb6dc79 696 struct buffer_head *failed_bh;
84ade7c1 697
84ade7c1 698 do {
84ade7c1 699 if (!trylock_buffer(bh)) {
700 if (mode == MIGRATE_ASYNC)
701 goto unlock;
702 if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
703 goto unlock;
704 lock_buffer(bh);
705 }
706
707 bh = bh->b_this_page;
708 } while (bh != head);
4bb6dc79 709
84ade7c1 710 return true;
711
712unlock:
713 /* We failed to lock the buffer and cannot stall. */
714 failed_bh = bh;
715 bh = head;
716 while (bh != failed_bh) {
717 unlock_buffer(bh);
718 bh = bh->b_this_page;
719 }
720
721 return false;
722}
723
724static int __buffer_migrate_folio(struct address_space *mapping,
725 struct folio *dst, struct folio *src, enum migrate_mode mode,
89cb0888 726 bool check_refs)
1d8b85cc 727{
728 struct buffer_head *bh, *head;
729 int rc;
cc4f11e6 730 int expected_count;
1d8b85cc 731
67235182
MWO
732 head = folio_buffers(src);
733 if (!head)
54184650 734 return migrate_folio(mapping, dst, src, mode);
1d8b85cc 735
cc4f11e6 736 /* Check whether page does not have extra refs before we do more work */
108ca835 737 expected_count = folio_expected_refs(mapping, src);
67235182 738 if (folio_ref_count(src) != expected_count)
cc4f11e6 739 return -EAGAIN;
1d8b85cc 740
741 if (!buffer_migrate_lock_buffers(head, mode))
742 return -EAGAIN;
1d8b85cc 743
744 if (check_refs) {
745 bool busy;
746 bool invalidated = false;
747
748recheck_buffers:
749 busy = false;
750 spin_lock(&mapping->private_lock);
751 bh = head;
752 do {
753 if (atomic_read(&bh->b_count)) {
754 busy = true;
755 break;
756 }
757 bh = bh->b_this_page;
758 } while (bh != head);
759 if (busy) {
760 if (invalidated) {
761 rc = -EAGAIN;
762 goto unlock_buffers;
763 }
ebdf4de5 764 spin_unlock(&mapping->private_lock);
765 invalidate_bh_lrus();
766 invalidated = true;
767 goto recheck_buffers;
768 }
769 }
770
67235182 771 rc = folio_migrate_mapping(mapping, dst, src, 0);
78bd5209 772 if (rc != MIGRATEPAGE_SUCCESS)
cc4f11e6 773 goto unlock_buffers;
1d8b85cc 774
67235182 775 folio_attach_private(dst, folio_detach_private(src));
776
777 bh = head;
778 do {
d5db4f9d 779 folio_set_bh(bh, dst, bh_offset(bh));
1d8b85cc 780 bh = bh->b_this_page;
781 } while (bh != head);
782
2916ecc0 783 if (mode != MIGRATE_SYNC_NO_COPY)
67235182 784 folio_migrate_copy(dst, src);
2916ecc0 785 else
67235182 786 folio_migrate_flags(dst, src);
1d8b85cc 787
788 rc = MIGRATEPAGE_SUCCESS;
789unlock_buffers:
790 if (check_refs)
791 spin_unlock(&mapping->private_lock);
792 bh = head;
793 do {
794 unlock_buffer(bh);
1d8b85cc 795 bh = bh->b_this_page;
796 } while (bh != head);
797
cc4f11e6 798 return rc;
1d8b85cc 799}
89cb0888 800
801/**
802 * buffer_migrate_folio() - Migration function for folios with buffers.
803 * @mapping: The address space containing @src.
804 * @dst: The folio to migrate to.
805 * @src: The folio to migrate from.
806 * @mode: How to migrate the folio.
807 *
808 * This function can only be used if the underlying filesystem guarantees
809 * that no other references to @src exist. For example attached buffer
810 * heads are accessed only under the folio lock. If your filesystem cannot
811 * provide this guarantee, buffer_migrate_folio_norefs() may be more
812 * appropriate.
813 *
814 * Return: 0 on success or a negative errno on failure.
89cb0888 815 */
816int buffer_migrate_folio(struct address_space *mapping,
817 struct folio *dst, struct folio *src, enum migrate_mode mode)
89cb0888 818{
67235182 819 return __buffer_migrate_folio(mapping, dst, src, mode, false);
89cb0888 820}
821EXPORT_SYMBOL(buffer_migrate_folio);
822
823/**
824 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
825 * @mapping: The address space containing @src.
826 * @dst: The folio to migrate to.
827 * @src: The folio to migrate from.
828 * @mode: How to migrate the folio.
829 *
830 * Like buffer_migrate_folio() except that this variant is more careful
831 * and checks that there are also no buffer head references. This function
832 * is the right one for mappings where buffer heads are directly looked
833 * up and referenced (such as block device mappings).
834 *
835 * Return: 0 on success or a negative errno on failure.
89cb0888 836 */
837int buffer_migrate_folio_norefs(struct address_space *mapping,
838 struct folio *dst, struct folio *src, enum migrate_mode mode)
89cb0888 839{
67235182 840 return __buffer_migrate_folio(mapping, dst, src, mode, true);
89cb0888 841}
e26355e2 842EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
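
/*
 * Illustrative sketch (not part of the original file): how the two buffer
 * head variants are typically chosen.  A filesystem whose buffer heads are
 * only touched under the folio lock can use buffer_migrate_folio(); a
 * mapping where buffer heads are looked up and referenced directly (block
 * device mappings, for instance) wants the stricter _norefs variant.  The
 * demo_* name is hypothetical.
 */
static __maybe_unused const struct address_space_operations demo_bdev_like_aops = {
	.migrate_folio	= buffer_migrate_folio_norefs,
};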
925c86a1 843#endif /* CONFIG_BUFFER_HEAD */
1d8b85cc 844
845int filemap_migrate_folio(struct address_space *mapping,
846 struct folio *dst, struct folio *src, enum migrate_mode mode)
847{
848 int ret;
849
850 ret = folio_migrate_mapping(mapping, dst, src, 0);
851 if (ret != MIGRATEPAGE_SUCCESS)
852 return ret;
853
854 if (folio_get_private(src))
855 folio_attach_private(dst, folio_detach_private(src));
856
857 if (mode != MIGRATE_SYNC_NO_COPY)
858 folio_migrate_copy(dst, src);
859 else
860 folio_migrate_flags(dst, src);
861 return MIGRATEPAGE_SUCCESS;
862}
863EXPORT_SYMBOL_GPL(filemap_migrate_folio);
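
/*
 * Illustrative sketch (not part of the original file): filemap_migrate_folio()
 * suits filesystems whose private data only needs to be carried over to the
 * destination folio.  A filesystem with extra bookkeeping could wrap it as
 * below; demo_* is hypothetical.
 */
static __maybe_unused int demo_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	/* Update any filesystem-side accounting here, then delegate. */
	return filemap_migrate_folio(mapping, dst, src, mode);
}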
864
04e62a29 865/*
2be7fa10 866 * Writeback a folio to clean the dirty state
04e62a29 867 */
2be7fa10 868static int writeout(struct address_space *mapping, struct folio *folio)
8351a6e4 869{
870 struct writeback_control wbc = {
871 .sync_mode = WB_SYNC_NONE,
872 .nr_to_write = 1,
873 .range_start = 0,
874 .range_end = LLONG_MAX,
875 .for_reclaim = 1
876 };
877 int rc;
878
879 if (!mapping->a_ops->writepage)
880 /* No write method for the address space */
881 return -EINVAL;
882
2be7fa10 883 if (!folio_clear_dirty_for_io(folio))
04e62a29
CL
884 /* Someone else already triggered a write */
885 return -EAGAIN;
886
8351a6e4 887 /*
888 * A dirty folio may imply that the underlying filesystem has
889 * the folio on some queue. So the folio must be clean for
890 * migration. Writeout may mean we lose the lock and the
891 * folio state is no longer what we checked for earlier.
892 * At this point we know that the migration attempt cannot
893 * be successful.
8351a6e4 894 */
4eecb8b9 895 remove_migration_ptes(folio, folio, false);
8351a6e4 896
2be7fa10 897 rc = mapping->a_ops->writepage(&folio->page, &wbc);
8351a6e4 898
899 if (rc != AOP_WRITEPAGE_ACTIVATE)
900 /* unlocked. Relock */
2be7fa10 901 folio_lock(folio);
04e62a29 902
bda8550d 903 return (rc < 0) ? -EIO : -EAGAIN;
904}
905
906/*
907 * Default handling if a filesystem does not provide a migration function.
908 */
909static int fallback_migrate_folio(struct address_space *mapping,
910 struct folio *dst, struct folio *src, enum migrate_mode mode)
04e62a29 911{
912 if (folio_test_dirty(src)) {
913 /* Only writeback folios in full synchronous migration */
914 switch (mode) {
915 case MIGRATE_SYNC:
916 case MIGRATE_SYNC_NO_COPY:
917 break;
918 default:
b969c4ab 919 return -EBUSY;
2916ecc0 920 }
2be7fa10 921 return writeout(mapping, src);
b969c4ab 922 }
923
924 /*
925 * Buffers may be managed in a filesystem specific way.
926 * We must have no buffers or drop them.
927 */
0201ebf2 928 if (!filemap_release_folio(src, GFP_KERNEL))
806031bb 929 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
8351a6e4 930
54184650 931 return migrate_folio(mapping, dst, src, mode);
8351a6e4
CL
932}
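
/*
 * Illustrative sketch (not part of the original file): the mode check used
 * above, written out as a helper.  Only the fully synchronous modes may
 * start writeback on a dirty folio during migration.
 */
static __maybe_unused bool demo_can_writeout(enum migrate_mode mode)
{
	switch (mode) {
	case MIGRATE_SYNC:
	case MIGRATE_SYNC_NO_COPY:
		return true;
	default:
		return false;
	}
}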
933
934/*
935 * Move a page to a newly allocated page
936 * The page is locked and all ptes have been successfully removed.
937 *
938 * The new page will have replaced the old page if this function
939 * is successful.
940 *
941 * Return value:
942 * < 0 - error code
78bd5209 943 * MIGRATEPAGE_SUCCESS - success
e24f0b8f 944 */
e7e3ffeb 945static int move_to_new_folio(struct folio *dst, struct folio *src,
5c3f9a67 946 enum migrate_mode mode)
e24f0b8f 947{
bda807d4 948 int rc = -EAGAIN;
e7e3ffeb 949 bool is_lru = !__PageMovable(&src->page);
e24f0b8f 950
951 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
952 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
e24f0b8f 953
bda807d4 954 if (likely(is_lru)) {
955 struct address_space *mapping = folio_mapping(src);
956
bda807d4 957 if (!mapping)
54184650 958 rc = migrate_folio(mapping, dst, src, mode);
5490da4f 959 else if (mapping->a_ops->migrate_folio)
bda807d4 960 /*
961 * Most folios have a mapping and most filesystems
962 * provide a migrate_folio callback. Anonymous folios
bda807d4 963 * are part of swap space which also has its own
5490da4f 964 * migrate_folio callback. This is the most common path
965 * for page migration.
966 */
967 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
968 mode);
bda807d4 969 else
8faa8ef5 970 rc = fallback_migrate_folio(mapping, dst, src, mode);
bda807d4 971 } else {
972 const struct movable_operations *mops;
973
e24f0b8f 974 /*
975 * In case of non-lru page, it could be released after
976 * isolation step. In that case, we shouldn't try migration.
e24f0b8f 977 */
978 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
979 if (!folio_test_movable(src)) {
bda807d4 980 rc = MIGRATEPAGE_SUCCESS;
e7e3ffeb 981 folio_clear_isolated(src);
982 goto out;
983 }
984
da707a6d 985 mops = folio_movable_ops(src);
68f2736a 986 rc = mops->migrate_page(&dst->page, &src->page, mode);
bda807d4 987 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
e7e3ffeb 988 !folio_test_isolated(src));
bda807d4 989 }
e24f0b8f 990
5c3f9a67 991 /*
992 * When successful, old pagecache src->mapping must be cleared before
993 * src is freed; but stats require that PageAnon be left as PageAnon.
994 */
995 if (rc == MIGRATEPAGE_SUCCESS) {
996 if (__PageMovable(&src->page)) {
997 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
998
999 /*
1000 * We clear PG_movable under page_lock so any compactor
1001 * cannot try to migrate this page.
1002 */
e7e3ffeb 1003 folio_clear_isolated(src);
1004 }
1005
1006 /*
e7e3ffeb 1007 * Anonymous and movable src->mapping will be cleared by
1008 * free_pages_prepare so don't reset it here for keeping
1009 * the type to work PageAnon, for example.
1010 */
1011 if (!folio_mapping_flags(src))
1012 src->mapping = NULL;
d2b2c6dd 1013
1014 if (likely(!folio_is_zone_device(dst)))
1015 flush_dcache_folio(dst);
3fe2011f 1016 }
bda807d4 1017out:
1018 return rc;
1019}
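
/*
 * Illustrative sketch (not part of the original file): the driver side of
 * the non-LRU movable page protocol handled above.  A driver implements
 * movable_operations and marks its pages movable (typically with
 * __SetPageMovable()) while it owns them; every demo_* callback below is a
 * hypothetical placeholder.
 */
static bool demo_isolate(struct page *page, isolate_mode_t mode)
{
	/* Pin driver-side state so the page cannot be freed meanwhile. */
	return true;
}

static int demo_migrate(struct page *dst, struct page *src,
		enum migrate_mode mode)
{
	/* Copy the driver payload from src to dst, then release src. */
	return MIGRATEPAGE_SUCCESS;
}

static void demo_putback(struct page *page)
{
	/* Undo whatever demo_isolate() pinned. */
}

static __maybe_unused const struct movable_operations demo_mops = {
	.isolate_page	= demo_isolate,
	.migrate_page	= demo_migrate,
	.putback_page	= demo_putback,
};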
1020
1021/*
1022 * To record some information during migration, we use some unused
1023 * fields (mapping and private) of struct folio of the newly allocated
1024 * destination folio. This is safe because nobody is using them
1025 * except us.
1026 */
1027union migration_ptr {
1028 struct anon_vma *anon_vma;
1029 struct address_space *mapping;
1030};
1031static void __migrate_folio_record(struct folio *dst,
1032 unsigned long page_was_mapped,
1033 struct anon_vma *anon_vma)
1034{
1035 union migration_ptr ptr = { .anon_vma = anon_vma };
1036 dst->mapping = ptr.mapping;
64c8902e
HY
1037 dst->private = (void *)page_was_mapped;
1038}
1039
1040static void __migrate_folio_extract(struct folio *dst,
1041 int *page_was_mappedp,
1042 struct anon_vma **anon_vmap)
1043{
1044 union migration_ptr ptr = { .mapping = dst->mapping };
1045 *anon_vmap = ptr.anon_vma;
1046 *page_was_mappedp = (unsigned long)dst->private;
1047 dst->mapping = NULL;
1048 dst->private = NULL;
1049}
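
/*
 * Illustrative sketch (not part of the original file): the record/extract
 * pair above is a simple stash-and-restore in the destination folio, used
 * across the unmap and move phases of batched migration further down.
 */
static __maybe_unused void demo_record_then_extract(struct folio *dst,
		struct anon_vma *anon_vma)
{
	int page_was_mapped;
	struct anon_vma *stashed;

	__migrate_folio_record(dst, 1, anon_vma);
	__migrate_folio_extract(dst, &page_was_mapped, &stashed);
	/* page_was_mapped == 1 and stashed == anon_vma at this point. */
}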
1050
1051/* Restore the source folio to the original state upon failure */
1052static void migrate_folio_undo_src(struct folio *src,
1053 int page_was_mapped,
1054 struct anon_vma *anon_vma,
ebe75e47 1055 bool locked,
1056 struct list_head *ret)
1057{
1058 if (page_was_mapped)
1059 remove_migration_ptes(src, src, false);
1060 /* Drop an anon_vma reference if we took one */
1061 if (anon_vma)
1062 put_anon_vma(anon_vma);
1063 if (locked)
1064 folio_unlock(src);
1065 if (ret)
1066 list_move_tail(&src->lru, ret);
1067}
1068
1069/* Restore the destination folio to the original state upon failure */
1070static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1071 free_folio_t put_new_folio, unsigned long private)
5dfab109 1072{
1073 if (locked)
1074 folio_unlock(dst);
4e096ae1
MWO
1075 if (put_new_folio)
1076 put_new_folio(dst, private);
5dfab109
HY
1077 else
1078 folio_put(dst);
1079}
1080
1081/* Cleanup src folio upon migration success */
1082static void migrate_folio_done(struct folio *src,
1083 enum migrate_reason reason)
1084{
1085 /*
1086 * Compaction can migrate also non-LRU pages which are
1087 * not accounted to NR_ISOLATED_*. They can be recognized
1088 * as __PageMovable
1089 */
1090 if (likely(!__folio_test_movable(src)))
1091 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1092 folio_is_file_lru(src), -folio_nr_pages(src));
1093
1094 if (reason != MR_MEMORY_FAILURE)
1095 /* We release the page in page_handle_poison. */
1096 folio_put(src);
1097}
1098
ebe75e47 1099/* Obtain the lock on page, remove all ptes. */
1100static int migrate_folio_unmap(new_folio_t get_new_folio,
1101 free_folio_t put_new_folio, unsigned long private,
1102 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1103 enum migrate_reason reason, struct list_head *ret)
e24f0b8f 1104{
ebe75e47 1105 struct folio *dst;
0dabec93 1106 int rc = -EAGAIN;
64c8902e 1107 int page_was_mapped = 0;
3f6c8272 1108 struct anon_vma *anon_vma = NULL;
682a71a1 1109 bool is_lru = !__PageMovable(&src->page);
1110 bool locked = false;
1111 bool dst_locked = false;
1112
1113 if (folio_ref_count(src) == 1) {
1114 /* Folio was freed from under us. So we are done. */
1115 folio_clear_active(src);
1116 folio_clear_unevictable(src);
1117 /* free_pages_prepare() will clear PG_isolated. */
1118 list_del(&src->lru);
1119 migrate_folio_done(src, reason);
1120 return MIGRATEPAGE_SUCCESS;
1121 }
1122
1123 dst = get_new_folio(src, private);
1124 if (!dst)
ebe75e47 1125 return -ENOMEM;
1126 *dstp = dst;
1127
1128 dst->private = NULL;
95a402c3 1129
682a71a1 1130 if (!folio_trylock(src)) {
2ef7dbb2 1131 if (mode == MIGRATE_ASYNC)
0dabec93 1132 goto out;
1133
1134 /*
1135 * It's not safe for direct compaction to call lock_page.
1136 * For example, during page readahead pages are added locked
1137 * to the LRU. Later, when the IO completes the pages are
1138 * marked uptodate and unlocked. However, the queueing
1139 * could be merging multiple pages for one bio (e.g.
d4388340 1140 * mpage_readahead). If an allocation happens for the
1141 * second or third page, the process can end up locking
1142 * the same page twice and deadlocking. Rather than
1143 * trying to be clever about what pages can be locked,
1144 * avoid the use of lock_page for direct compaction
1145 * altogether.
1146 */
1147 if (current->flags & PF_MEMALLOC)
0dabec93 1148 goto out;
3e7d3449 1149
1150 /*
1151 * In "light" mode, we can wait for transient locks (eg
1152 * inserting a page into the page table), but it's not
1153 * worth waiting for I/O.
1154 */
1155 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1156 goto out;
1157
682a71a1 1158 folio_lock(src);
e24f0b8f 1159 }
ebe75e47 1160 locked = true;
e24f0b8f 1161
682a71a1 1162 if (folio_test_writeback(src)) {
11bc82d6 1163 /*
fed5b64a 1164 * Only in the case of a full synchronous migration is it
1165 * necessary to wait for PageWriteback. In the async case,
1166 * the retry loop is too short and in the sync-light case,
1167 * the overhead of stalling is too much
11bc82d6 1168 */
1169 switch (mode) {
1170 case MIGRATE_SYNC:
1171 case MIGRATE_SYNC_NO_COPY:
1172 break;
1173 default:
11bc82d6 1174 rc = -EBUSY;
ebe75e47 1175 goto out;
11bc82d6 1176 }
682a71a1 1177 folio_wait_writeback(src);
e24f0b8f 1178 }
03f15c86 1179
e24f0b8f 1180 /*
 1181 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
 1182 * we cannot notice that anon_vma is freed while we migrate a page.
 1ce82b69 1183 * This get_anon_vma() delays freeing the anon_vma pointer until the end
 dc386d4d 1184 * of migration. File cache pages are no problem because of page_lock():
 1185 * file caches may use writepage() or lock_page() during migration, so
 1186 * only anonymous pages need this care here.
03f15c86 1187 *
29eea9b5 1188 * Only folio_get_anon_vma() understands the subtleties of
1189 * getting a hold on an anon_vma from outside one of its mms.
1190 * But if we cannot get anon_vma, then we won't need it anyway,
1191 * because that implies that the anon page is no longer mapped
1192 * (and cannot be remapped so long as we hold the page lock).
dc386d4d 1193 */
682a71a1 1194 if (folio_test_anon(src) && !folio_test_ksm(src))
29eea9b5 1195 anon_vma = folio_get_anon_vma(src);
62e1c553 1196
1197 /*
1198 * Block others from accessing the new page when we get around to
1199 * establishing additional references. We are usually the only one
1200 * holding a reference to dst at this point. We used to have a BUG
1201 * here if folio_trylock(dst) fails, but would like to allow for
1202 * cases where there might be a race with the previous use of dst.
1203 * This is much like races on refcount of oldpage: just don't BUG().
1204 */
682a71a1 1205 if (unlikely(!folio_trylock(dst)))
1206 goto out;
1207 dst_locked = true;
7db7671f 1208
bda807d4 1209 if (unlikely(!is_lru)) {
1210 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1211 return MIGRATEPAGE_UNMAP;
bda807d4
MK
1212 }
1213
dc386d4d 1214 /*
1215 * Corner case handling:
1216 * 1. When a new swap-cache page is read into, it is added to the LRU
1217 * and treated as swapcache but it has no rmap yet.
682a71a1 1218 * Calling try_to_unmap() against a src->mapping==NULL page will
62e1c553 1219 * trigger a BUG. So handle it here.
d12b8951 1220 * 2. An orphaned page (see truncate_cleanup_page) might have
1221 * fs-private metadata. The page can be picked up due to memory
1222 * offlining. Everywhere else except page reclaim, the page is
1223 * invisible to the vm, so the page can not be migrated. So try to
1224 * free the metadata, so the page can be freed.
e24f0b8f 1225 */
1226 if (!src->mapping) {
1227 if (folio_test_private(src)) {
1228 try_to_free_buffers(src);
ebe75e47 1229 goto out;
62e1c553 1230 }
682a71a1 1231 } else if (folio_mapped(src)) {
7db7671f 1232 /* Establish migration ptes */
1233 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1234 !folio_test_ksm(src) && !anon_vma, src);
fb3592c4 1235 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
64c8902e 1236 page_was_mapped = 1;
2ebba6b7 1237 }
dc386d4d 1238
1239 if (!folio_mapped(src)) {
1240 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1241 return MIGRATEPAGE_UNMAP;
1242 }
1243
64c8902e 1244out:
1245 /*
1246 * A folio that has not been unmapped will be restored to
1247 * right list unless we want to retry.
1248 */
fb3592c4 1249 if (rc == -EAGAIN)
ebe75e47 1250 ret = NULL;
80562ba0 1251
ebe75e47 1252 migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
4e096ae1 1253 migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1254
1255 return rc;
1256}
1257
ebe75e47 1258/* Migrate the folio to the newly allocated folio in dst. */
4e096ae1 1259static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1260 struct folio *src, struct folio *dst,
1261 enum migrate_mode mode, enum migrate_reason reason,
1262 struct list_head *ret)
1263{
1264 int rc;
1265 int page_was_mapped = 0;
1266 struct anon_vma *anon_vma = NULL;
1267 bool is_lru = !__PageMovable(&src->page);
5dfab109 1268 struct list_head *prev;
1269
1270 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
1271 prev = dst->lru.prev;
1272 list_del(&dst->lru);
1273
1274 rc = move_to_new_folio(dst, src, mode);
1275 if (rc)
1276 goto out;
5dfab109 1277
1278 if (unlikely(!is_lru))
1279 goto out_unlock_both;
e24f0b8f 1280
c3096e67 1281 /*
682a71a1 1282 * When successful, push dst to LRU immediately: so that if it
c3096e67 1283 * turns out to be an mlocked page, remove_migration_ptes() will
682a71a1 1284 * automatically build up the correct dst->mlock_count for it.
1285 *
1286 * We would like to do something similar for the old page, when
1287 * unsuccessful, and other cases when a page has been temporarily
1288 * isolated from the unevictable LRU: but this case is the easiest.
1289 */
1290 folio_add_lru(dst);
1291 if (page_was_mapped)
1292 lru_add_drain();
c3096e67 1293
5c3f9a67 1294 if (page_was_mapped)
ebe75e47 1295 remove_migration_ptes(src, dst, false);
3f6c8272 1296
7db7671f 1297out_unlock_both:
682a71a1 1298 folio_unlock(dst);
ebe75e47 1299 set_page_owner_migrate_reason(&dst->page, reason);
c6c919eb 1300 /*
682a71a1 1301 * If migration is successful, decrease refcount of dst,
c6c919eb 1302 * which will not free the page because new page owner increased
c3096e67 1303 * refcounter.
c6c919eb 1304 */
ebe75e47 1305 folio_put(dst);
c6c919eb 1306
dd4ae78a 1307 /*
1308 * A folio that has been migrated has all references removed
1309 * and will be freed.
dd4ae78a 1310 */
1311 list_del(&src->lru);
1312 /* Drop an anon_vma reference if we took one */
1313 if (anon_vma)
1314 put_anon_vma(anon_vma);
1315 folio_unlock(src);
1316 migrate_folio_done(src, reason);
bf6bddf1 1317
ebe75e47 1318 return rc;
0dabec93 1319out:
dd4ae78a 1320 /*
1321 * A folio that has not been migrated will be restored to
1322 * right list unless we want to retry.
dd4ae78a 1323 */
1324 if (rc == -EAGAIN) {
1325 list_add(&dst->lru, prev);
1326 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1327 return rc;
e24f0b8f 1328 }
68711a74 1329
ebe75e47 1330 migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
4e096ae1 1331 migrate_folio_undo_dst(dst, true, put_new_folio, private);
ebe75e47 1332
1333 return rc;
1334}
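
/*
 * Illustrative sketch (not part of the original file): how the two phases
 * above are meant to be chained for a single folio, mirroring what
 * migrate_pages_batch() does for whole lists.  demo_* is hypothetical and
 * the retry/cleanup logic of the real caller is omitted.
 */
static __maybe_unused int demo_unmap_then_move(new_folio_t get_new_folio,
		free_folio_t put_new_folio, unsigned long private,
		struct folio *src, enum migrate_mode mode,
		enum migrate_reason reason, struct list_head *ret)
{
	struct folio *dst = NULL;
	int rc;

	rc = migrate_folio_unmap(get_new_folio, put_new_folio, private,
				 src, &dst, mode, reason, ret);
	if (rc == MIGRATEPAGE_UNMAP)
		rc = migrate_folio_move(put_new_folio, private, src, dst,
					mode, reason, ret);
	return rc;
}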
1335
1336/*
1337 * Counterpart of unmap_and_move_page() for hugepage migration.
1338 *
1339 * This function doesn't wait the completion of hugepage I/O
1340 * because there is no race between I/O and migration for hugepage.
1341 * Note that currently hugepage I/O occurs only in direct I/O
1342 * where no lock is held and PG_writeback is irrelevant,
1343 * and writeback status of all subpages are counted in the reference
1344 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1345 * under direct I/O, the reference of the head page is 512 and a bit more.)
1346 * This means that when we try to migrate hugepage whose subpages are
1347 * doing direct I/O, some references remain after try_to_unmap() and
1348 * hugepage migration fails without data corruption.
1349 *
1350 * There is also no race when direct I/O is issued on the page under migration,
1351 * because then pte is replaced with migration swap entry and direct I/O code
1352 * will wait in the page fault for migration to complete.
1353 */
1354static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1355 free_folio_t put_new_folio, unsigned long private,
1356 struct folio *src, int force, enum migrate_mode mode,
1357 int reason, struct list_head *ret)
290408d4 1358{
4e096ae1 1359 struct folio *dst;
2def7424 1360 int rc = -EAGAIN;
2ebba6b7 1361 int page_was_mapped = 0;
290408d4 1362 struct anon_vma *anon_vma = NULL;
c0d0381a 1363 struct address_space *mapping = NULL;
290408d4 1364
c33db292 1365 if (folio_ref_count(src) == 1) {
71a64f61 1366 /* page was freed from under us. So we are done. */
ea8e72f4 1367 folio_putback_active_hugetlb(src);
1368 return MIGRATEPAGE_SUCCESS;
1369 }
1370
1371 dst = get_new_folio(src, private);
1372 if (!dst)
1373 return -ENOMEM;
1374
c33db292 1375 if (!folio_trylock(src)) {
2916ecc0 1376 if (!force)
290408d4 1377 goto out;
1378 switch (mode) {
1379 case MIGRATE_SYNC:
1380 case MIGRATE_SYNC_NO_COPY:
1381 break;
1382 default:
1383 goto out;
1384 }
c33db292 1385 folio_lock(src);
1386 }
1387
1388 /*
1389 * Check for pages which are in the process of being freed. Without
c33db292 1390 * folio_mapping() set, hugetlbfs specific move page routine will not
1391 * be called and we could leak usage counts for subpools.
1392 */
345c62d1 1393 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1394 rc = -EBUSY;
1395 goto out_unlock;
1396 }
1397
c33db292 1398 if (folio_test_anon(src))
29eea9b5 1399 anon_vma = folio_get_anon_vma(src);
290408d4 1400
c33db292 1401 if (unlikely(!folio_trylock(dst)))
1402 goto put_anon;
1403
c33db292 1404 if (folio_mapped(src)) {
a98a2f0c 1405 enum ttu_flags ttu = 0;
336bf30e 1406
c33db292 1407 if (!folio_test_anon(src)) {
1408 /*
1409 * In shared mappings, try_to_unmap could potentially
1410 * call huge_pmd_unshare. Because of this, take
1411 * semaphore in write mode here and set TTU_RMAP_LOCKED
1412 * to let lower levels know we have taken the lock.
1413 */
4e096ae1 1414 mapping = hugetlb_page_mapping_lock_write(&src->page);
1415 if (unlikely(!mapping))
1416 goto unlock_put_anon;
1417
5202978b 1418 ttu = TTU_RMAP_LOCKED;
336bf30e 1419 }
c0d0381a 1420
4b8554c5 1421 try_to_migrate(src, ttu);
2ebba6b7 1422 page_was_mapped = 1;
336bf30e 1423
5202978b 1424 if (ttu & TTU_RMAP_LOCKED)
336bf30e 1425 i_mmap_unlock_write(mapping);
2ebba6b7 1426 }
290408d4 1427
c33db292 1428 if (!folio_mapped(src))
e7e3ffeb 1429 rc = move_to_new_folio(dst, src, mode);
290408d4 1430
336bf30e 1431 if (page_was_mapped)
1432 remove_migration_ptes(src,
1433 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
290408d4 1434
c0d0381a 1435unlock_put_anon:
c33db292 1436 folio_unlock(dst);
1437
1438put_anon:
fd4a4663 1439 if (anon_vma)
9e60109f 1440 put_anon_vma(anon_vma);
8e6ac7fa 1441
2def7424 1442 if (rc == MIGRATEPAGE_SUCCESS) {
345c62d1 1443 move_hugetlb_state(src, dst, reason);
4e096ae1 1444 put_new_folio = NULL;
2def7424 1445 }
8e6ac7fa 1446
cb6acd01 1447out_unlock:
c33db292 1448 folio_unlock(src);
09761333 1449out:
dd4ae78a 1450 if (rc == MIGRATEPAGE_SUCCESS)
ea8e72f4 1451 folio_putback_active_hugetlb(src);
a04840c6 1452 else if (rc != -EAGAIN)
c33db292 1453 list_move_tail(&src->lru, ret);
1454
1455 /*
1456 * If migration was not successful and there's a freeing callback, use
1457 * it. Otherwise, put_page() will drop the reference grabbed during
1458 * isolation.
1459 */
1460 if (put_new_folio)
1461 put_new_folio(dst, private);
68711a74 1462 else
ea8e72f4 1463 folio_putback_active_hugetlb(dst);
68711a74 1464
1465 return rc;
1466}
1467
eaec4e63 1468static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
d532e2e5 1469{
9c62ff00 1470 int rc;
d532e2e5 1471
1472 folio_lock(folio);
1473 rc = split_folio_to_list(folio, split_folios);
1474 folio_unlock(folio);
e6fa8a79 1475 if (!rc)
eaec4e63 1476 list_move_tail(&folio->lru, split_folios);
1477
1478 return rc;
1479}
1480
1481#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1482#define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1483#else
1484#define NR_MAX_BATCHED_MIGRATION 512
1485#endif
e5bfff8b 1486#define NR_MAX_MIGRATE_PAGES_RETRY 10
1487#define NR_MAX_MIGRATE_ASYNC_RETRY 3
1488#define NR_MAX_MIGRATE_SYNC_RETRY \
1489 (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
e5bfff8b 1490
1491struct migrate_pages_stats {
1492 int nr_succeeded; /* Normal and large folios migrated successfully, in
1493 units of base pages */
1494 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1495 units of base pages. Untried folios aren't counted */
1496 int nr_thp_succeeded; /* THP migrated successfully */
1497 int nr_thp_failed; /* THP failed to be migrated */
1498 int nr_thp_split; /* THP split before migrating */
1499};
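
/*
 * Illustrative sketch (not part of the original file): how the counters
 * above relate.  nr_succeeded and nr_failed_pages are in units of base
 * pages, while the nr_thp_* fields count whole THPs, so one successfully
 * migrated PMD-sized THP bumps nr_succeeded by HPAGE_PMD_NR but
 * nr_thp_succeeded only by one.  demo_* is hypothetical.
 */
static __maybe_unused int demo_pages_attempted(const struct migrate_pages_stats *stats)
{
	/* Untried folios are not counted in nr_failed_pages. */
	return stats->nr_succeeded + stats->nr_failed_pages;
}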
1500
b20a3503 1501/*
1502 * Returns the number of hugetlb folios that were not migrated, or an error code
1503 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1504 * any more because the list has become empty or no retryable hugetlb folios
1505 * exist any more. It is caller's responsibility to call putback_movable_pages()
1506 * only if ret != 0.
b20a3503 1507 */
1508static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1509 free_folio_t put_new_folio, unsigned long private,
1510 enum migrate_mode mode, int reason,
1511 struct migrate_pages_stats *stats,
1512 struct list_head *ret_folios)
b20a3503 1513{
e24f0b8f 1514 int retry = 1;
1515 int nr_failed = 0;
1516 int nr_retry_pages = 0;
1517 int pass = 0;
1518 struct folio *folio, *folio2;
1519 int rc, nr_pages;
1520
1521 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1522 retry = 0;
1523 nr_retry_pages = 0;
1524
1525 list_for_each_entry_safe(folio, folio2, from, lru) {
1526 if (!folio_test_hugetlb(folio))
1527 continue;
1528
1529 nr_pages = folio_nr_pages(folio);
1530
1531 cond_resched();
1532
1533 /*
1534 * Migratability of hugepages depends on architectures and
1535 * their size. This check is necessary because some callers
1536 * of hugepage migration like soft offline and memory
1537 * hotremove don't walk through page tables or check whether
1538 * the hugepage is pmd-based or not before kicking migration.
1539 */
1540 if (!hugepage_migration_supported(folio_hstate(folio))) {
1541 nr_failed++;
1542 stats->nr_failed_pages += nr_pages;
1543 list_move_tail(&folio->lru, ret_folios);
1544 continue;
1545 }
1546
1547 rc = unmap_and_move_huge_page(get_new_folio,
1548 put_new_folio, private,
1549 folio, pass > 2, mode,
1550 reason, ret_folios);
1551 /*
1552 * The rules are:
1553 * Success: hugetlb folio will be put back
1554 * -EAGAIN: stay on the from list
1555 * -ENOMEM: stay on the from list
1556 * Other errno: put on ret_folios list
1557 */
1558 switch(rc) {
1559 case -ENOMEM:
1560 /*
1561 * When memory is low, don't bother to try to migrate
1562 * other folios, just exit.
1563 */
1564 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1565 return -ENOMEM;
1566 case -EAGAIN:
1567 retry++;
1568 nr_retry_pages += nr_pages;
1569 break;
1570 case MIGRATEPAGE_SUCCESS:
1571 stats->nr_succeeded += nr_pages;
1572 break;
1573 default:
1574 /*
1575 * Permanent failure (-EBUSY, etc.):
1576 * unlike -EAGAIN case, the failed folio is
1577 * removed from migration folio list and not
1578 * retried in the next outer loop.
1579 */
1580 nr_failed++;
1581 stats->nr_failed_pages += nr_pages;
1582 break;
1583 }
1584 }
1585 }
1586 /*
 1587 * nr_failed is the number of hugetlb folios that failed to be migrated. After
1588 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1589 * folios as failed.
1590 */
1591 nr_failed += retry;
1592 stats->nr_failed_pages += nr_retry_pages;
1593
1594 return nr_failed;
1595}
1596
1597/*
1598 * migrate_pages_batch() first unmaps folios in the from list as many as
1599 * possible, then move the unmapped folios.
1600 *
1601 * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a
1602 * lock or bit when we have locked more than one folio. Which may cause
1603 * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the
1604 * length of the from list must be <= 1.
5dfab109 1605 */
1606static int migrate_pages_batch(struct list_head *from,
1607 new_folio_t get_new_folio, free_folio_t put_new_folio,
1608 unsigned long private, enum migrate_mode mode, int reason,
1609 struct list_head *ret_folios, struct list_head *split_folios,
1610 struct migrate_pages_stats *stats, int nr_pass)
b20a3503 1611{
a21d2133 1612 int retry = 1;
1a5bae25 1613 int thp_retry = 1;
b20a3503 1614 int nr_failed = 0;
077309bc 1615 int nr_retry_pages = 0;
b20a3503 1616 int pass = 0;
1a5bae25 1617 bool is_thp = false;
5dfab109 1618 struct folio *folio, *folio2, *dst = NULL, *dst2;
a21d2133 1619 int rc, rc_saved = 0, nr_pages;
5dfab109
HY
1620 LIST_HEAD(unmap_folios);
1621 LIST_HEAD(dst_folios);
b0b515bf 1622 bool nosplit = (reason == MR_NUMA_MISPLACED);
e5bfff8b 1623
fb3592c4
HY
1624 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1625 !list_empty(from) && !list_is_singular(from));
a21d2133 1626
124abced 1627 for (pass = 0; pass < nr_pass && retry; pass++) {
e24f0b8f 1628 retry = 0;
1a5bae25 1629 thp_retry = 0;
077309bc 1630 nr_retry_pages = 0;
b20a3503 1631
eaec4e63 1632 list_for_each_entry_safe(folio, folio2, from, lru) {
124abced 1633 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
eaec4e63 1634 nr_pages = folio_nr_pages(folio);
e5bfff8b 1635
e24f0b8f 1636 cond_resched();
2d1db3b1 1637
d532e2e5 1638 /*
eaec4e63 1639 * Large folio migration might be unsupported or
6f7d760e 1640			 * the allocation might fail, so we should retry
eaec4e63
HY
1641 * on the same folio with the large folio split
 1642			 * into normal folios.
d532e2e5 1643 *
eaec4e63 1644 * Split folios are put in split_folios, and
e6fa8a79
HY
1645 * we will migrate them after the rest of the
1646 * list is processed.
d532e2e5 1647 */
6f7d760e 1648 if (!thp_migration_supported() && is_thp) {
124abced 1649 nr_failed++;
6f7d760e 1650 stats->nr_thp_failed++;
a21d2133 1651 if (!try_split_folio(folio, split_folios)) {
6f7d760e
HY
1652 stats->nr_thp_split++;
1653 continue;
f430893b 1654 }
6f7d760e
HY
1655 stats->nr_failed_pages += nr_pages;
1656 list_move_tail(&folio->lru, ret_folios);
1657 continue;
1658 }
f430893b 1659
4e096ae1
MWO
1660 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1661 private, folio, &dst, mode, reason,
1662 ret_folios);
dd4ae78a
YS
1663 /*
1664 * The rules are:
e5bfff8b 1665 * Success: folio will be freed
5dfab109
HY
1666 * Unmap: folio will be put on unmap_folios list,
1667 * dst folio put on dst_folios list
dd4ae78a
YS
1668 * -EAGAIN: stay on the from list
1669 * -ENOMEM: stay on the from list
42012e04 1670 * Other errno: put on ret_folios list
dd4ae78a 1671 */
e24f0b8f 1672 switch(rc) {
95a402c3 1673 case -ENOMEM:
94723aaf 1674 /*
d532e2e5 1675 * When memory is low, don't bother to try to migrate
5dfab109 1676 * other folios, move unmapped folios, then exit.
94723aaf 1677 */
124abced
HY
1678 nr_failed++;
1679 stats->nr_thp_failed += is_thp;
1680 /* Large folio NUMA faulting doesn't split to retry. */
1681 if (folio_test_large(folio) && !nosplit) {
1682 int ret = try_split_folio(folio, split_folios);
1683
1684 if (!ret) {
1685 stats->nr_thp_split += is_thp;
1686 break;
1687 } else if (reason == MR_LONGTERM_PIN &&
1688 ret == -EAGAIN) {
1689 /*
1690 * Try again to split large folio to
1691 * mitigate the failure of longterm pinning.
1692 */
1693 retry++;
1694 thp_retry += is_thp;
1695 nr_retry_pages += nr_pages;
1696 /* Undo duplicated failure counting. */
1697 nr_failed--;
1698 stats->nr_thp_failed -= is_thp;
1699 break;
94723aaf 1700 }
1a5bae25 1701 }
b5bade97 1702
42012e04 1703 stats->nr_failed_pages += nr_pages + nr_retry_pages;
fbed53b4 1704				/* nr_failed isn't updated because it won't be used (rc_saved is returned) */
42012e04 1705 stats->nr_thp_failed += thp_retry;
5dfab109
HY
1706 rc_saved = rc;
1707 if (list_empty(&unmap_folios))
1708 goto out;
1709 else
1710 goto move;
e24f0b8f 1711 case -EAGAIN:
124abced
HY
1712 retry++;
1713 thp_retry += is_thp;
eaec4e63 1714 nr_retry_pages += nr_pages;
e24f0b8f 1715 break;
78bd5209 1716 case MIGRATEPAGE_SUCCESS:
42012e04
HY
1717 stats->nr_succeeded += nr_pages;
1718 stats->nr_thp_succeeded += is_thp;
e24f0b8f 1719 break;
5dfab109 1720 case MIGRATEPAGE_UNMAP:
5dfab109
HY
1721 list_move_tail(&folio->lru, &unmap_folios);
1722 list_add_tail(&dst->lru, &dst_folios);
e24f0b8f
CL
1723 break;
1724 default:
354a3363 1725 /*
d532e2e5 1726 * Permanent failure (-EBUSY, etc.):
eaec4e63
HY
 1727				 * unlike the -EAGAIN case, the failed folio is
 1728				 * removed from the migration folio list and not
354a3363
NH
1729 * retried in the next outer loop.
1730 */
124abced
HY
1731 nr_failed++;
1732 stats->nr_thp_failed += is_thp;
42012e04 1733 stats->nr_failed_pages += nr_pages;
e24f0b8f 1734 break;
2d1db3b1 1735 }
b20a3503
CL
1736 }
1737 }
7047b5a4 1738 nr_failed += retry;
42012e04
HY
1739 stats->nr_thp_failed += thp_retry;
1740 stats->nr_failed_pages += nr_retry_pages;
5dfab109 1741move:
7e12beb8
HY
1742 /* Flush TLBs for all unmapped folios */
1743 try_to_unmap_flush();
1744
5dfab109 1745 retry = 1;
124abced 1746 for (pass = 0; pass < nr_pass && retry; pass++) {
5dfab109 1747 retry = 0;
5dfab109
HY
1748 thp_retry = 0;
1749 nr_retry_pages = 0;
1750
1751 dst = list_first_entry(&dst_folios, struct folio, lru);
1752 dst2 = list_next_entry(dst, lru);
1753 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
124abced 1754 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
5dfab109
HY
1755 nr_pages = folio_nr_pages(folio);
1756
1757 cond_resched();
1758
4e096ae1 1759 rc = migrate_folio_move(put_new_folio, private,
5dfab109
HY
1760 folio, dst, mode,
1761 reason, ret_folios);
1762 /*
1763 * The rules are:
1764 * Success: folio will be freed
1765 * -EAGAIN: stay on the unmap_folios list
1766 * Other errno: put on ret_folios list
1767 */
1768 switch(rc) {
1769 case -EAGAIN:
124abced
HY
1770 retry++;
1771 thp_retry += is_thp;
5dfab109
HY
1772 nr_retry_pages += nr_pages;
1773 break;
1774 case MIGRATEPAGE_SUCCESS:
1775 stats->nr_succeeded += nr_pages;
1776 stats->nr_thp_succeeded += is_thp;
1777 break;
1778 default:
124abced
HY
1779 nr_failed++;
1780 stats->nr_thp_failed += is_thp;
5dfab109 1781 stats->nr_failed_pages += nr_pages;
e24f0b8f 1782 break;
2d1db3b1 1783 }
5dfab109
HY
1784 dst = dst2;
1785 dst2 = list_next_entry(dst, lru);
b20a3503
CL
1786 }
1787 }
7047b5a4 1788 nr_failed += retry;
5dfab109
HY
1789 stats->nr_thp_failed += thp_retry;
1790 stats->nr_failed_pages += nr_retry_pages;
1791
124abced 1792 rc = rc_saved ? : nr_failed;
5dfab109
HY
1793out:
1794 /* Cleanup remaining folios */
1795 dst = list_first_entry(&dst_folios, struct folio, lru);
1796 dst2 = list_next_entry(dst, lru);
1797 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1798 int page_was_mapped = 0;
1799 struct anon_vma *anon_vma = NULL;
1800
1801 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
1802 migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
ebe75e47 1803 true, ret_folios);
5dfab109 1804 list_del(&dst->lru);
4e096ae1 1805 migrate_folio_undo_dst(dst, true, put_new_folio, private);
5dfab109
HY
1806 dst = dst2;
1807 dst2 = list_next_entry(dst, lru);
1808 }
1809
42012e04
HY
1810 return rc;
1811}
1812
4e096ae1
MWO
1813static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1814 free_folio_t put_new_folio, unsigned long private,
1815 enum migrate_mode mode, int reason,
1816 struct list_head *ret_folios, struct list_head *split_folios,
1817 struct migrate_pages_stats *stats)
2ef7dbb2
HY
1818{
1819 int rc, nr_failed = 0;
1820 LIST_HEAD(folios);
1821 struct migrate_pages_stats astats;
1822
1823 memset(&astats, 0, sizeof(astats));
 1824	/* Try to migrate in batch with MIGRATE_ASYNC mode first */
4e096ae1 1825 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
2ef7dbb2
HY
1826 reason, &folios, split_folios, &astats,
1827 NR_MAX_MIGRATE_ASYNC_RETRY);
1828 stats->nr_succeeded += astats.nr_succeeded;
1829 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1830 stats->nr_thp_split += astats.nr_thp_split;
1831 if (rc < 0) {
1832 stats->nr_failed_pages += astats.nr_failed_pages;
1833 stats->nr_thp_failed += astats.nr_thp_failed;
1834 list_splice_tail(&folios, ret_folios);
1835 return rc;
1836 }
1837 stats->nr_thp_failed += astats.nr_thp_split;
1838 nr_failed += astats.nr_thp_split;
1839 /*
1840 * Fall back to migrate all failed folios one by one synchronously. All
1841 * failed folios except split THPs will be retried, so their failure
 1842	 * isn't counted.
1843 */
1844 list_splice_tail_init(&folios, from);
1845 while (!list_empty(from)) {
1846 list_move(from->next, &folios);
4e096ae1 1847 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2ef7dbb2
HY
1848 private, mode, reason, ret_folios,
1849 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1850 list_splice_tail_init(&folios, ret_folios);
1851 if (rc < 0)
1852 return rc;
1853 nr_failed += rc;
1854 }
1855
1856 return nr_failed;
1857}
1858
42012e04
HY
1859/*
1860 * migrate_pages - migrate the folios specified in a list, to the free folios
1861 * supplied as the target for the page migration
1862 *
1863 * @from: The list of folios to be migrated.
4e096ae1 1864 * @get_new_folio: The function used to allocate free folios to be used
42012e04 1865 * as the target of the folio migration.
4e096ae1 1866 * @put_new_folio: The function used to free target folios if migration
42012e04 1867 * fails, or NULL if no special handling is necessary.
4e096ae1 1868 * @private: Private data to be passed on to get_new_folio()
42012e04
HY
1869 * @mode: The migration mode that specifies the constraints for
1870 * folio migration, if any.
1871 * @reason: The reason for folio migration.
1872 * @ret_succeeded: Set to the number of folios migrated successfully if
1873 * the caller passes a non-NULL pointer.
1874 *
 1875 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or when no folios
 1876 * are movable any more, either because the list has become empty or because no
 1877 * retryable folios remain. It is the caller's responsibility to call
 1878 * putback_movable_pages() only if ret != 0; a caller sketch follows the function.
1879 *
 1880 * Returns the number of {normal, large, hugetlb} folios that were not
 1881 * migrated, or an error code. The number of large folio splits is counted
 1882 * as the number of non-migrated large folios, no matter how many of the
 1883 * split folios are migrated successfully.
1884 */
4e096ae1
MWO
1885int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1886 free_folio_t put_new_folio, unsigned long private,
42012e04
HY
1887 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1888{
1889 int rc, rc_gather;
2ef7dbb2 1890 int nr_pages;
42012e04
HY
1891 struct folio *folio, *folio2;
1892 LIST_HEAD(folios);
1893 LIST_HEAD(ret_folios);
a21d2133 1894 LIST_HEAD(split_folios);
42012e04
HY
1895 struct migrate_pages_stats stats;
1896
1897 trace_mm_migrate_pages_start(mode, reason);
1898
1899 memset(&stats, 0, sizeof(stats));
1900
4e096ae1 1901 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
42012e04
HY
1902 mode, reason, &stats, &ret_folios);
1903 if (rc_gather < 0)
1904 goto out;
fb3592c4 1905
42012e04
HY
1906again:
1907 nr_pages = 0;
1908 list_for_each_entry_safe(folio, folio2, from, lru) {
 1910		/* Retried hugetlb folios will be kept in the list */
1910 if (folio_test_hugetlb(folio)) {
1911 list_move_tail(&folio->lru, &ret_folios);
1912 continue;
1913 }
1914
1915 nr_pages += folio_nr_pages(folio);
2ef7dbb2 1916 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
42012e04
HY
1917 break;
1918 }
2ef7dbb2 1919 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
fb3592c4 1920 list_cut_before(&folios, from, &folio2->lru);
42012e04
HY
1921 else
1922 list_splice_init(from, &folios);
2ef7dbb2 1923 if (mode == MIGRATE_ASYNC)
4e096ae1
MWO
1924 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1925 private, mode, reason, &ret_folios,
1926 &split_folios, &stats,
1927 NR_MAX_MIGRATE_PAGES_RETRY);
2ef7dbb2 1928 else
4e096ae1
MWO
1929 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1930 private, mode, reason, &ret_folios,
1931 &split_folios, &stats);
42012e04
HY
1932 list_splice_tail_init(&folios, &ret_folios);
1933 if (rc < 0) {
1934 rc_gather = rc;
a21d2133 1935 list_splice_tail(&split_folios, &ret_folios);
42012e04
HY
1936 goto out;
1937 }
a21d2133
HY
1938 if (!list_empty(&split_folios)) {
1939 /*
1940 * Failure isn't counted since all split folios of a large folio
 1941		 * are counted as one failure already. We also only try to migrate them
 1942		 * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
1943 */
4e096ae1
MWO
1944 migrate_pages_batch(&split_folios, get_new_folio,
1945 put_new_folio, private, MIGRATE_ASYNC, reason,
1946 &ret_folios, NULL, &stats, 1);
a21d2133
HY
1947 list_splice_tail_init(&split_folios, &ret_folios);
1948 }
42012e04
HY
1949 rc_gather += rc;
1950 if (!list_empty(from))
1951 goto again;
95a402c3 1952out:
dd4ae78a 1953 /*
eaec4e63 1954	 * Put the permanently failed folios back on the migration list; they
dd4ae78a
YS
 1955	 * will be put back on the right list by the caller.
1956 */
eaec4e63 1957 list_splice(&ret_folios, from);
dd4ae78a 1958
03e5f82e 1959 /*
eaec4e63
HY
 1960	 * Return 0 if all split folios of fail-to-migrate large folios
1961 * are migrated successfully.
03e5f82e
BW
1962 */
1963 if (list_empty(from))
42012e04 1964 rc_gather = 0;
03e5f82e 1965
5b855937
HY
1966 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
1967 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
1968 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
1969 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
1970 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
1971 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
1972 stats.nr_thp_succeeded, stats.nr_thp_failed,
1973 stats.nr_thp_split, mode, reason);
7b2a2d4a 1974
5ac95884 1975 if (ret_succeeded)
5b855937 1976 *ret_succeeded = stats.nr_succeeded;
5ac95884 1977
42012e04 1978 return rc_gather;
b20a3503 1979}
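
/*
 * Illustrative sketch only (not part of this file): the calling contract
 * documented above, assuming the caller has already isolated its folios
 * onto a local 'folio_list' and supplies an allocator/free callback pair
 * ('my_alloc', 'my_free' and 'my_private' are placeholder names):
 *
 *	int rc = migrate_pages(&folio_list, my_alloc, my_free, my_private,
 *			       MIGRATE_SYNC, MR_SYSCALL, NULL);
 *	if (rc)
 *		putback_movable_pages(&folio_list);
 *
 * rc == 0 means everything on the list was migrated; any other value means
 * the leftover folios must be handed back via putback_movable_pages().
 */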
95a402c3 1980
4e096ae1 1981struct folio *alloc_migration_target(struct folio *src, unsigned long private)
b4b38223 1982{
19fc7bed
JK
1983 struct migration_target_control *mtc;
1984 gfp_t gfp_mask;
b4b38223 1985 unsigned int order = 0;
19fc7bed
JK
1986 int nid;
1987 int zidx;
1988
1989 mtc = (struct migration_target_control *)private;
1990 gfp_mask = mtc->gfp_mask;
1991 nid = mtc->nid;
1992 if (nid == NUMA_NO_NODE)
4e096ae1 1993 nid = folio_nid(src);
b4b38223 1994
4e096ae1
MWO
1995 if (folio_test_hugetlb(src)) {
1996 struct hstate *h = folio_hstate(src);
d92bbc27 1997
19fc7bed 1998 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
4e096ae1 1999 return alloc_hugetlb_folio_nodemask(h, nid,
e37d3e83 2000 mtc->nmask, gfp_mask);
d92bbc27 2001 }
b4b38223 2002
4e096ae1 2003 if (folio_test_large(src)) {
9933a0c8
JK
2004 /*
2005 * clear __GFP_RECLAIM to make the migration callback
2006 * consistent with regular THP allocations.
2007 */
2008 gfp_mask &= ~__GFP_RECLAIM;
b4b38223 2009 gfp_mask |= GFP_TRANSHUGE;
4e096ae1 2010 order = folio_order(src);
b4b38223 2011 }
4e096ae1 2012 zidx = zone_idx(folio_zone(src));
19fc7bed 2013 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
b4b38223
JK
2014 gfp_mask |= __GFP_HIGHMEM;
2015
4e096ae1 2016 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
b4b38223
JK
2017}
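
/*
 * Illustrative sketch only (not part of this file): alloc_migration_target()
 * expects 'private' to be a pointer to a struct migration_target_control, so
 * a caller typically wires it up as below ('target_nid' and 'folio_list' are
 * placeholder names); do_move_pages_to_node() further down is the in-tree
 * example of this pattern.
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE,
 *	};
 *
 *	migrate_pages(&folio_list, alloc_migration_target, NULL,
 *		      (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 */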
2018
742755a1 2019#ifdef CONFIG_NUMA
742755a1 2020
a49bd4d7 2021static int store_status(int __user *status, int start, int value, int nr)
742755a1 2022{
a49bd4d7
MH
2023 while (nr-- > 0) {
2024 if (put_user(value, status + start))
2025 return -EFAULT;
2026 start++;
2027 }
2028
2029 return 0;
2030}
2031
2032static int do_move_pages_to_node(struct mm_struct *mm,
2033 struct list_head *pagelist, int node)
2034{
2035 int err;
a0976311
JK
2036 struct migration_target_control mtc = {
2037 .nid = node,
2038 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2039 };
a49bd4d7 2040
a0976311 2041 err = migrate_pages(pagelist, alloc_migration_target, NULL,
5ac95884 2042 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
a49bd4d7
MH
2043 if (err)
2044 putback_movable_pages(pagelist);
2045 return err;
742755a1
CL
2046}
2047
2048/*
a49bd4d7
MH
2049 * Resolves the given address to a struct page, isolates it from the LRU and
 2050 * puts it on the given pagelist.
e0153fc2
YS
2051 * Returns:
2052 * errno - if the page cannot be found/isolated
2053 * 0 - when it doesn't have to be migrated because it is already on the
2054 * target node
2055 * 1 - when it has been queued
742755a1 2056 */
428e106a 2057static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
a49bd4d7 2058 int node, struct list_head *pagelist, bool migrate_all)
742755a1 2059{
a49bd4d7 2060 struct vm_area_struct *vma;
428e106a 2061 unsigned long addr;
a49bd4d7 2062 struct page *page;
742755a1 2063 int err;
9747b9e9 2064 bool isolated;
742755a1 2065
d8ed45c5 2066 mmap_read_lock(mm);
428e106a
KS
2067 addr = (unsigned long)untagged_addr_remote(mm, p);
2068
a49bd4d7 2069 err = -EFAULT;
cb1c37b1
ML
2070 vma = vma_lookup(mm, addr);
2071 if (!vma || !vma_migratable(vma))
a49bd4d7 2072 goto out;
742755a1 2073
a49bd4d7 2074 /* FOLL_DUMP to ignore special (like zero) pages */
87d2762e 2075 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da 2076
a49bd4d7
MH
2077 err = PTR_ERR(page);
2078 if (IS_ERR(page))
2079 goto out;
89f5b7da 2080
a49bd4d7 2081 err = -ENOENT;
f7091ed6 2082 if (!page)
a49bd4d7 2083 goto out;
742755a1 2084
f7091ed6
HW
2085 if (is_zone_device_page(page))
2086 goto out_putpage;
2087
a49bd4d7
MH
2088 err = 0;
2089 if (page_to_nid(page) == node)
2090 goto out_putpage;
742755a1 2091
a49bd4d7
MH
2092 err = -EACCES;
2093 if (page_mapcount(page) > 1 && !migrate_all)
2094 goto out_putpage;
742755a1 2095
a49bd4d7
MH
2096 if (PageHuge(page)) {
2097 if (PageHead(page)) {
9747b9e9
BW
2098 isolated = isolate_hugetlb(page_folio(page), pagelist);
2099 err = isolated ? 1 : -EBUSY;
e632a938 2100 }
a49bd4d7
MH
2101 } else {
2102 struct page *head;
e632a938 2103
e8db67eb 2104 head = compound_head(page);
f7f9c00d
BW
2105 isolated = isolate_lru_page(head);
2106 if (!isolated) {
2107 err = -EBUSY;
a49bd4d7 2108 goto out_putpage;
f7f9c00d 2109 }
742755a1 2110
e0153fc2 2111 err = 1;
a49bd4d7
MH
2112 list_add_tail(&head->lru, pagelist);
2113 mod_node_page_state(page_pgdat(head),
9de4f22a 2114 NR_ISOLATED_ANON + page_is_file_lru(head),
6c357848 2115 thp_nr_pages(head));
a49bd4d7
MH
2116 }
2117out_putpage:
2118 /*
2119 * Either remove the duplicate refcount from
2120 * isolate_lru_page() or drop the page ref if it was
2121 * not isolated.
2122 */
2123 put_page(page);
2124out:
d8ed45c5 2125 mmap_read_unlock(mm);
742755a1
CL
2126 return err;
2127}
2128
7ca8783a
WY
2129static int move_pages_and_store_status(struct mm_struct *mm, int node,
2130 struct list_head *pagelist, int __user *status,
2131 int start, int i, unsigned long nr_pages)
2132{
2133 int err;
2134
5d7ae891
WY
2135 if (list_empty(pagelist))
2136 return 0;
2137
7ca8783a
WY
2138 err = do_move_pages_to_node(mm, pagelist, node);
2139 if (err) {
2140 /*
 2141		 * A positive err means the number of pages
 2142		 * that failed to migrate. Since we are going to
 2143		 * abort and return the number of non-migrated
ab9dd4f8 2144		 * pages, we need to include the rest of the
7ca8783a
WY
2145 * nr_pages that have not been attempted as
2146 * well.
2147 */
2148 if (err > 0)
a7504ed1 2149 err += nr_pages - i;
7ca8783a
WY
2150 return err;
2151 }
2152 return store_status(status, start, node, i - start);
2153}
2154
5e9a0f02
BG
2155/*
 2156 * Migrate an array of page addresses to an array of nodes and fill
 2157 * in the corresponding array of status values.
2158 */
3268c63e 2159static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
5e9a0f02
BG
2160 unsigned long nr_pages,
2161 const void __user * __user *pages,
2162 const int __user *nodes,
2163 int __user *status, int flags)
2164{
a49bd4d7
MH
2165 int current_node = NUMA_NO_NODE;
2166 LIST_HEAD(pagelist);
2167 int start, i;
2168 int err = 0, err1;
35282a2d 2169
361a2a22 2170 lru_cache_disable();
35282a2d 2171
a49bd4d7
MH
2172 for (i = start = 0; i < nr_pages; i++) {
2173 const void __user *p;
a49bd4d7 2174 int node;
3140a227 2175
a49bd4d7
MH
2176 err = -EFAULT;
2177 if (get_user(p, pages + i))
2178 goto out_flush;
2179 if (get_user(node, nodes + i))
2180 goto out_flush;
a49bd4d7
MH
2181
2182 err = -ENODEV;
2183 if (node < 0 || node >= MAX_NUMNODES)
2184 goto out_flush;
2185 if (!node_state(node, N_MEMORY))
2186 goto out_flush;
5e9a0f02 2187
a49bd4d7
MH
2188 err = -EACCES;
2189 if (!node_isset(node, task_nodes))
2190 goto out_flush;
2191
2192 if (current_node == NUMA_NO_NODE) {
2193 current_node = node;
2194 start = i;
2195 } else if (node != current_node) {
7ca8783a
WY
2196 err = move_pages_and_store_status(mm, current_node,
2197 &pagelist, status, start, i, nr_pages);
a49bd4d7
MH
2198 if (err)
2199 goto out;
2200 start = i;
2201 current_node = node;
3140a227
BG
2202 }
2203
a49bd4d7
MH
2204 /*
2205 * Errors in the page lookup or isolation are not fatal and we simply
 2206		 * report them via status.
2207 */
428e106a
KS
2208 err = add_page_for_migration(mm, p, current_node, &pagelist,
2209 flags & MPOL_MF_MOVE_ALL);
e0153fc2 2210
d08221a0 2211 if (err > 0) {
e0153fc2
YS
2212 /* The page is successfully queued for migration */
2213 continue;
2214 }
3140a227 2215
65462462
JH
2216 /*
 2217		 * The move_pages() man page does not list -EEXIST as a possible
 2218		 * return value, so use -EFAULT instead.
2219 */
2220 if (err == -EEXIST)
2221 err = -EFAULT;
2222
d08221a0
WY
2223 /*
2224 * If the page is already on the target node (!err), store the
2225 * node, otherwise, store the err.
2226 */
2227 err = store_status(status, i, err ? : current_node, 1);
a49bd4d7
MH
2228 if (err)
2229 goto out_flush;
5e9a0f02 2230
7ca8783a
WY
2231 err = move_pages_and_store_status(mm, current_node, &pagelist,
2232 status, start, i, nr_pages);
a7504ed1
HY
2233 if (err) {
2234 /* We have accounted for page i */
2235 if (err > 0)
2236 err--;
4afdacec 2237 goto out;
a7504ed1 2238 }
a49bd4d7 2239 current_node = NUMA_NO_NODE;
3140a227 2240 }
a49bd4d7
MH
2241out_flush:
2242 /* Make sure we do not overwrite the existing error */
7ca8783a
WY
2243 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
2244 status, start, i, nr_pages);
dfe9aa23 2245 if (err >= 0)
a49bd4d7 2246 err = err1;
5e9a0f02 2247out:
361a2a22 2248 lru_cache_enable();
5e9a0f02
BG
2249 return err;
2250}
2251
742755a1 2252/*
2f007e74 2253 * Determine the nodes of an array of pages and store it in an array of status.
742755a1 2254 */
80bba129
BG
2255static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2256 const void __user **pages, int *status)
742755a1 2257{
2f007e74 2258 unsigned long i;
2f007e74 2259
d8ed45c5 2260 mmap_read_lock(mm);
742755a1 2261
2f007e74 2262 for (i = 0; i < nr_pages; i++) {
80bba129 2263 unsigned long addr = (unsigned long)(*pages);
742755a1
CL
2264 struct vm_area_struct *vma;
2265 struct page *page;
c095adbc 2266 int err = -EFAULT;
2f007e74 2267
059b8b48
LH
2268 vma = vma_lookup(mm, addr);
2269 if (!vma)
742755a1
CL
2270 goto set_status;
2271
d899844e 2272 /* FOLL_DUMP to ignore special (like zero) pages */
16fd6b31 2273 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da
LT
2274
2275 err = PTR_ERR(page);
2276 if (IS_ERR(page))
2277 goto set_status;
2278
f7091ed6
HW
2279 err = -ENOENT;
2280 if (!page)
2281 goto set_status;
2282
2283 if (!is_zone_device_page(page))
4cd61484 2284 err = page_to_nid(page);
f7091ed6 2285
16fd6b31 2286 put_page(page);
742755a1 2287set_status:
80bba129
BG
2288 *status = err;
2289
2290 pages++;
2291 status++;
2292 }
2293
d8ed45c5 2294 mmap_read_unlock(mm);
80bba129
BG
2295}
2296
5b1b561b
AB
2297static int get_compat_pages_array(const void __user *chunk_pages[],
2298 const void __user * __user *pages,
2299 unsigned long chunk_nr)
2300{
2301 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2302 compat_uptr_t p;
2303 int i;
2304
2305 for (i = 0; i < chunk_nr; i++) {
2306 if (get_user(p, pages32 + i))
2307 return -EFAULT;
2308 chunk_pages[i] = compat_ptr(p);
2309 }
2310
2311 return 0;
2312}
2313
80bba129
BG
2314/*
 2315 * Determine the nodes of a user array of pages and store them in
 2316 * a user array of status values.
2317 */
2318static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2319 const void __user * __user *pages,
2320 int __user *status)
2321{
3eefb826 2322#define DO_PAGES_STAT_CHUNK_NR 16UL
80bba129
BG
2323 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2324 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
80bba129 2325
87b8d1ad 2326 while (nr_pages) {
3eefb826 2327 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
87b8d1ad 2328
5b1b561b
AB
2329 if (in_compat_syscall()) {
2330 if (get_compat_pages_array(chunk_pages, pages,
2331 chunk_nr))
2332 break;
2333 } else {
2334 if (copy_from_user(chunk_pages, pages,
2335 chunk_nr * sizeof(*chunk_pages)))
2336 break;
2337 }
80bba129
BG
2338
2339 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2340
87b8d1ad
PA
2341 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2342 break;
742755a1 2343
87b8d1ad
PA
2344 pages += chunk_nr;
2345 status += chunk_nr;
2346 nr_pages -= chunk_nr;
2347 }
2348 return nr_pages ? -EFAULT : 0;
742755a1
CL
2349}
2350
4dc200ce 2351static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
742755a1 2352{
742755a1 2353 struct task_struct *task;
742755a1 2354 struct mm_struct *mm;
742755a1 2355
4dc200ce
ML
2356 /*
 2357	 * There is no need to check if the current process has the right to modify
 2358	 * the specified process when they are the same.
2359 */
2360 if (!pid) {
2361 mmget(current->mm);
2362 *mem_nodes = cpuset_mems_allowed(current);
2363 return current->mm;
2364 }
742755a1
CL
2365
2366 /* Find the mm_struct */
a879bf58 2367 rcu_read_lock();
4dc200ce 2368 task = find_task_by_vpid(pid);
742755a1 2369 if (!task) {
a879bf58 2370 rcu_read_unlock();
4dc200ce 2371 return ERR_PTR(-ESRCH);
742755a1 2372 }
3268c63e 2373 get_task_struct(task);
742755a1
CL
2374
2375 /*
2376 * Check if this process has the right to modify the specified
197e7e52 2377 * process. Use the regular "ptrace_may_access()" checks.
742755a1 2378 */
197e7e52 2379 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 2380 rcu_read_unlock();
4dc200ce 2381 mm = ERR_PTR(-EPERM);
5e9a0f02 2382 goto out;
742755a1 2383 }
c69e8d9c 2384 rcu_read_unlock();
742755a1 2385
4dc200ce
ML
2386 mm = ERR_PTR(security_task_movememory(task));
2387 if (IS_ERR(mm))
5e9a0f02 2388 goto out;
4dc200ce 2389 *mem_nodes = cpuset_mems_allowed(task);
3268c63e 2390 mm = get_task_mm(task);
4dc200ce 2391out:
3268c63e 2392 put_task_struct(task);
6e8b09ea 2393 if (!mm)
4dc200ce
ML
2394 mm = ERR_PTR(-EINVAL);
2395 return mm;
2396}
2397
2398/*
2399 * Move a list of pages in the address space of the currently executing
2400 * process.
2401 */
2402static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2403 const void __user * __user *pages,
2404 const int __user *nodes,
2405 int __user *status, int flags)
2406{
2407 struct mm_struct *mm;
2408 int err;
2409 nodemask_t task_nodes;
2410
2411 /* Check flags */
2412 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
6e8b09ea
SL
2413 return -EINVAL;
2414
4dc200ce
ML
2415 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2416 return -EPERM;
2417
2418 mm = find_mm_struct(pid, &task_nodes);
2419 if (IS_ERR(mm))
2420 return PTR_ERR(mm);
2421
6e8b09ea
SL
2422 if (nodes)
2423 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2424 nodes, status, flags);
2425 else
2426 err = do_pages_stat(mm, nr_pages, pages, status);
742755a1 2427
742755a1
CL
2428 mmput(mm);
2429 return err;
2430}
742755a1 2431
7addf443
DB
2432SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2433 const void __user * __user *, pages,
2434 const int __user *, nodes,
2435 int __user *, status, int, flags)
2436{
2437 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2438}
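
/*
 * Illustrative sketch only (not part of this file): how the syscall above is
 * typically driven from userspace, for example via the move_pages() wrapper
 * declared in <numaif.h> (libnuma). 'buf' and 'target_node' are placeholder
 * names; on return, status[0] holds the node the page ended up on, or a
 * negative errno if it could not be moved.
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { buf };
 *	int nodes[1] = { target_node };
 *	int status[1];
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * Passing nodes == NULL queries the current node of each page instead of
 * migrating it (see do_pages_stat() above).
 */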
2439
7039e1db
PZ
2440#ifdef CONFIG_NUMA_BALANCING
2441/*
2442 * Returns true if this is a safe migration target node for misplaced NUMA
bc53008e 2443 * pages. Currently it only checks the watermarks, which is crude.
7039e1db
PZ
2444 */
2445static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
3abef4e6 2446 unsigned long nr_migrate_pages)
7039e1db
PZ
2447{
2448 int z;
599d0c95 2449
7039e1db
PZ
2450 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2451 struct zone *zone = pgdat->node_zones + z;
2452
bc53008e 2453 if (!managed_zone(zone))
7039e1db
PZ
2454 continue;
2455
7039e1db
PZ
 2456		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2457 if (!zone_watermark_ok(zone, 0,
2458 high_wmark_pages(zone) +
2459 nr_migrate_pages,
bfe9d006 2460 ZONE_MOVABLE, 0))
7039e1db
PZ
2461 continue;
2462 return true;
2463 }
2464 return false;
2465}
2466
4e096ae1 2467static struct folio *alloc_misplaced_dst_folio(struct folio *src,
666feb21 2468 unsigned long data)
7039e1db
PZ
2469{
2470 int nid = (int) data;
4e096ae1 2471 int order = folio_order(src);
c185e494 2472 gfp_t gfp = __GFP_THISNODE;
c185e494
MWO
2473
2474 if (order > 0)
2475 gfp |= GFP_TRANSHUGE_LIGHT;
2476 else {
2477 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2478 __GFP_NOWARN;
2479 gfp &= ~__GFP_RECLAIM;
2480 }
4e096ae1 2481 return __folio_alloc_node(gfp, order, nid);
c5b5a3dd
YS
2482}
2483
1c30e017 2484static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
b32967ff 2485{
2b9b624f 2486 int nr_pages = thp_nr_pages(page);
c574bbe9 2487 int order = compound_order(page);
a8f60772 2488
c574bbe9 2489 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
3abef4e6 2490
662aeea7
YS
2491 /* Do not migrate THP mapped by multiple processes */
2492 if (PageTransHuge(page) && total_mapcount(page) > 1)
2493 return 0;
2494
7039e1db 2495 /* Avoid migrating to a node that is nearly full */
c574bbe9
HY
2496 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2497 int z;
2498
2499 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2500 return 0;
2501 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
bc53008e 2502 if (managed_zone(pgdat->node_zones + z))
c574bbe9
HY
2503 break;
2504 }
2505 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
340ef390 2506 return 0;
c574bbe9 2507 }
7039e1db 2508
f7f9c00d 2509 if (!isolate_lru_page(page))
340ef390 2510 return 0;
7039e1db 2511
b75454e1 2512 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2b9b624f 2513 nr_pages);
340ef390 2514
149c33e1 2515 /*
340ef390
HD
2516 * Isolating the page has taken another reference, so the
2517 * caller's reference can be safely dropped without the page
2518 * disappearing underneath us during migration.
149c33e1
MG
2519 */
2520 put_page(page);
340ef390 2521 return 1;
b32967ff
MG
2522}
2523
2524/*
2525 * Attempt to migrate a misplaced page to the specified destination
2526 * node. Caller is expected to have an elevated reference count on
2527 * the page that will be dropped by this function before returning.
2528 */
1bc115d8
MG
2529int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2530 int node)
b32967ff
MG
2531{
2532 pg_data_t *pgdat = NODE_DATA(node);
340ef390 2533 int isolated;
b32967ff 2534 int nr_remaining;
e39bb6be 2535 unsigned int nr_succeeded;
b32967ff 2536 LIST_HEAD(migratepages);
b5916c02 2537 int nr_pages = thp_nr_pages(page);
c5b5a3dd 2538
b32967ff 2539 /*
1bc115d8
MG
2540 * Don't migrate file pages that are mapped in multiple processes
2541 * with execute permissions as they are probably shared libraries.
b32967ff 2542 */
7ee820ee
ML
2543 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2544 (vma->vm_flags & VM_EXEC))
b32967ff 2545 goto out;
b32967ff 2546
09a913a7
MG
2547 /*
2548 * Also do not migrate dirty pages as not all filesystems can move
 2549	 * dirty pages in MIGRATE_ASYNC mode, which is a waste of cycles.
2550 */
9de4f22a 2551 if (page_is_file_lru(page) && PageDirty(page))
09a913a7
MG
2552 goto out;
2553
b32967ff
MG
2554 isolated = numamigrate_isolate_page(pgdat, page);
2555 if (!isolated)
2556 goto out;
2557
2558 list_add(&page->lru, &migratepages);
4e096ae1 2559 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
c185e494
MWO
2560 NULL, node, MIGRATE_ASYNC,
2561 MR_NUMA_MISPLACED, &nr_succeeded);
b32967ff 2562 if (nr_remaining) {
59c82b70
JK
2563 if (!list_empty(&migratepages)) {
2564 list_del(&page->lru);
c5fc5c3a
YS
2565 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2566 page_is_file_lru(page), -nr_pages);
59c82b70
JK
2567 putback_lru_page(page);
2568 }
b32967ff 2569 isolated = 0;
e39bb6be
HY
2570 }
2571 if (nr_succeeded) {
2572 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2573 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2574 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2575 nr_succeeded);
2576 }
7039e1db 2577 BUG_ON(!list_empty(&migratepages));
7039e1db 2578 return isolated;
340ef390
HD
2579
2580out:
2581 put_page(page);
2582 return 0;
7039e1db 2583}
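
/*
 * Illustrative sketch only (not part of this file): roughly how a NUMA
 * hinting fault handler would use migrate_misplaced_page(), assuming it
 * already has 'page', 'vma' and a chosen 'target_nid' in hand (all
 * placeholder names). The caller takes an extra page reference, which this
 * function drops on every return path; a nonzero return means the page was
 * migrated to target_nid.
 *
 *	get_page(page);
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		nid = target_nid;	(node used for fault accounting)
 */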
220018d3 2584#endif /* CONFIG_NUMA_BALANCING */
91952440 2585#endif /* CONFIG_NUMA */