// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlbflush.h>

#include <trace/events/migrate.h>

#include "internal.h"

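/*
 * Try to isolate a movable (non-LRU) page for migration. Returns true and
 * sets PG_isolated on success; returns false if the page is being freed,
 * is not movable, or has already been isolated.
 */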
bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct folio *folio = folio_get_nontail_page(page);
	const struct movable_operations *mops;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (!folio)
		goto out;

	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
	smp_rmb();
	/*
	 * Check movable flag before taking the page lock because
	 * we use non-atomic bitops on newly allocated page flags so
	 * unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__folio_test_movable(folio)))
		goto out_putfolio;
	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
	smp_rmb();
	if (unlikely(folio_test_slab(folio)))
		goto out_putfolio;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against a page being released.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!folio_trylock(folio)))
		goto out_putfolio;

	if (!folio_test_movable(folio) || folio_test_isolated(folio))
		goto out_no_isolated;

	mops = folio_movable_ops(folio);
	VM_BUG_ON_FOLIO(!mops, folio);

	if (!mops->isolate_page(&folio->page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(folio_test_isolated(folio));
	folio_set_isolated(folio);
	folio_unlock(folio);

	return true;

out_no_isolated:
	folio_unlock(folio);
out_putfolio:
	folio_put(folio);
out:
	return false;
}

static void putback_movable_folio(struct folio *folio)
{
	const struct movable_operations *mops = folio_movable_ops(folio);

	mops->putback_page(&folio->page);
	folio_clear_isolated(folio);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_hugetlb().
 */
void putback_movable_pages(struct list_head *l)
{
	struct folio *folio;
	struct folio *folio2;

	list_for_each_entry_safe(folio, folio2, l, lru) {
		if (unlikely(folio_test_hugetlb(folio))) {
			folio_putback_active_hugetlb(folio);
			continue;
		}
		list_del(&folio->lru);
		/*
		 * We isolated a non-LRU movable folio, so here we can use
		 * __PageMovable because an LRU folio's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__folio_test_movable(folio))) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
			folio_lock(folio);
			if (folio_test_movable(folio))
				putback_movable_folio(folio);
			else
				folio_clear_isolated(folio);
			folio_unlock(folio);
			folio_put(folio);
		} else {
			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
					folio_is_file_lru(folio), -folio_nr_pages(folio));
			folio_putback_lru(folio);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr, void *old)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
		rmap_t rmap_flags = RMAP_NONE;
		pte_t pte;
		swp_entry_t entry;
		struct page *new;
		unsigned long idx = 0;

		/* pgoff is invalid for ksm pages, but they are never large */
		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
		new = folio_page(folio, idx);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
					!folio_test_pmd_mappable(folio), folio);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		folio_get(folio);
		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (!is_migration_entry_young(entry))
			pte = pte_mkold(pte);
		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
			pte = pte_mkdirty(pte);
		if (is_writable_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);
		else if (pte_swp_uffd_wp(*pvmw.pte))
			pte = pte_mkuffd_wp(pte);
		else
			pte = pte_wrprotect(pte);

		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		if (unlikely(is_device_private_page(new))) {
			if (pte_write(pte))
				entry = make_writable_device_private_entry(
							page_to_pfn(new));
			else
				entry = make_readable_device_private_entry(
							page_to_pfn(new));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*pvmw.pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*pvmw.pte))
				pte = pte_swp_mkuffd_wp(pte);
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (folio_test_hugetlb(folio)) {
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (folio_test_anon(folio))
				hugepage_add_anon_rmap(new, vma, pvmw.address,
						       rmap_flags);
			else
				page_dup_file_rmap(new, true);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		} else
#endif
		{
			if (folio_test_anon(folio))
				page_add_anon_rmap(new, vma, pvmw.address,
						   rmap_flags);
			else
				page_add_file_rmap(new, vma, false);
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = src,
	};

	if (locked)
		rmap_walk_locked(dst, &rwc);
	else
		rmap_walk(dst, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	migration_entry_wait_on_locked(entry, ptep, ptl);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

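/*
 * Convenience wrapper: look up the pte and its lock for @address and wait
 * for any migration entry found there to be removed.
 */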
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The vma read lock must be held upon entry. Holding that lock prevents either
 * the pte or the ptl from being freed.
 *
 * This function will release the vma lock before returning.
 */
void __migration_entry_wait_huge(struct vm_area_struct *vma,
				 pte_t *ptep, spinlock_t *ptl)
{
	pte_t pte;

	hugetlb_vma_assert_locked(vma);
	spin_lock(ptl);
	pte = huge_ptep_get(ptep);

	if (unlikely(!is_hugetlb_entry_migration(pte))) {
		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
	} else {
		/*
		 * If migration entry existed, safe to release vma lock
		 * here because the pgtable page won't be freed without the
		 * pgtable lock released. See comment right above pgtable
		 * lock release in migration_entry_wait_on_locked().
		 */
		hugetlb_vma_unlock_read(vma);
		migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl);
	}
}

void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte);

	__migration_entry_wait_huge(vma, pte, ptl);
}
#endif

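/*
 * Like migration_entry_wait(), but for a PMD-mapped THP migration entry.
 */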
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

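/*
 * Number of references a migration candidate is expected to hold: one for
 * the isolating caller, plus one per base page from the page cache when a
 * mapping is present, plus one more if private data (e.g. buffers) is
 * attached.
 */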
static int folio_expected_refs(struct address_space *mapping,
		struct folio *folio)
{
	int refs = 1;
	if (!mapping)
		return refs;

	refs += folio_nr_pages(folio);
	if (folio_test_private(folio))
		refs++;

	return refs;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newfolio->index = folio->index;
		newfolio->mapping = folio->mapping;
		if (folio_test_swapbacked(folio))
			__folio_set_swapbacked(newfolio);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = folio_zone(folio);
	newzone = folio_zone(newfolio);

	xas_lock_irq(&xas);
	if (!folio_ref_freeze(folio, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the folio:
	 * no turning back from here.
	 */
	newfolio->index = folio->index;
	newfolio->mapping = folio->mapping;
	folio_ref_add(newfolio, nr); /* add cache reference */
	if (folio_test_swapbacked(folio)) {
		__folio_set_swapbacked(newfolio);
		if (folio_test_swapcache(folio)) {
			folio_set_swapcache(newfolio);
			newfolio->private = folio_get_private(folio);
		}
	} else {
		VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
		folio_set_dirty(newfolio);
	}

	xas_store(&xas, newfolio);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	folio_ref_unfreeze(folio, expected_count - nr);

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		struct lruvec *old_lruvec, *new_lruvec;
		struct mem_cgroup *memcg;

		memcg = folio_memcg(folio);
		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);

		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
		}
#ifdef CONFIG_SWAP
		if (folio_test_swapcache(folio)) {
			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
		}
#endif
		if (dirty && mapping_can_writeback(mapping)) {
			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*
 * The expected number of remaining references is the same as that
 * of folio_migrate_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct folio *dst, struct folio *src)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(src));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + folio_has_private(src);
	if (!folio_ref_freeze(src, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	dst->index = src->index;
	dst->mapping = src->mapping;

	folio_get(dst);

	xas_store(&xas, dst);

	folio_ref_unfreeze(src, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Copy the flags and some other ancillary information
 */
void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
{
	int cpupid;

	if (folio_test_error(folio))
		folio_set_error(newfolio);
	if (folio_test_referenced(folio))
		folio_set_referenced(newfolio);
	if (folio_test_uptodate(folio))
		folio_mark_uptodate(newfolio);
	if (folio_test_clear_active(folio)) {
		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
		folio_set_active(newfolio);
	} else if (folio_test_clear_unevictable(folio))
		folio_set_unevictable(newfolio);
	if (folio_test_workingset(folio))
		folio_set_workingset(newfolio);
	if (folio_test_checked(folio))
		folio_set_checked(newfolio);
	/*
	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
	 * migration entries. We can still have PG_anon_exclusive set on an
	 * effectively unmapped and unreferenced first sub-page of an
	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
	 */
	if (folio_test_mappedtodisk(folio))
		folio_set_mappedtodisk(newfolio);

	/* Move dirty on pages not done by folio_migrate_mapping() */
	if (folio_test_dirty(folio))
		folio_set_dirty(newfolio);

	if (folio_test_young(folio))
		folio_set_young(newfolio);
	if (folio_test_idle(folio))
		folio_set_idle(newfolio);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(&folio->page, -1);
	/*
	 * In memory tiering mode, when migrating between slow and fast
	 * memory nodes, reset cpupid, because it is used to record
	 * page access time in the slow memory node.
	 */
	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
		bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
		bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));

		if (f_toptier != t_toptier)
			cpupid = -1;
	}
	page_cpupid_xchg_last(&newfolio->page, cpupid);

	folio_migrate_ksm(newfolio, folio);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (folio_test_swapcache(folio))
		folio_clear_swapcache(folio);
	folio_clear_private(folio);

	/* page->private contains hugetlb specific flags */
	if (!folio_test_hugetlb(folio))
		folio->private = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (folio_test_writeback(newfolio))
		folio_end_writeback(newfolio);

	/*
	 * PG_readahead shares the same bit with PG_reclaim. The above
	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
	 * bit after that.
	 */
	if (folio_test_readahead(folio))
		folio_set_readahead(newfolio);

	folio_copy_owner(newfolio, folio);

	if (!folio_test_hugetlb(folio))
		mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(folio_migrate_flags);

void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
{
	folio_copy(newfolio, folio);
	folio_migrate_flags(newfolio, folio);
}
EXPORT_SYMBOL(folio_migrate_copy);

/************************************************************
 * Migration functions
 ***********************************************************/

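/*
 * Like migrate_folio(), but accepts an additional count of extra references
 * held by the caller on @src, which is passed on to folio_migrate_mapping().
 */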
int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode, int extra_count)
{
	int rc;

	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */

	rc = folio_migrate_mapping(mapping, dst, src, extra_count);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}

/**
 * migrate_folio() - Simple folio migration.
 * @mapping: The address_space containing the folio.
 * @dst: The folio to migrate the data to.
 * @src: The folio containing the current data.
 * @mode: How to migrate the page.
 *
 * Common logic to directly migrate a single LRU folio suitable for
 * folios that do not use PagePrivate/PagePrivate2.
 *
 * Folios are locked upon entry and exit.
 */
int migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	return migrate_folio_extra(mapping, dst, src, mode, 0);
}
EXPORT_SYMBOL(migrate_folio);

84ade7c1
JK
696/* Returns true if all buffers are successfully locked */
697static bool buffer_migrate_lock_buffers(struct buffer_head *head,
698 enum migrate_mode mode)
699{
700 struct buffer_head *bh = head;
701
702 /* Simple case, sync compaction */
703 if (mode != MIGRATE_ASYNC) {
704 do {
84ade7c1
JK
705 lock_buffer(bh);
706 bh = bh->b_this_page;
707
708 } while (bh != head);
709
710 return true;
711 }
712
713 /* async case, we cannot block on lock_buffer so use trylock_buffer */
714 do {
84ade7c1
JK
715 if (!trylock_buffer(bh)) {
716 /*
717 * We failed to lock the buffer and cannot stall in
718 * async migration. Release the taken locks
719 */
720 struct buffer_head *failed_bh = bh;
84ade7c1
JK
721 bh = head;
722 while (bh != failed_bh) {
723 unlock_buffer(bh);
84ade7c1
JK
724 bh = bh->b_this_page;
725 }
726 return false;
727 }
728
729 bh = bh->b_this_page;
730 } while (bh != head);
731 return true;
732}
733
static int __buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	head = folio_buffers(src);
	if (!head)
		return migrate_folio(mapping, dst, src, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = folio_expected_refs(mapping, src);
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;

	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			spin_unlock(&mapping->private_lock);
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = folio_migrate_mapping(mapping, dst, src, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	folio_attach_private(dst, folio_detach_private(src));

	bh = head;
	do {
		set_bh_page(bh, &dst->page, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	if (check_refs)
		spin_unlock(&mapping->private_lock);
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	return rc;
}

/**
 * buffer_migrate_folio() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * This function can only be used if the underlying filesystem guarantees
 * that no other references to @src exist. For example attached buffer
 * heads are accessed only under the folio lock. If your filesystem cannot
 * provide this guarantee, buffer_migrate_folio_norefs() may be more
 * appropriate.
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_folio);

/**
 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
 * @mapping: The address space containing @src.
 * @dst: The folio to migrate to.
 * @src: The folio to migrate from.
 * @mode: How to migrate the folio.
 *
 * Like buffer_migrate_folio() except that this variant is more careful
 * and checks that there are also no buffer head references. This function
 * is the right one for mappings where buffer heads are directly looked
 * up and referenced (such as block device mappings).
 *
 * Return: 0 on success or a negative errno on failure.
 */
int buffer_migrate_folio_norefs(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return __buffer_migrate_folio(mapping, dst, src, mode, true);
}
EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
#endif

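/*
 * Migration helper for filesystems that keep private data attached to their
 * folios: moves the mapping and the private data to @dst, then copies the
 * contents (or only the flags for MIGRATE_SYNC_NO_COPY).
 */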
int filemap_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	int ret;

	ret = folio_migrate_mapping(mapping, dst, src, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_get_private(src))
		folio_attach_private(dst, folio_detach_private(src));

	if (mode != MIGRATE_SYNC_NO_COPY)
		folio_migrate_copy(dst, src);
	else
		folio_migrate_flags(dst, src);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(filemap_migrate_folio);

/*
 * Writeback a folio to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct folio *folio)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!folio_clear_dirty_for_io(folio))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty folio may imply that the underlying filesystem has
	 * the folio on some queue. So the folio must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * folio state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(folio, folio, false);

	rc = mapping->a_ops->writepage(&folio->page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		folio_lock(folio);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	if (folio_test_dirty(src)) {
		/* Only writeback folios in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, src);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_test_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_folio(mapping, dst, src, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_folio(struct folio *dst, struct folio *src,
				enum migrate_mode mode)
{
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(&src->page);

	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);

	if (likely(is_lru)) {
		struct address_space *mapping = folio_mapping(src);

		if (!mapping)
			rc = migrate_folio(mapping, dst, src, mode);
		else if (mapping->a_ops->migrate_folio)
			/*
			 * Most folios have a mapping and most filesystems
			 * provide a migrate_folio callback. Anonymous folios
			 * are part of swap space which also has its own
			 * migrate_folio callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
								mode);
		else
			rc = fallback_migrate_folio(mapping, dst, src, mode);
	} else {
		const struct movable_operations *mops;

		/*
		 * In case of non-lru page, it could be released after
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
		if (!folio_test_movable(src)) {
			rc = MIGRATEPAGE_SUCCESS;
			folio_clear_isolated(src);
			goto out;
		}

		mops = folio_movable_ops(src);
		rc = mops->migrate_page(&dst->page, &src->page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
				!folio_test_isolated(src));
	}

	/*
	 * When successful, old pagecache src->mapping must be cleared before
	 * src is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(&src->page)) {
			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			folio_clear_isolated(src);
		}

		/*
		 * Anonymous and movable src->mapping will be cleared by
		 * free_pages_prepare so don't reset it here for keeping
		 * the type to work PageAnon, for example.
		 */
		if (!folio_mapping_flags(src))
			src->mapping = NULL;

		if (likely(!folio_is_zone_device(dst)))
			flush_dcache_folio(dst);
	}
out:
	return rc;
}

/*
 * To record some information during migration, we use some unused
 * fields (mapping and private) of struct folio of the newly allocated
 * destination folio. This is safe because nobody is using them
 * except us.
 */
union migration_ptr {
	struct anon_vma *anon_vma;
	struct address_space *mapping;
};
static void __migrate_folio_record(struct folio *dst,
				   unsigned long page_was_mapped,
				   struct anon_vma *anon_vma)
{
	union migration_ptr ptr = { .anon_vma = anon_vma };
	dst->mapping = ptr.mapping;
	dst->private = (void *)page_was_mapped;
}

static void __migrate_folio_extract(struct folio *dst,
				   int *page_was_mappedp,
				   struct anon_vma **anon_vmap)
{
	union migration_ptr ptr = { .mapping = dst->mapping };
	*anon_vmap = ptr.anon_vma;
	*page_was_mappedp = (unsigned long)dst->private;
	dst->mapping = NULL;
	dst->private = NULL;
}

/* Restore the source folio to the original state upon failure */
static void migrate_folio_undo_src(struct folio *src,
				   int page_was_mapped,
				   struct anon_vma *anon_vma,
				   bool locked,
				   struct list_head *ret)
{
	if (page_was_mapped)
		remove_migration_ptes(src, src, false);
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	if (locked)
		folio_unlock(src);
	if (ret)
		list_move_tail(&src->lru, ret);
}

/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst,
				   bool locked,
				   free_page_t put_new_page,
				   unsigned long private)
{
	if (locked)
		folio_unlock(dst);
	if (put_new_page)
		put_new_page(&dst->page, private);
	else
		folio_put(dst);
}

/* Cleanup src folio upon migration success */
static void migrate_folio_done(struct folio *src,
			       enum migrate_reason reason)
{
	/*
	 * Compaction can migrate also non-LRU pages which are
	 * not accounted to NR_ISOLATED_*. They can be recognized
	 * as __PageMovable
	 */
	if (likely(!__folio_test_movable(src)))
		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
				    folio_is_file_lru(src), -folio_nr_pages(src));

	if (reason != MR_MEMORY_FAILURE)
		/* We release the page in page_handle_poison. */
		folio_put(src);
}

ebe75e47
HY
1112/* Obtain the lock on page, remove all ptes. */
1113static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
1114 unsigned long private, struct folio *src,
1115 struct folio **dstp, int force, bool avoid_force_lock,
1116 enum migrate_mode mode, enum migrate_reason reason,
1117 struct list_head *ret)
e24f0b8f 1118{
ebe75e47 1119 struct folio *dst;
0dabec93 1120 int rc = -EAGAIN;
ebe75e47 1121 struct page *newpage = NULL;
64c8902e 1122 int page_was_mapped = 0;
3f6c8272 1123 struct anon_vma *anon_vma = NULL;
682a71a1 1124 bool is_lru = !__PageMovable(&src->page);
ebe75e47
HY
1125 bool locked = false;
1126 bool dst_locked = false;
1127
ebe75e47
HY
1128 if (folio_ref_count(src) == 1) {
1129 /* Folio was freed from under us. So we are done. */
1130 folio_clear_active(src);
1131 folio_clear_unevictable(src);
1132 /* free_pages_prepare() will clear PG_isolated. */
1133 list_del(&src->lru);
1134 migrate_folio_done(src, reason);
1135 return MIGRATEPAGE_SUCCESS;
1136 }
1137
1138 newpage = get_new_page(&src->page, private);
1139 if (!newpage)
1140 return -ENOMEM;
1141 dst = page_folio(newpage);
1142 *dstp = dst;
1143
1144 dst->private = NULL;
95a402c3 1145
682a71a1 1146 if (!folio_trylock(src)) {
a6bc32b8 1147 if (!force || mode == MIGRATE_ASYNC)
0dabec93 1148 goto out;
3e7d3449
MG
1149
1150 /*
1151 * It's not safe for direct compaction to call lock_page.
1152 * For example, during page readahead pages are added locked
1153 * to the LRU. Later, when the IO completes the pages are
1154 * marked uptodate and unlocked. However, the queueing
1155 * could be merging multiple pages for one bio (e.g.
d4388340 1156 * mpage_readahead). If an allocation happens for the
3e7d3449
MG
1157 * second or third page, the process can end up locking
1158 * the same page twice and deadlocking. Rather than
1159 * trying to be clever about what pages can be locked,
1160 * avoid the use of lock_page for direct compaction
1161 * altogether.
1162 */
1163 if (current->flags & PF_MEMALLOC)
0dabec93 1164 goto out;
3e7d3449 1165
5dfab109
HY
1166 /*
1167 * We have locked some folios and are going to wait to lock
1168 * this folio. To avoid a potential deadlock, let's bail
1169 * out and not do that. The locked folios will be moved and
1170 * unlocked, then we can wait to lock this folio.
1171 */
1172 if (avoid_force_lock) {
1173 rc = -EDEADLOCK;
1174 goto out;
1175 }
1176
682a71a1 1177 folio_lock(src);
e24f0b8f 1178 }
ebe75e47 1179 locked = true;
e24f0b8f 1180
682a71a1 1181 if (folio_test_writeback(src)) {
11bc82d6 1182 /*
fed5b64a 1183 * Only in the case of a full synchronous migration is it
a6bc32b8
MG
1184 * necessary to wait for PageWriteback. In the async case,
1185 * the retry loop is too short and in the sync-light case,
1186 * the overhead of stalling is too much
11bc82d6 1187 */
2916ecc0
JG
1188 switch (mode) {
1189 case MIGRATE_SYNC:
1190 case MIGRATE_SYNC_NO_COPY:
1191 break;
1192 default:
11bc82d6 1193 rc = -EBUSY;
ebe75e47 1194 goto out;
11bc82d6
AA
1195 }
1196 if (!force)
ebe75e47 1197 goto out;
682a71a1 1198 folio_wait_writeback(src);
e24f0b8f 1199 }
03f15c86 1200
e24f0b8f 1201 /*
682a71a1
MWO
1202 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1203 * we cannot notice that anon_vma is freed while we migrate a page.
1ce82b69 1204 * This get_anon_vma() delays freeing anon_vma pointer until the end
dc386d4d 1205 * of migration. File cache pages are no problem because of page_lock()
989f89c5
KH
1206 * File Caches may use write_page() or lock_page() in migration, then,
1207 * just care Anon page here.
03f15c86 1208 *
29eea9b5 1209 * Only folio_get_anon_vma() understands the subtleties of
03f15c86
HD
1210 * getting a hold on an anon_vma from outside one of its mms.
1211 * But if we cannot get anon_vma, then we won't need it anyway,
1212 * because that implies that the anon page is no longer mapped
1213 * (and cannot be remapped so long as we hold the page lock).
dc386d4d 1214 */
682a71a1 1215 if (folio_test_anon(src) && !folio_test_ksm(src))
29eea9b5 1216 anon_vma = folio_get_anon_vma(src);
62e1c553 1217
7db7671f
HD
1218 /*
1219 * Block others from accessing the new page when we get around to
1220 * establishing additional references. We are usually the only one
682a71a1
MWO
1221 * holding a reference to dst at this point. We used to have a BUG
1222 * here if folio_trylock(dst) fails, but would like to allow for
1223 * cases where there might be a race with the previous use of dst.
7db7671f
HD
1224 * This is much like races on refcount of oldpage: just don't BUG().
1225 */
682a71a1 1226 if (unlikely(!folio_trylock(dst)))
ebe75e47
HY
1227 goto out;
1228 dst_locked = true;
7db7671f 1229
bda807d4 1230 if (unlikely(!is_lru)) {
64c8902e
HY
1231 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1232 return MIGRATEPAGE_UNMAP;
bda807d4
MK
1233 }
1234
dc386d4d 1235 /*
62e1c553
SL
1236 * Corner case handling:
1237 * 1. When a new swap-cache page is read into, it is added to the LRU
1238 * and treated as swapcache but it has no rmap yet.
682a71a1 1239 * Calling try_to_unmap() against a src->mapping==NULL page will
62e1c553 1240 * trigger a BUG. So handle it here.
d12b8951 1241 * 2. An orphaned page (see truncate_cleanup_page) might have
62e1c553
SL
1242 * fs-private metadata. The page can be picked up due to memory
1243 * offlining. Everywhere else except page reclaim, the page is
1244 * invisible to the vm, so the page can not be migrated. So try to
1245 * free the metadata, so the page can be freed.
e24f0b8f 1246 */
682a71a1
MWO
1247 if (!src->mapping) {
1248 if (folio_test_private(src)) {
1249 try_to_free_buffers(src);
ebe75e47 1250 goto out;
62e1c553 1251 }
682a71a1 1252 } else if (folio_mapped(src)) {
7db7671f 1253 /* Establish migration ptes */
682a71a1
MWO
1254 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1255 !folio_test_ksm(src) && !anon_vma, src);
7e12beb8 1256 try_to_migrate(src, TTU_BATCH_FLUSH);
64c8902e 1257 page_was_mapped = 1;
2ebba6b7 1258 }
dc386d4d 1259
64c8902e
HY
1260 if (!folio_mapped(src)) {
1261 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1262 return MIGRATEPAGE_UNMAP;
1263 }
1264
64c8902e 1265out:
80562ba0
HY
1266 /*
1267 * A folio that has not been unmapped will be restored to
1268 * right list unless we want to retry.
1269 */
ebe75e47
HY
1270 if (rc == -EAGAIN || rc == -EDEADLOCK)
1271 ret = NULL;
80562ba0 1272
ebe75e47
HY
1273 migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
1274 migrate_folio_undo_dst(dst, dst_locked, put_new_page, private);
80562ba0
HY
1275
1276 return rc;
1277}
1278
ebe75e47
HY
1279/* Migrate the folio to the newly allocated folio in dst. */
1280static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
1281 struct folio *src, struct folio *dst,
1282 enum migrate_mode mode, enum migrate_reason reason,
1283 struct list_head *ret)
64c8902e
HY
1284{
1285 int rc;
1286 int page_was_mapped = 0;
1287 struct anon_vma *anon_vma = NULL;
1288 bool is_lru = !__PageMovable(&src->page);
5dfab109 1289 struct list_head *prev;
64c8902e
HY
1290
1291 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
5dfab109
HY
1292 prev = dst->lru.prev;
1293 list_del(&dst->lru);
64c8902e
HY
1294
1295 rc = move_to_new_folio(dst, src, mode);
ebe75e47
HY
1296 if (rc)
1297 goto out;
5dfab109 1298
64c8902e
HY
1299 if (unlikely(!is_lru))
1300 goto out_unlock_both;
e24f0b8f 1301
c3096e67 1302 /*
682a71a1 1303 * When successful, push dst to LRU immediately: so that if it
c3096e67 1304 * turns out to be an mlocked page, remove_migration_ptes() will
682a71a1 1305 * automatically build up the correct dst->mlock_count for it.
c3096e67
HD
1306 *
1307 * We would like to do something similar for the old page, when
1308 * unsuccessful, and other cases when a page has been temporarily
1309 * isolated from the unevictable LRU: but this case is the easiest.
1310 */
ebe75e47
HY
1311 folio_add_lru(dst);
1312 if (page_was_mapped)
1313 lru_add_drain();
c3096e67 1314
5c3f9a67 1315 if (page_was_mapped)
ebe75e47 1316 remove_migration_ptes(src, dst, false);
3f6c8272 1317
7db7671f 1318out_unlock_both:
682a71a1 1319 folio_unlock(dst);
ebe75e47 1320 set_page_owner_migrate_reason(&dst->page, reason);
c6c919eb 1321 /*
682a71a1 1322 * If migration is successful, decrease refcount of dst,
c6c919eb 1323 * which will not free the page because new page owner increased
c3096e67 1324 * refcounter.
c6c919eb 1325 */
ebe75e47 1326 folio_put(dst);
c6c919eb 1327
dd4ae78a 1328 /*
ebe75e47
HY
1329 * A folio that has been migrated has all references removed
1330 * and will be freed.
dd4ae78a 1331 */
ebe75e47
HY
1332 list_del(&src->lru);
1333 /* Drop an anon_vma reference if we took one */
1334 if (anon_vma)
1335 put_anon_vma(anon_vma);
1336 folio_unlock(src);
1337 migrate_folio_done(src, reason);
bf6bddf1 1338
ebe75e47 1339 return rc;
0dabec93 1340out:
dd4ae78a 1341 /*
ebe75e47
HY
1342 * A folio that has not been migrated will be restored to
1343 * right list unless we want to retry.
dd4ae78a 1344 */
ebe75e47
HY
1345 if (rc == -EAGAIN) {
1346 list_add(&dst->lru, prev);
1347 __migrate_folio_record(dst, page_was_mapped, anon_vma);
1348 return rc;
e24f0b8f 1349 }
68711a74 1350
ebe75e47
HY
1351 migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
1352 migrate_folio_undo_dst(dst, true, put_new_page, private);
1353
e24f0b8f
CL
1354 return rc;
1355}
1356
290408d4
NH
1357/*
1358 * Counterpart of unmap_and_move_page() for hugepage migration.
1359 *
1360 * This function doesn't wait the completion of hugepage I/O
1361 * because there is no race between I/O and migration for hugepage.
1362 * Note that currently hugepage I/O occurs only in direct I/O
1363 * where no lock is held and PG_writeback is irrelevant,
1364 * and writeback status of all subpages are counted in the reference
1365 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1366 * under direct I/O, the reference of the head page is 512 and a bit more.)
1367 * This means that when we try to migrate hugepage whose subpages are
1368 * doing direct I/O, some references remain after try_to_unmap() and
1369 * hugepage migration fails without data corruption.
1370 *
1371 * There is also no race when direct I/O is issued on the page under migration,
1372 * because then pte is replaced with migration swap entry and direct I/O code
1373 * will wait in the page fault for migration to complete.
1374 */
1375static int unmap_and_move_huge_page(new_page_t get_new_page,
68711a74
DR
1376 free_page_t put_new_page, unsigned long private,
1377 struct page *hpage, int force,
dd4ae78a
YS
1378 enum migrate_mode mode, int reason,
1379 struct list_head *ret)
290408d4 1380{
4eecb8b9 1381 struct folio *dst, *src = page_folio(hpage);
2def7424 1382 int rc = -EAGAIN;
2ebba6b7 1383 int page_was_mapped = 0;
32665f2b 1384 struct page *new_hpage;
290408d4 1385 struct anon_vma *anon_vma = NULL;
c0d0381a 1386 struct address_space *mapping = NULL;
290408d4 1387
c33db292 1388 if (folio_ref_count(src) == 1) {
71a64f61 1389 /* page was freed from under us. So we are done. */
ea8e72f4 1390 folio_putback_active_hugetlb(src);
71a64f61
MS
1391 return MIGRATEPAGE_SUCCESS;
1392 }
1393
666feb21 1394 new_hpage = get_new_page(hpage, private);
290408d4
NH
1395 if (!new_hpage)
1396 return -ENOMEM;
4eecb8b9 1397 dst = page_folio(new_hpage);
290408d4 1398
c33db292 1399 if (!folio_trylock(src)) {
2916ecc0 1400 if (!force)
290408d4 1401 goto out;
2916ecc0
JG
1402 switch (mode) {
1403 case MIGRATE_SYNC:
1404 case MIGRATE_SYNC_NO_COPY:
1405 break;
1406 default:
1407 goto out;
1408 }
c33db292 1409 folio_lock(src);
290408d4
NH
1410 }
1411
cb6acd01
MK
1412 /*
1413 * Check for pages which are in the process of being freed. Without
c33db292 1414 * folio_mapping() set, hugetlbfs specific move page routine will not
cb6acd01
MK
1415 * be called and we could leak usage counts for subpools.
1416 */
345c62d1 1417 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
cb6acd01
MK
1418 rc = -EBUSY;
1419 goto out_unlock;
1420 }
1421
c33db292 1422 if (folio_test_anon(src))
29eea9b5 1423 anon_vma = folio_get_anon_vma(src);
290408d4 1424
c33db292 1425 if (unlikely(!folio_trylock(dst)))
7db7671f
HD
1426 goto put_anon;
1427
c33db292 1428 if (folio_mapped(src)) {
a98a2f0c 1429 enum ttu_flags ttu = 0;
336bf30e 1430
c33db292 1431 if (!folio_test_anon(src)) {
336bf30e
MK
1432 /*
1433 * In shared mappings, try_to_unmap could potentially
1434 * call huge_pmd_unshare. Because of this, take
1435 * semaphore in write mode here and set TTU_RMAP_LOCKED
1436 * to let lower levels know we have taken the lock.
1437 */
1438 mapping = hugetlb_page_mapping_lock_write(hpage);
1439 if (unlikely(!mapping))
1440 goto unlock_put_anon;
1441
5202978b 1442 ttu = TTU_RMAP_LOCKED;
336bf30e 1443 }
c0d0381a 1444
4b8554c5 1445 try_to_migrate(src, ttu);
2ebba6b7 1446 page_was_mapped = 1;
336bf30e 1447
5202978b 1448 if (ttu & TTU_RMAP_LOCKED)
336bf30e 1449 i_mmap_unlock_write(mapping);
2ebba6b7 1450 }
290408d4 1451
c33db292 1452 if (!folio_mapped(src))
e7e3ffeb 1453 rc = move_to_new_folio(dst, src, mode);
290408d4 1454
336bf30e 1455 if (page_was_mapped)
4eecb8b9
MWO
1456 remove_migration_ptes(src,
1457 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
290408d4 1458
c0d0381a 1459unlock_put_anon:
c33db292 1460 folio_unlock(dst);
7db7671f
HD
1461
1462put_anon:
fd4a4663 1463 if (anon_vma)
9e60109f 1464 put_anon_vma(anon_vma);
8e6ac7fa 1465
2def7424 1466 if (rc == MIGRATEPAGE_SUCCESS) {
345c62d1 1467 move_hugetlb_state(src, dst, reason);
2def7424
HD
1468 put_new_page = NULL;
1469 }
8e6ac7fa 1470
cb6acd01 1471out_unlock:
c33db292 1472 folio_unlock(src);
09761333 1473out:
dd4ae78a 1474 if (rc == MIGRATEPAGE_SUCCESS)
ea8e72f4 1475 folio_putback_active_hugetlb(src);
a04840c6 1476 else if (rc != -EAGAIN)
c33db292 1477 list_move_tail(&src->lru, ret);
68711a74
DR
1478
1479 /*
1480 * If migration was not successful and there's a freeing callback, use
1481 * it. Otherwise, put_page() will drop the reference grabbed during
1482 * isolation.
1483 */
2def7424 1484 if (put_new_page)
68711a74
DR
1485 put_new_page(new_hpage, private);
1486 else
ea8e72f4 1487 folio_putback_active_hugetlb(dst);
68711a74 1488
290408d4
NH
1489 return rc;
1490}
1491
eaec4e63 1492static inline int try_split_folio(struct folio *folio, struct list_head *split_folios)
d532e2e5 1493{
9c62ff00 1494 int rc;
d532e2e5 1495
eaec4e63
HY
1496 folio_lock(folio);
1497 rc = split_folio_to_list(folio, split_folios);
1498 folio_unlock(folio);
e6fa8a79 1499 if (!rc)
eaec4e63 1500 list_move_tail(&folio->lru, split_folios);
d532e2e5
YS
1501
1502 return rc;
1503}
1504
42012e04
HY
1505#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1506#define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1507#else
1508#define NR_MAX_BATCHED_MIGRATION 512
1509#endif
e5bfff8b
HY
1510#define NR_MAX_MIGRATE_PAGES_RETRY 10
1511
5b855937
HY
1512struct migrate_pages_stats {
1513 int nr_succeeded; /* Normal and large folios migrated successfully, in
1514 units of base pages */
1515 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1516 units of base pages. Untried folios aren't counted */
1517 int nr_thp_succeeded; /* THP migrated successfully */
1518 int nr_thp_failed; /* THP failed to be migrated */
1519 int nr_thp_split; /* THP split before migrating */
1520};
1521
b20a3503 1522/*
e5bfff8b
HY
1523 * Returns the number of hugetlb folios that were not migrated, or an error code
1524 * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1525 * any more because the list has become empty or no retryable hugetlb folios
1526 * exist any more. It is caller's responsibility to call putback_movable_pages()
1527 * only if ret != 0.
b20a3503 1528 */
e5bfff8b
HY
1529static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
1530 free_page_t put_new_page, unsigned long private,
1531 enum migrate_mode mode, int reason,
1532 struct migrate_pages_stats *stats,
1533 struct list_head *ret_folios)
b20a3503 1534{
e24f0b8f 1535 int retry = 1;
e5bfff8b
HY
1536 int nr_failed = 0;
1537 int nr_retry_pages = 0;
1538 int pass = 0;
1539 struct folio *folio, *folio2;
1540 int rc, nr_pages;
1541
1542 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1543 retry = 0;
1544 nr_retry_pages = 0;
1545
1546 list_for_each_entry_safe(folio, folio2, from, lru) {
1547 if (!folio_test_hugetlb(folio))
1548 continue;
1549
1550 nr_pages = folio_nr_pages(folio);
1551
1552 cond_resched();
1553
6f7d760e
HY
1554 /*
1555 * Migratability of hugepages depends on architectures and
1556 * their size. This check is necessary because some callers
1557 * of hugepage migration like soft offline and memory
1558 * hotremove don't walk through page tables or check whether
1559 * the hugepage is pmd-based or not before kicking migration.
1560 */
1561 if (!hugepage_migration_supported(folio_hstate(folio))) {
1562 nr_failed++;
1563 stats->nr_failed_pages += nr_pages;
1564 list_move_tail(&folio->lru, ret_folios);
1565 continue;
1566 }
1567
e5bfff8b
HY
1568 rc = unmap_and_move_huge_page(get_new_page,
1569 put_new_page, private,
1570 &folio->page, pass > 2, mode,
1571 reason, ret_folios);
1572 /*
1573 * The rules are:
1574 * Success: hugetlb folio will be put back
1575 * -EAGAIN: stay on the from list
1576 * -ENOMEM: stay on the from list
e5bfff8b
HY
1577 * Other errno: put on ret_folios list
1578 */
1579 switch(rc) {
e5bfff8b
HY
1580 case -ENOMEM:
1581 /*
1582 * When memory is low, don't bother to try to migrate
1583 * other folios, just exit.
1584 */
1585 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1586 return -ENOMEM;
1587 case -EAGAIN:
1588 retry++;
1589 nr_retry_pages += nr_pages;
1590 break;
1591 case MIGRATEPAGE_SUCCESS:
1592 stats->nr_succeeded += nr_pages;
1593 break;
1594 default:
1595 /*
1596 * Permanent failure (-EBUSY, etc.):
1597 * unlike -EAGAIN case, the failed folio is
1598 * removed from migration folio list and not
1599 * retried in the next outer loop.
1600 */
1601 nr_failed++;
1602 stats->nr_failed_pages += nr_pages;
1603 break;
1604 }
1605 }
1606 }
1607 /*
1608 * nr_failed is number of hugetlb folios failed to be migrated. After
1609 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1610 * folios as failed.
1611 */
1612 nr_failed += retry;
1613 stats->nr_failed_pages += nr_retry_pages;
1614
1615 return nr_failed;
1616}
1617
5dfab109
HY
1618/*
1619 * migrate_pages_batch() first unmaps folios in the from list as many as
1620 * possible, then move the unmapped folios.
1621 */
42012e04 1622static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
68711a74 1623 free_page_t put_new_page, unsigned long private,
42012e04
HY
1624 enum migrate_mode mode, int reason, struct list_head *ret_folios,
1625 struct migrate_pages_stats *stats)
b20a3503 1626{
5dfab109 1627 int retry;
eaec4e63 1628 int large_retry = 1;
1a5bae25 1629 int thp_retry = 1;
b20a3503 1630 int nr_failed = 0;
077309bc 1631 int nr_retry_pages = 0;
eaec4e63 1632 int nr_large_failed = 0;
b20a3503 1633 int pass = 0;
eaec4e63 1634 bool is_large = false;
1a5bae25 1635 bool is_thp = false;
5dfab109
HY
1636 struct folio *folio, *folio2, *dst = NULL, *dst2;
1637 int rc, rc_saved, nr_pages;
eaec4e63 1638 LIST_HEAD(split_folios);
5dfab109
HY
1639 LIST_HEAD(unmap_folios);
1640 LIST_HEAD(dst_folios);
b0b515bf 1641 bool nosplit = (reason == MR_NUMA_MISPLACED);
eaec4e63 1642 bool no_split_folio_counting = false;
5dfab109 1643 bool avoid_force_lock;
e5bfff8b 1644
5dfab109
HY
1645retry:
1646 rc_saved = 0;
1647 avoid_force_lock = false;
1648 retry = 1;
e5bfff8b
HY
1649 for (pass = 0;
1650 pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
1651 pass++) {
e24f0b8f 1652 retry = 0;
eaec4e63 1653 large_retry = 0;
1a5bae25 1654 thp_retry = 0;
077309bc 1655 nr_retry_pages = 0;
b20a3503 1656
eaec4e63 1657 list_for_each_entry_safe(folio, folio2, from, lru) {
1a5bae25 1658 /*
eaec4e63
HY
1659 * Large folio statistics are based on the source large
1660 * folio. Capture required information that might get
1661 * lost during migration.
1a5bae25 1662 */
e5bfff8b 1663 is_large = folio_test_large(folio);
eaec4e63
HY
1664 is_thp = is_large && folio_test_pmd_mappable(folio);
1665 nr_pages = folio_nr_pages(folio);
e5bfff8b 1666
e24f0b8f 1667 cond_resched();
2d1db3b1 1668
d532e2e5 1669 /*
eaec4e63 1670 * Large folio migration might be unsupported or
6f7d760e 1671 * the allocation might fail, so we should retry
eaec4e63
HY
1672 * on the same folio with the large folio split
1673 * to normal folios.
d532e2e5 1674 *
eaec4e63 1675 * Split folios are put in split_folios, and
e6fa8a79
HY
1676 * we will migrate them after the rest of the
1677 * list is processed.
d532e2e5 1678 */
6f7d760e
HY
1679 if (!thp_migration_supported() && is_thp) {
1680 nr_large_failed++;
1681 stats->nr_thp_failed++;
1682 if (!try_split_folio(folio, &split_folios)) {
1683 stats->nr_thp_split++;
1684 continue;
f430893b 1685 }
6f7d760e
HY
1686 stats->nr_failed_pages += nr_pages;
1687 list_move_tail(&folio->lru, ret_folios);
1688 continue;
1689 }
f430893b 1690
64c8902e 1691 rc = migrate_folio_unmap(get_new_page, put_new_page, private,
5dfab109
HY
1692 folio, &dst, pass > 2, avoid_force_lock,
1693 mode, reason, ret_folios);
dd4ae78a
YS
1694 /*
1695 * The rules are:
e5bfff8b 1696 * Success: folio will be freed
5dfab109
HY
1697 * Unmap: folio will be put on unmap_folios list,
1698 * dst folio put on dst_folios list
dd4ae78a 1699 * -EAGAIN: stay on the from list
5dfab109 1700 * -EDEADLOCK: stay on the from list
dd4ae78a 1701 * -ENOMEM: stay on the from list
42012e04 1702 * Other errno: put on ret_folios list
dd4ae78a 1703 */
e24f0b8f 1704 switch(rc) {
95a402c3 1705 case -ENOMEM:
94723aaf 1706 /*
d532e2e5 1707 * When memory is low, don't bother to try to migrate
5dfab109 1708 * other folios, move unmapped folios, then exit.
94723aaf 1709 */
eaec4e63
HY
1710 if (is_large) {
1711 nr_large_failed++;
42012e04 1712 stats->nr_thp_failed += is_thp;
eaec4e63 1713 /* Large folio NUMA faulting doesn't split to retry. */
fd4a7ac3 1714 if (!nosplit) {
eaec4e63 1715 int ret = try_split_folio(folio, &split_folios);
fd4a7ac3
BW
1716
1717 if (!ret) {
42012e04 1718 stats->nr_thp_split += is_thp;
fd4a7ac3
BW
1719 break;
1720 } else if (reason == MR_LONGTERM_PIN &&
1721 ret == -EAGAIN) {
1722 /*
eaec4e63
HY
1723 * Try again to split large folio to
1724 * mitigate the failure of longterm pinning.
fd4a7ac3 1725 */
eaec4e63
HY
1726 large_retry++;
1727 thp_retry += is_thp;
1728 nr_retry_pages += nr_pages;
fd4a7ac3
BW
1729 break;
1730 }
94723aaf 1731 }
eaec4e63 1732 } else if (!no_split_folio_counting) {
f430893b 1733 nr_failed++;
1a5bae25 1734 }
b5bade97 1735
42012e04 1736 stats->nr_failed_pages += nr_pages + nr_retry_pages;
69a041ff 1737 /*
eaec4e63 1738 * There might be some split folios of fail-to-migrate large
42012e04 1739 * folios left in split_folios list. Move them to ret_folios
69a041ff 1740 * list so that they can be put back to the right list by
eaec4e63 1741 * the caller; otherwise the folio refcount will be leaked.
69a041ff 1742 */
42012e04 1743 list_splice_init(&split_folios, ret_folios);
fbed53b4 1744 /* nr_failed isn't updated because it is not used after this point */
eaec4e63 1745 nr_large_failed += large_retry;
42012e04 1746 stats->nr_thp_failed += thp_retry;
5dfab109
HY
1747 rc_saved = rc;
1748 if (list_empty(&unmap_folios))
1749 goto out;
1750 else
1751 goto move;
1752 case -EDEADLOCK:
1753 /*
1754 * The folio cannot be locked due to a potential deadlock.
1755 * Go move (and unlock) all locked folios. Then we can
1756 * try again.
1757 */
1758 rc_saved = rc;
1759 goto move;
e24f0b8f 1760 case -EAGAIN:
eaec4e63
HY
1761 if (is_large) {
1762 large_retry++;
1763 thp_retry += is_thp;
1764 } else if (!no_split_folio_counting) {
f430893b 1765 retry++;
eaec4e63
HY
1766 }
1767 nr_retry_pages += nr_pages;
e24f0b8f 1768 break;
78bd5209 1769 case MIGRATEPAGE_SUCCESS:
42012e04
HY
1770 stats->nr_succeeded += nr_pages;
1771 stats->nr_thp_succeeded += is_thp;
e24f0b8f 1772 break;
5dfab109
HY
1773 case MIGRATEPAGE_UNMAP:
1774 /*
1775 * We have locked some folios; don't force lock
1776 * to avoid deadlock.
1777 */
1778 avoid_force_lock = true;
1779 list_move_tail(&folio->lru, &unmap_folios);
1780 list_add_tail(&dst->lru, &dst_folios);
e24f0b8f
CL
1781 break;
1782 default:
354a3363 1783 /*
d532e2e5 1784 * Permanent failure (-EBUSY, etc.):
eaec4e63
HY
1785 * unlike -EAGAIN case, the failed folio is
1786 * removed from migration folio list and not
354a3363
NH
1787 * retried in the next outer loop.
1788 */
eaec4e63
HY
1789 if (is_large) {
1790 nr_large_failed++;
42012e04 1791 stats->nr_thp_failed += is_thp;
eaec4e63 1792 } else if (!no_split_folio_counting) {
b5bade97 1793 nr_failed++;
eaec4e63 1794 }
f430893b 1795
42012e04 1796 stats->nr_failed_pages += nr_pages;
e24f0b8f 1797 break;
2d1db3b1 1798 }
b20a3503
CL
1799 }
1800 }
7047b5a4 1801 nr_failed += retry;
eaec4e63 1802 nr_large_failed += large_retry;
42012e04
HY
1803 stats->nr_thp_failed += thp_retry;
1804 stats->nr_failed_pages += nr_retry_pages;
5dfab109 1805move:
7e12beb8
HY
1806 /* Flush TLBs for all unmapped folios */
1807 try_to_unmap_flush();
1808
5dfab109
HY
1809 retry = 1;
1810 for (pass = 0;
1811 pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
1812 pass++) {
1813 retry = 0;
1814 large_retry = 0;
1815 thp_retry = 0;
1816 nr_retry_pages = 0;
1817
1818 dst = list_first_entry(&dst_folios, struct folio, lru);
1819 dst2 = list_next_entry(dst, lru);
1820 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1821 is_large = folio_test_large(folio);
1822 is_thp = is_large && folio_test_pmd_mappable(folio);
1823 nr_pages = folio_nr_pages(folio);
1824
1825 cond_resched();
1826
1827 rc = migrate_folio_move(put_new_page, private,
1828 folio, dst, mode,
1829 reason, ret_folios);
1830 /*
1831 * The rules are:
1832 * Success: folio will be freed
1833 * -EAGAIN: stay on the unmap_folios list
1834 * Other errno: put on ret_folios list
1835 */
1836 switch(rc) {
1837 case -EAGAIN:
1838 if (is_large) {
1839 large_retry++;
1840 thp_retry += is_thp;
1841 } else if (!no_split_folio_counting) {
1842 retry++;
1843 }
1844 nr_retry_pages += nr_pages;
1845 break;
1846 case MIGRATEPAGE_SUCCESS:
1847 stats->nr_succeeded += nr_pages;
1848 stats->nr_thp_succeeded += is_thp;
1849 break;
1850 default:
1851 if (is_large) {
1852 nr_large_failed++;
1853 stats->nr_thp_failed += is_thp;
eaec4e63 1854 } else if (!no_split_folio_counting) {
b5bade97 1855 nr_failed++;
eaec4e63 1856 }
f430893b 1857
5dfab109 1858 stats->nr_failed_pages += nr_pages;
e24f0b8f 1859 break;
2d1db3b1 1860 }
5dfab109
HY
1861 dst = dst2;
1862 dst2 = list_next_entry(dst, lru);
b20a3503
CL
1863 }
1864 }
7047b5a4 1865 nr_failed += retry;
eaec4e63 1866 nr_large_failed += large_retry;
5dfab109
HY
1867 stats->nr_thp_failed += thp_retry;
1868 stats->nr_failed_pages += nr_retry_pages;
1869
1870 if (rc_saved)
1871 rc = rc_saved;
1872 else
1873 rc = nr_failed + nr_large_failed;
1874out:
1875 /* Clean up remaining folios */
1876 dst = list_first_entry(&dst_folios, struct folio, lru);
1877 dst2 = list_next_entry(dst, lru);
1878 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1879 int page_was_mapped = 0;
1880 struct anon_vma *anon_vma = NULL;
1881
1882 __migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
1883 migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
ebe75e47 1884 true, ret_folios);
5dfab109 1885 list_del(&dst->lru);
ebe75e47 1886 migrate_folio_undo_dst(dst, true, put_new_page, private);
5dfab109
HY
1887 dst = dst2;
1888 dst2 = list_next_entry(dst, lru);
1889 }
1890
b5bade97 1891 /*
eaec4e63
HY
1892 * Try to migrate split folios of fail-to-migrate large folios. No
1893 * nr_failed counting is done in this round, since all split folios of a
1894 * large folio are counted as 1 failure in the first round.
b5bade97 1895 */
5dfab109 1896 if (rc >= 0 && !list_empty(&split_folios)) {
b5bade97 1897 /*
e5bfff8b
HY
1898 * Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY
1899 * retries) to ret_folios to avoid migrating them again.
b5bade97 1900 */
42012e04 1901 list_splice_init(from, ret_folios);
eaec4e63
HY
1902 list_splice_init(&split_folios, from);
1903 no_split_folio_counting = true;
5dfab109 1904 goto retry;
b5bade97
BW
1905 }
1906
5dfab109
HY
1907 /*
1908 * We have unlocked all locked folios, so we can force lock now, let's
1909 * try again.
1910 */
1911 if (rc == -EDEADLOCK)
1912 goto retry;
1913
42012e04
HY
1914 return rc;
1915}
1916
1917/*
1918 * migrate_pages - migrate the folios specified in a list, to the free folios
1919 * supplied as the target for the page migration
1920 *
1921 * @from: The list of folios to be migrated.
1922 * @get_new_page: The function used to allocate free folios to be used
1923 * as the target of the folio migration.
1924 * @put_new_page: The function used to free target folios if migration
1925 * fails, or NULL if no special handling is necessary.
1926 * @private: Private data to be passed on to get_new_page()
1927 * @mode: The migration mode that specifies the constraints for
1928 * folio migration, if any.
1929 * @reason: The reason for folio migration.
1930 * @ret_succeeded: Set to the number of folios migrated successfully if
1931 * the caller passes a non-NULL pointer.
1932 *
1933 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1934 * are movable any more because the list has become empty or no retryable folios
1935 * exist any more. It is the caller's responsibility to call putback_movable_pages()
1936 * only if ret != 0.
1937 *
1938 * Returns the number of {normal folio, large folio, hugetlb} folios that were not
1939 * migrated, or an error code. Each large folio that had to be split is counted
1940 * as one non-migrated large folio, no matter how many of its split folios were
1941 * migrated successfully.
1942 */
1943int migrate_pages(struct list_head *from, new_page_t get_new_page,
1944 free_page_t put_new_page, unsigned long private,
1945 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1946{
1947 int rc, rc_gather;
1948 int nr_pages;
1949 struct folio *folio, *folio2;
1950 LIST_HEAD(folios);
1951 LIST_HEAD(ret_folios);
1952 struct migrate_pages_stats stats;
1953
1954 trace_mm_migrate_pages_start(mode, reason);
1955
1956 memset(&stats, 0, sizeof(stats));
1957
1958 rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private,
1959 mode, reason, &stats, &ret_folios);
1960 if (rc_gather < 0)
1961 goto out;
1962again:
1963 nr_pages = 0;
1964 list_for_each_entry_safe(folio, folio2, from, lru) {
1965 /* Retried hugetlb folios will be kept in list */
1966 if (folio_test_hugetlb(folio)) {
1967 list_move_tail(&folio->lru, &ret_folios);
1968 continue;
1969 }
1970
1971 nr_pages += folio_nr_pages(folio);
1972 if (nr_pages > NR_MAX_BATCHED_MIGRATION)
1973 break;
1974 }
1975 if (nr_pages > NR_MAX_BATCHED_MIGRATION)
1976 list_cut_before(&folios, from, &folio->lru);
1977 else
1978 list_splice_init(from, &folios);
1979 rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
1980 mode, reason, &ret_folios, &stats);
1981 list_splice_tail_init(&folios, &ret_folios);
1982 if (rc < 0) {
1983 rc_gather = rc;
1984 goto out;
1985 }
1986 rc_gather += rc;
1987 if (!list_empty(from))
1988 goto again;
95a402c3 1989out:
dd4ae78a 1990 /*
eaec4e63 1991 * Put the permanently failed folios back on the migration list; they
dd4ae78a
YS
1992 * will be put back to the right list by the caller.
1993 */
eaec4e63 1994 list_splice(&ret_folios, from);
dd4ae78a 1995
03e5f82e 1996 /*
eaec4e63
HY
1997 * Return 0 in case all split folios of fail-to-migrate large folios
1998 * are migrated successfully.
03e5f82e
BW
1999 */
2000 if (list_empty(from))
42012e04 2001 rc_gather = 0;
03e5f82e 2002
5b855937
HY
2003 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2004 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2005 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2006 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2007 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2008 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2009 stats.nr_thp_succeeded, stats.nr_thp_failed,
2010 stats.nr_thp_split, mode, reason);
7b2a2d4a 2011
5ac95884 2012 if (ret_succeeded)
5b855937 2013 *ret_succeeded = stats.nr_succeeded;
5ac95884 2014
42012e04 2015 return rc_gather;
b20a3503 2016}
95a402c3 2017
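/*
 * Editor's note: a minimal illustrative sketch (not part of migrate.c) of the
 * calling contract documented above, mirroring do_move_pages_to_node() further
 * down in this file. The function name is hypothetical; the key point is that
 * putback_movable_pages() is called only when migrate_pages() returns non-zero,
 * since folios that could not be migrated are left on the caller's list.
 */
static int __maybe_unused example_migrate_list_to_node(struct list_head *pagelist,
							int node)
{
	struct migration_target_control mtc = {
		.nid = node,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
	};
	int err;

	err = migrate_pages(pagelist, alloc_migration_target, NULL,
			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}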
19fc7bed 2018struct page *alloc_migration_target(struct page *page, unsigned long private)
b4b38223 2019{
ffe06786 2020 struct folio *folio = page_folio(page);
19fc7bed
JK
2021 struct migration_target_control *mtc;
2022 gfp_t gfp_mask;
b4b38223 2023 unsigned int order = 0;
e37d3e83 2024 struct folio *hugetlb_folio = NULL;
ffe06786 2025 struct folio *new_folio = NULL;
19fc7bed
JK
2026 int nid;
2027 int zidx;
2028
2029 mtc = (struct migration_target_control *)private;
2030 gfp_mask = mtc->gfp_mask;
2031 nid = mtc->nid;
2032 if (nid == NUMA_NO_NODE)
ffe06786 2033 nid = folio_nid(folio);
b4b38223 2034
ffe06786 2035 if (folio_test_hugetlb(folio)) {
e51da3a9 2036 struct hstate *h = folio_hstate(folio);
d92bbc27 2037
19fc7bed 2038 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
e37d3e83
SK
2039 hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
2040 mtc->nmask, gfp_mask);
2041 return &hugetlb_folio->page;
d92bbc27 2042 }
b4b38223 2043
ffe06786 2044 if (folio_test_large(folio)) {
9933a0c8
JK
2045 /*
2046 * clear __GFP_RECLAIM to make the migration callback
2047 * consistent with regular THP allocations.
2048 */
2049 gfp_mask &= ~__GFP_RECLAIM;
b4b38223 2050 gfp_mask |= GFP_TRANSHUGE;
ffe06786 2051 order = folio_order(folio);
b4b38223 2052 }
ffe06786 2053 zidx = zone_idx(folio_zone(folio));
19fc7bed 2054 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
b4b38223
JK
2055 gfp_mask |= __GFP_HIGHMEM;
2056
ffe06786 2057 new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
b4b38223 2058
ffe06786 2059 return &new_folio->page;
b4b38223
JK
2060}
2061
742755a1 2062#ifdef CONFIG_NUMA
742755a1 2063
a49bd4d7 2064static int store_status(int __user *status, int start, int value, int nr)
742755a1 2065{
a49bd4d7
MH
2066 while (nr-- > 0) {
2067 if (put_user(value, status + start))
2068 return -EFAULT;
2069 start++;
2070 }
2071
2072 return 0;
2073}
2074
2075static int do_move_pages_to_node(struct mm_struct *mm,
2076 struct list_head *pagelist, int node)
2077{
2078 int err;
a0976311
JK
2079 struct migration_target_control mtc = {
2080 .nid = node,
2081 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2082 };
a49bd4d7 2083
a0976311 2084 err = migrate_pages(pagelist, alloc_migration_target, NULL,
5ac95884 2085 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
a49bd4d7
MH
2086 if (err)
2087 putback_movable_pages(pagelist);
2088 return err;
742755a1
CL
2089}
2090
2091/*
a49bd4d7
MH
2092 * Resolves the given address to a struct page, isolates it from the LRU and
2093 * puts it on the given pagelist.
e0153fc2
YS
2094 * Returns:
2095 * errno - if the page cannot be found/isolated
2096 * 0 - when it doesn't have to be migrated because it is already on the
2097 * target node
2098 * 1 - when it has been queued
742755a1 2099 */
428e106a 2100static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
a49bd4d7 2101 int node, struct list_head *pagelist, bool migrate_all)
742755a1 2102{
a49bd4d7 2103 struct vm_area_struct *vma;
428e106a 2104 unsigned long addr;
a49bd4d7 2105 struct page *page;
742755a1 2106 int err;
9747b9e9 2107 bool isolated;
742755a1 2108
d8ed45c5 2109 mmap_read_lock(mm);
428e106a
KS
2110 addr = (unsigned long)untagged_addr_remote(mm, p);
2111
a49bd4d7 2112 err = -EFAULT;
cb1c37b1
ML
2113 vma = vma_lookup(mm, addr);
2114 if (!vma || !vma_migratable(vma))
a49bd4d7 2115 goto out;
742755a1 2116
a49bd4d7 2117 /* FOLL_DUMP to ignore special (like zero) pages */
87d2762e 2118 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da 2119
a49bd4d7
MH
2120 err = PTR_ERR(page);
2121 if (IS_ERR(page))
2122 goto out;
89f5b7da 2123
a49bd4d7 2124 err = -ENOENT;
f7091ed6 2125 if (!page)
a49bd4d7 2126 goto out;
742755a1 2127
f7091ed6
HW
2128 if (is_zone_device_page(page))
2129 goto out_putpage;
2130
a49bd4d7
MH
2131 err = 0;
2132 if (page_to_nid(page) == node)
2133 goto out_putpage;
742755a1 2134
a49bd4d7
MH
2135 err = -EACCES;
2136 if (page_mapcount(page) > 1 && !migrate_all)
2137 goto out_putpage;
742755a1 2138
a49bd4d7
MH
2139 if (PageHuge(page)) {
2140 if (PageHead(page)) {
9747b9e9
BW
2141 isolated = isolate_hugetlb(page_folio(page), pagelist);
2142 err = isolated ? 1 : -EBUSY;
e632a938 2143 }
a49bd4d7
MH
2144 } else {
2145 struct page *head;
e632a938 2146
e8db67eb 2147 head = compound_head(page);
f7f9c00d
BW
2148 isolated = isolate_lru_page(head);
2149 if (!isolated) {
2150 err = -EBUSY;
a49bd4d7 2151 goto out_putpage;
f7f9c00d 2152 }
742755a1 2153
e0153fc2 2154 err = 1;
a49bd4d7
MH
2155 list_add_tail(&head->lru, pagelist);
2156 mod_node_page_state(page_pgdat(head),
9de4f22a 2157 NR_ISOLATED_ANON + page_is_file_lru(head),
6c357848 2158 thp_nr_pages(head));
a49bd4d7
MH
2159 }
2160out_putpage:
2161 /*
2162 * Either remove the duplicate refcount from
2163 * isolate_lru_page() or drop the page ref if it was
2164 * not isolated.
2165 */
2166 put_page(page);
2167out:
d8ed45c5 2168 mmap_read_unlock(mm);
742755a1
CL
2169 return err;
2170}
2171
7ca8783a
WY
2172static int move_pages_and_store_status(struct mm_struct *mm, int node,
2173 struct list_head *pagelist, int __user *status,
2174 int start, int i, unsigned long nr_pages)
2175{
2176 int err;
2177
5d7ae891
WY
2178 if (list_empty(pagelist))
2179 return 0;
2180
7ca8783a
WY
2181 err = do_move_pages_to_node(mm, pagelist, node);
2182 if (err) {
2183 /*
2184 * A positive err is the number of pages that
2185 * failed to migrate. Since we are going to
2186 * abort and return the number of non-migrated
ab9dd4f8 2187 * pages, we need to include the rest of the
7ca8783a
WY
2188 * nr_pages that have not been attempted as
2189 * well.
2190 */
2191 if (err > 0)
a7504ed1 2192 err += nr_pages - i;
7ca8783a
WY
2193 return err;
2194 }
2195 return store_status(status, start, node, i - start);
2196}
2197
5e9a0f02
BG
2198/*
2199 * Migrate an array of page addresses onto an array of nodes and fill
2200 * the corresponding array of status.
2201 */
3268c63e 2202static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
5e9a0f02
BG
2203 unsigned long nr_pages,
2204 const void __user * __user *pages,
2205 const int __user *nodes,
2206 int __user *status, int flags)
2207{
a49bd4d7
MH
2208 int current_node = NUMA_NO_NODE;
2209 LIST_HEAD(pagelist);
2210 int start, i;
2211 int err = 0, err1;
35282a2d 2212
361a2a22 2213 lru_cache_disable();
35282a2d 2214
a49bd4d7
MH
2215 for (i = start = 0; i < nr_pages; i++) {
2216 const void __user *p;
a49bd4d7 2217 int node;
3140a227 2218
a49bd4d7
MH
2219 err = -EFAULT;
2220 if (get_user(p, pages + i))
2221 goto out_flush;
2222 if (get_user(node, nodes + i))
2223 goto out_flush;
a49bd4d7
MH
2224
2225 err = -ENODEV;
2226 if (node < 0 || node >= MAX_NUMNODES)
2227 goto out_flush;
2228 if (!node_state(node, N_MEMORY))
2229 goto out_flush;
5e9a0f02 2230
a49bd4d7
MH
2231 err = -EACCES;
2232 if (!node_isset(node, task_nodes))
2233 goto out_flush;
2234
2235 if (current_node == NUMA_NO_NODE) {
2236 current_node = node;
2237 start = i;
2238 } else if (node != current_node) {
7ca8783a
WY
2239 err = move_pages_and_store_status(mm, current_node,
2240 &pagelist, status, start, i, nr_pages);
a49bd4d7
MH
2241 if (err)
2242 goto out;
2243 start = i;
2244 current_node = node;
3140a227
BG
2245 }
2246
a49bd4d7
MH
2247 /*
2248 * Errors in the page lookup or isolation are not fatal and we simply
2249 * report them via status.
2250 */
428e106a
KS
2251 err = add_page_for_migration(mm, p, current_node, &pagelist,
2252 flags & MPOL_MF_MOVE_ALL);
e0153fc2 2253
d08221a0 2254 if (err > 0) {
e0153fc2
YS
2255 /* The page is successfully queued for migration */
2256 continue;
2257 }
3140a227 2258
65462462
JH
2259 /*
2260 * The move_pages() man page does not have an -EEXIST choice, so
2261 * use -EFAULT instead.
2262 */
2263 if (err == -EEXIST)
2264 err = -EFAULT;
2265
d08221a0
WY
2266 /*
2267 * If the page is already on the target node (!err), store the
2268 * node, otherwise, store the err.
2269 */
2270 err = store_status(status, i, err ? : current_node, 1);
a49bd4d7
MH
2271 if (err)
2272 goto out_flush;
5e9a0f02 2273
7ca8783a
WY
2274 err = move_pages_and_store_status(mm, current_node, &pagelist,
2275 status, start, i, nr_pages);
a7504ed1
HY
2276 if (err) {
2277 /* We have accounted for page i */
2278 if (err > 0)
2279 err--;
4afdacec 2280 goto out;
a7504ed1 2281 }
a49bd4d7 2282 current_node = NUMA_NO_NODE;
3140a227 2283 }
a49bd4d7
MH
2284out_flush:
2285 /* Make sure we do not overwrite the existing error */
7ca8783a
WY
2286 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
2287 status, start, i, nr_pages);
dfe9aa23 2288 if (err >= 0)
a49bd4d7 2289 err = err1;
5e9a0f02 2290out:
361a2a22 2291 lru_cache_enable();
5e9a0f02
BG
2292 return err;
2293}
2294
742755a1 2295/*
2f007e74 2296 * Determine the nodes of an array of pages and store them in an array of status.
742755a1 2297 */
80bba129
BG
2298static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2299 const void __user **pages, int *status)
742755a1 2300{
2f007e74 2301 unsigned long i;
2f007e74 2302
d8ed45c5 2303 mmap_read_lock(mm);
742755a1 2304
2f007e74 2305 for (i = 0; i < nr_pages; i++) {
80bba129 2306 unsigned long addr = (unsigned long)(*pages);
742755a1
CL
2307 struct vm_area_struct *vma;
2308 struct page *page;
c095adbc 2309 int err = -EFAULT;
2f007e74 2310
059b8b48
LH
2311 vma = vma_lookup(mm, addr);
2312 if (!vma)
742755a1
CL
2313 goto set_status;
2314
d899844e 2315 /* FOLL_DUMP to ignore special (like zero) pages */
16fd6b31 2316 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
89f5b7da
LT
2317
2318 err = PTR_ERR(page);
2319 if (IS_ERR(page))
2320 goto set_status;
2321
f7091ed6
HW
2322 err = -ENOENT;
2323 if (!page)
2324 goto set_status;
2325
2326 if (!is_zone_device_page(page))
4cd61484 2327 err = page_to_nid(page);
f7091ed6 2328
16fd6b31 2329 put_page(page);
742755a1 2330set_status:
80bba129
BG
2331 *status = err;
2332
2333 pages++;
2334 status++;
2335 }
2336
d8ed45c5 2337 mmap_read_unlock(mm);
80bba129
BG
2338}
2339
5b1b561b
AB
2340static int get_compat_pages_array(const void __user *chunk_pages[],
2341 const void __user * __user *pages,
2342 unsigned long chunk_nr)
2343{
2344 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2345 compat_uptr_t p;
2346 int i;
2347
2348 for (i = 0; i < chunk_nr; i++) {
2349 if (get_user(p, pages32 + i))
2350 return -EFAULT;
2351 chunk_pages[i] = compat_ptr(p);
2352 }
2353
2354 return 0;
2355}
2356
80bba129
BG
2357/*
2358 * Determine the nodes of a user array of pages and store them in
2359 * a user array of status.
2360 */
2361static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2362 const void __user * __user *pages,
2363 int __user *status)
2364{
3eefb826 2365#define DO_PAGES_STAT_CHUNK_NR 16UL
80bba129
BG
2366 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2367 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
80bba129 2368
87b8d1ad 2369 while (nr_pages) {
3eefb826 2370 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
87b8d1ad 2371
5b1b561b
AB
2372 if (in_compat_syscall()) {
2373 if (get_compat_pages_array(chunk_pages, pages,
2374 chunk_nr))
2375 break;
2376 } else {
2377 if (copy_from_user(chunk_pages, pages,
2378 chunk_nr * sizeof(*chunk_pages)))
2379 break;
2380 }
80bba129
BG
2381
2382 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2383
87b8d1ad
PA
2384 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2385 break;
742755a1 2386
87b8d1ad
PA
2387 pages += chunk_nr;
2388 status += chunk_nr;
2389 nr_pages -= chunk_nr;
2390 }
2391 return nr_pages ? -EFAULT : 0;
742755a1
CL
2392}
2393
4dc200ce 2394static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
742755a1 2395{
742755a1 2396 struct task_struct *task;
742755a1 2397 struct mm_struct *mm;
742755a1 2398
4dc200ce
ML
2399 /*
2400 * There is no need to check if the current process has the right to modify
2401 * the specified process when they are the same.
2402 */
2403 if (!pid) {
2404 mmget(current->mm);
2405 *mem_nodes = cpuset_mems_allowed(current);
2406 return current->mm;
2407 }
742755a1
CL
2408
2409 /* Find the mm_struct */
a879bf58 2410 rcu_read_lock();
4dc200ce 2411 task = find_task_by_vpid(pid);
742755a1 2412 if (!task) {
a879bf58 2413 rcu_read_unlock();
4dc200ce 2414 return ERR_PTR(-ESRCH);
742755a1 2415 }
3268c63e 2416 get_task_struct(task);
742755a1
CL
2417
2418 /*
2419 * Check if this process has the right to modify the specified
197e7e52 2420 * process. Use the regular "ptrace_may_access()" checks.
742755a1 2421 */
197e7e52 2422 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
c69e8d9c 2423 rcu_read_unlock();
4dc200ce 2424 mm = ERR_PTR(-EPERM);
5e9a0f02 2425 goto out;
742755a1 2426 }
c69e8d9c 2427 rcu_read_unlock();
742755a1 2428
4dc200ce
ML
2429 mm = ERR_PTR(security_task_movememory(task));
2430 if (IS_ERR(mm))
5e9a0f02 2431 goto out;
4dc200ce 2432 *mem_nodes = cpuset_mems_allowed(task);
3268c63e 2433 mm = get_task_mm(task);
4dc200ce 2434out:
3268c63e 2435 put_task_struct(task);
6e8b09ea 2436 if (!mm)
4dc200ce
ML
2437 mm = ERR_PTR(-EINVAL);
2438 return mm;
2439}
2440
2441/*
2442 * Move a list of pages in the address space of the currently executing
2443 * process.
2444 */
2445static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2446 const void __user * __user *pages,
2447 const int __user *nodes,
2448 int __user *status, int flags)
2449{
2450 struct mm_struct *mm;
2451 int err;
2452 nodemask_t task_nodes;
2453
2454 /* Check flags */
2455 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
6e8b09ea
SL
2456 return -EINVAL;
2457
4dc200ce
ML
2458 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2459 return -EPERM;
2460
2461 mm = find_mm_struct(pid, &task_nodes);
2462 if (IS_ERR(mm))
2463 return PTR_ERR(mm);
2464
6e8b09ea
SL
2465 if (nodes)
2466 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2467 nodes, status, flags);
2468 else
2469 err = do_pages_stat(mm, nr_pages, pages, status);
742755a1 2470
742755a1
CL
2471 mmput(mm);
2472 return err;
2473}
742755a1 2474
7addf443
DB
2475SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2476 const void __user * __user *, pages,
2477 const int __user *, nodes,
2478 int __user *, status, int, flags)
2479{
2480 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2481}
2482
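/*
 * Editor's note: an illustrative userspace sketch (not kernel code, kept under
 * "#if 0") of how the move_pages() syscall defined above is typically invoked
 * through libnuma's numaif.h wrapper. The target node, buffer handling and
 * error handling are assumptions made for illustration; passing nodes == NULL
 * only reports, via status, the node each page currently resides on.
 */
#if 0	/* userspace example, build with -lnuma */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *buf;
	void *pages[1];
	int nodes[1] = { 0 };	/* assumed migration target node */
	int status[1];

	if (posix_memalign(&buf, page_size, page_size))
		return 1;
	((char *)buf)[0] = 1;	/* fault the page in so it has a physical home */
	pages[0] = buf;

	/* Move the page to node 0 (pid 0 means the calling process). */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) != 0)
		perror("move_pages");
	else
		printf("page placed with status %d\n", status[0]);

	/* With nodes == NULL, move_pages() only reports each page's node. */
	if (move_pages(0, 1, pages, NULL, status, 0) == 0)
		printf("page currently on node %d\n", status[0]);

	free(buf);
	return 0;
}
#endif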
7039e1db
PZ
2483#ifdef CONFIG_NUMA_BALANCING
2484/*
2485 * Returns true if this is a safe migration target node for misplaced NUMA
bc53008e 2486 * pages. Currently it only checks the watermarks, which is crude.
7039e1db
PZ
2487 */
2488static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
3abef4e6 2489 unsigned long nr_migrate_pages)
7039e1db
PZ
2490{
2491 int z;
599d0c95 2492
7039e1db
PZ
2493 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2494 struct zone *zone = pgdat->node_zones + z;
2495
bc53008e 2496 if (!managed_zone(zone))
7039e1db
PZ
2497 continue;
2498
7039e1db
PZ
2499 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
2500 if (!zone_watermark_ok(zone, 0,
2501 high_wmark_pages(zone) +
2502 nr_migrate_pages,
bfe9d006 2503 ZONE_MOVABLE, 0))
7039e1db
PZ
2504 continue;
2505 return true;
2506 }
2507 return false;
2508}
2509
2510static struct page *alloc_misplaced_dst_page(struct page *page,
666feb21 2511 unsigned long data)
7039e1db
PZ
2512{
2513 int nid = (int) data;
c185e494
MWO
2514 int order = compound_order(page);
2515 gfp_t gfp = __GFP_THISNODE;
2516 struct folio *new;
2517
2518 if (order > 0)
2519 gfp |= GFP_TRANSHUGE_LIGHT;
2520 else {
2521 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2522 __GFP_NOWARN;
2523 gfp &= ~__GFP_RECLAIM;
2524 }
2525 new = __folio_alloc_node(gfp, order, nid);
c5b5a3dd 2526
c185e494 2527 return &new->page;
c5b5a3dd
YS
2528}
2529
1c30e017 2530static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
b32967ff 2531{
2b9b624f 2532 int nr_pages = thp_nr_pages(page);
c574bbe9 2533 int order = compound_order(page);
a8f60772 2534
c574bbe9 2535 VM_BUG_ON_PAGE(order && !PageTransHuge(page), page);
3abef4e6 2536
662aeea7
YS
2537 /* Do not migrate THP mapped by multiple processes */
2538 if (PageTransHuge(page) && total_mapcount(page) > 1)
2539 return 0;
2540
7039e1db 2541 /* Avoid migrating to a node that is nearly full */
c574bbe9
HY
2542 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2543 int z;
2544
2545 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2546 return 0;
2547 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
bc53008e 2548 if (managed_zone(pgdat->node_zones + z))
c574bbe9
HY
2549 break;
2550 }
2551 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
340ef390 2552 return 0;
c574bbe9 2553 }
7039e1db 2554
f7f9c00d 2555 if (!isolate_lru_page(page))
340ef390 2556 return 0;
7039e1db 2557
b75454e1 2558 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page),
2b9b624f 2559 nr_pages);
340ef390 2560
149c33e1 2561 /*
340ef390
HD
2562 * Isolating the page has taken another reference, so the
2563 * caller's reference can be safely dropped without the page
2564 * disappearing underneath us during migration.
149c33e1
MG
2565 */
2566 put_page(page);
340ef390 2567 return 1;
b32967ff
MG
2568}
2569
2570/*
2571 * Attempt to migrate a misplaced page to the specified destination
2572 * node. The caller is expected to have an elevated reference count on
2573 * the page that will be dropped by this function before returning.
2574 */
1bc115d8
MG
2575int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2576 int node)
b32967ff
MG
2577{
2578 pg_data_t *pgdat = NODE_DATA(node);
340ef390 2579 int isolated;
b32967ff 2580 int nr_remaining;
e39bb6be 2581 unsigned int nr_succeeded;
b32967ff 2582 LIST_HEAD(migratepages);
b5916c02 2583 int nr_pages = thp_nr_pages(page);
c5b5a3dd 2584
b32967ff 2585 /*
1bc115d8
MG
2586 * Don't migrate file pages that are mapped in multiple processes
2587 * with execute permissions as they are probably shared libraries.
b32967ff 2588 */
7ee820ee
ML
2589 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2590 (vma->vm_flags & VM_EXEC))
b32967ff 2591 goto out;
b32967ff 2592
09a913a7
MG
2593 /*
2594 * Also do not migrate dirty pages as not all filesystems can move
2595 * dirty pages in MIGRATE_ASYNC mode, which is a waste of cycles.
2596 */
9de4f22a 2597 if (page_is_file_lru(page) && PageDirty(page))
09a913a7
MG
2598 goto out;
2599
b32967ff
MG
2600 isolated = numamigrate_isolate_page(pgdat, page);
2601 if (!isolated)
2602 goto out;
2603
2604 list_add(&page->lru, &migratepages);
c185e494
MWO
2605 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2606 NULL, node, MIGRATE_ASYNC,
2607 MR_NUMA_MISPLACED, &nr_succeeded);
b32967ff 2608 if (nr_remaining) {
59c82b70
JK
2609 if (!list_empty(&migratepages)) {
2610 list_del(&page->lru);
c5fc5c3a
YS
2611 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2612 page_is_file_lru(page), -nr_pages);
59c82b70
JK
2613 putback_lru_page(page);
2614 }
b32967ff 2615 isolated = 0;
e39bb6be
HY
2616 }
2617 if (nr_succeeded) {
2618 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2619 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))
2620 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2621 nr_succeeded);
2622 }
7039e1db 2623 BUG_ON(!list_empty(&migratepages));
7039e1db 2624 return isolated;
340ef390
HD
2625
2626out:
2627 put_page(page);
2628 return 0;
7039e1db 2629}
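/*
 * Editor's note: an illustrative sketch (assumption, not part of migrate.c) of
 * the calling convention documented above, loosely modelled on the NUMA
 * hint-fault path. The helper name is hypothetical; the point is that the
 * caller holds a reference which migrate_misplaced_page() drops before
 * returning, and a non-zero return means the page now lives on target_nid.
 */
static bool __maybe_unused example_try_promote(struct page *page,
					       struct vm_area_struct *vma,
					       int target_nid)
{
	/* This reference is consumed by migrate_misplaced_page(). */
	get_page(page);
	return migrate_misplaced_page(page, vma, target_nid) != 0;
}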
220018d3 2630#endif /* CONFIG_NUMA_BALANCING */
91952440 2631#endif /* CONFIG_NUMA */