mm/khugepaged.c
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "mm_slot.h"

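/*
 * Result codes shared by the scan and collapse paths below; they are also
 * reported through the huge_memory tracepoints.
 */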
enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_NONE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
	SCAN_STORE_FAILED,
	SCAN_COPY_MC,
	SCAN_PAGE_FILLED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* by default, scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation, poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if there is at least one pte mapped,
 * just as would have happened if the vma had been large enough during
 * page fault.
 *
 * Note that these are only respected if collapse was initiated by
 * khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

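/*
 * Per-collapse context, shared by the khugepaged and MADV_COLLAPSE
 * callers; is_khugepaged selects which heuristics apply.
 */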
struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 * @nr_pte_mapped_thp: number of pte-mapped THPs in this mm
 * @pte_mapped_thp: array of addresses of pte-mapped THPs
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

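/*
 * Apply MADV_HUGEPAGE/MADV_NOHUGEPAGE to @vm_flags and, for MADV_HUGEPAGE,
 * register the mm with khugepaged immediately instead of waiting for a
 * page fault.
 */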
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

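/* The mm is defunct once its last user (mm_users) has gone. */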
static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

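/*
 * Register @mm with khugepaged: allocate and hash an mm_slot, queue it on
 * the scan list just behind the cursor, and wake khugepaged if the list
 * was empty. Grabs an mm reference that is dropped when the slot is freed.
 */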
void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		mm_slot_free(mm_slot_cache, mm_slot);
		return;
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

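/*
 * Register the mm on first sight of a suitable vma, unless it is already
 * registered or THP is disabled.
 */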
void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (hugepage_vma_check(vma, vm_flags, false, false, true))
			__khugepaged_enter(vma->vm_mm);
	}
}

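/*
 * Unregister @mm: drop its mm_slot unless khugepaged is currently scanning
 * it, in which case khugepaged frees the slot itself once it notices the
 * exit.
 */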
void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return all
		 * pagetables will be destroyed) until khugepaged has finished
		 * working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_page(struct page *page)
{
	release_pte_folio(page_folio(page));
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}

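/*
 * A page's refcount should equal its mapcount (plus swapcache references,
 * if any); anything above that indicates a GUP or other external pin.
 */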
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}

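/*
 * Lock and isolate from the LRU every page mapped by the pte range, and
 * check each is suitable for collapse (anonymous, unpinned, and writable
 * or referenced enough). On failure, everything isolated so far is
 * released again.
 */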
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		if (pte_uffd_wp(pteval)) {
			result = SCAN_PTE_UFFD_WP;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there is
		 * enough young pte to justify collapsing the page
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return result;
}

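/*
 * Collapse succeeded: clear the old ptes, drop rmap on the source pages
 * and free them back via the swapcache/LRU, fixing up mm counters.
 */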
static void __collapse_huge_page_copy_succeeded(pte_t *pte,
						struct vm_area_struct *vma,
						unsigned long address,
						spinlock_t *ptl,
						struct list_head *compound_pagelist)
{
	struct page *src_page;
	struct page *tmp;
	pte_t *_pte;
	pte_t pteval;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pteval = *_pte;
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

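/*
 * Copying hit a memory error (SCAN_COPY_MC): put the original page table
 * back in place and release the isolated source pages.
 */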
static void __collapse_huge_page_copy_failed(pte_t *pte,
					     pmd_t *pmd,
					     pmd_t orig_pmd,
					     struct vm_area_struct *vma,
					     struct list_head *compound_pagelist)
{
	spinlock_t *pmd_ptl;

	/*
	 * Re-establish the PMD to point to the original page table
	 * entry. Restoring PMD needs to be done prior to releasing
	 * pages. Since pages are still isolated and locked here,
	 * acquiring anon_vma_lock_write is unnecessary.
	 */
	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
	spin_unlock(pmd_ptl);
	/*
	 * Release both raw and compound pages isolated
	 * in __collapse_huge_page_isolate.
	 */
	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
}

/*
 * __collapse_huge_page_copy - attempts to copy memory contents from raw
 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
 * otherwise restores the original page table and releases isolated raw pages.
 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
 *
 * @pte: starting of the PTEs to copy from
 * @page: the new hugepage to copy contents to
 * @pmd: pointer to the new hugepage's PMD
 * @orig_pmd: the original raw pages' PMD
 * @vma: the original raw pages' virtual memory area
 * @address: starting address to copy
 * @ptl: lock on raw pages' PTEs
 * @compound_pagelist: list that stores compound pages
 */
static int __collapse_huge_page_copy(pte_t *pte,
				     struct page *page,
				     pmd_t *pmd,
				     pmd_t orig_pmd,
				     struct vm_area_struct *vma,
				     unsigned long address,
				     spinlock_t *ptl,
				     struct list_head *compound_pagelist)
{
	struct page *src_page;
	pte_t *_pte;
	pte_t pteval;
	unsigned long _address;
	int result = SCAN_SUCCEED;

	/*
	 * Copying pages' contents is subject to memory poison at any iteration.
	 */
	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, _address += PAGE_SIZE) {
		pteval = *_pte;
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, _address);
			continue;
		}
		src_page = pte_page(pteval);
		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
	}

	if (likely(result == SCAN_SUCCEED))
		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
						    compound_pagelist);
	else
		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
						 compound_pagelist);

	return result;
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};

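/*
 * Decide whether to abort a NUMA-aware scan: with node reclaim enabled,
 * once pages from one node have been counted, seeing a node that is too
 * far from every counted node is not worth collapsing across.
 */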
static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
				      nodemask_t *nmask)
{
	*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}

/*
 * If mmap_lock was temporarily dropped, revalidate the vma
 * before taking mmap_lock.
 * Returns an enum scan_result value.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!transhuge_vma_suitable(vma, address))
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
				cc->is_khugepaged))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected: the address may be unmapped and then
	 * remapped to file after khugepaged reacquired the mmap_lock.
	 *
	 * hugepage_vma_check may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

/*
 * See pmd_trans_unstable() for how the result may change out from
 * underneath us, even if we hold mmap_lock in read.
 */
static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	/* See comments in pmd_none_or_trans_huge_or_clear_bad() */
	barrier();
#endif
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

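/* Re-check that the pmd found earlier is still the one mapped at @address. */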
static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that if anything other than SCAN_SUCCEED is returned, mmap_lock
 * has been released.
 */

static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and the swap entry will remain in the
		 * pagetable, resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			/* Likely, but not guaranteed, that page lock failed */
			return SCAN_PAGE_LOCK;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return SCAN_FAIL;
		}
		swapped_in++;
	}

	/* Drain LRU-add pagevec to remove extra pin on the swapped-in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return SCAN_SUCCEED;
}

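/*
 * Allocate a fresh hugepage on the chosen node and charge it to the mm's
 * memcg; the gfp mask depends on whether khugepaged or MADV_COLLAPSE is
 * asking.
 */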
static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
		return SCAN_ALLOC_HUGE_PAGE_FAIL;

	folio = page_folio(*hpage);
	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}
	count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);

	return SCAN_SUCCEED;
}

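/*
 * The core of anonymous collapse: allocate and charge a hugepage, swap in
 * any missing pages, then under the exclusive mmap_lock clear the pmd,
 * isolate and copy the small pages, and install the new huge pmd. Returns
 * an enum scan_result; mmap_lock is released before returning.
 */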
static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	vma_start_write(vma);
	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	spin_lock(pte_ptl);
	result = __collapse_huge_page_isolate(vma, address, pte, cc,
					      &compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(result != SCAN_SUCCEED)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
					   vma, address, pte_ptl,
					   &compound_pagelist);
	pte_unmap(pte);
	if (unlikely(result != SCAN_SUCCEED))
		goto out_up_write;

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * avoid the copy_huge_page writes to become visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(hpage);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(hpage, vma, address);
	lru_cache_add_inactive_or_unevictable(hpage, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (hpage)
		put_page(hpage);
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}

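/*
 * Scan one pmd range and decide whether it is worth collapsing; on
 * SCAN_SUCCEED the collapse itself is attempted, mmap_lock is dropped and
 * *mmap_locked is cleared for the caller.
 */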
static int hpage_collapse_scan_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long address, bool *mmap_locked,
				   struct collapse_control *cc)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int result = SCAN_FAIL, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED)
		goto out;

	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			++unmapped;
			if (!cc->is_khugepaged ||
			    unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp_any(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we could also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked, but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out_unmap;
			}
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to cc->node_load[].
		 * Khugepaged will allocate a hugepage from the node that has
		 * the max hit record.
		 */
		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		cc->node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check may be racy:
		 * it may see total_mapcount > refcount in some cases?
		 * But such a case is ephemeral; we could always retry collapse
		 * later. However it may report a false positive if the page
		 * has excessive GUP pins (i.e. 512). Anyway, the same check
		 * will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}

		/*
		 * If collapse was initiated by khugepaged, check that there is
		 * enough young pte to justify collapsing the page
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || page_is_young(page) ||
		     PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
								     address)))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (cc->is_khugepaged &&
		   (!referenced ||
		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (result == SCAN_SUCCEED) {
		result = collapse_huge_page(mm, address, referenced,
					    unmapped, cc);
		/* collapse_huge_page will return with the mmap_lock released */
		*mmap_locked = false;
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return result;
}

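/*
 * Reap the mm_slot of an mm that exited while we were scanning it; called
 * with khugepaged_mm_lock held.
 */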
static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
{
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (hpage_collapse_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&slot->hash);
		list_del(&slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 *
 * Note that the following race exists:
 * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
 *     emptying A's ->pte_mapped_thp[] array.
 * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
 *     retract_page_tables() finds a VMA in mm_struct A mapping the same extent
 *     (at virtual address X) and adds an entry (for X) into mm_struct A's
 *     ->pte_mapped_thp[] array.
 * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
 *     sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
 *     (for X) into mm_struct A's ->pte_mapped_thp[] array.
 * Thus, it's possible the same address is added multiple times for the same
 * mm_struct. Should this happen, we'll simply attempt
 * collapse_pte_mapped_thp() multiple times for the same address, under the same
 * exclusive mmap_lock, and assuming the first call is successful, subsequent
 * attempts will return quickly (without grabbing any additional locks) when
 * a huge pmd is found in find_pmd_or_thp_or_none(). Since this is a cheap
 * check, and since this is a rare occurrence, the cost of preventing this
 * "multiple-add" is thought to be more expensive than just handling it, should
 * it occur.
 */
static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	bool ret = false;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
		ret = true;
	}
	spin_unlock(&khugepaged_mm_lock);
	return ret;
}

/* hpage must be locked, and mmap_lock must be held in write */
static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmdp, struct page *hpage)
{
	struct vm_fault vmf = {
		.vma = vma,
		.address = addr,
		.flags = 0,
		.pmd = pmdp,
	};

	VM_BUG_ON(!PageTransHuge(hpage));
	mmap_assert_write_locked(vma->vm_mm);

	if (do_set_pmd(&vmf, hpage))
		return SCAN_FAIL;

	get_page(hpage);
	return SCAN_SUCCEED;
}

/*
 * A note about locking:
 * Trying to take the page table spinlocks would be useless here because those
 * are only used to synchronize:
 *
 *  - modifying terminal entries (ones that point to a data page, not to another
 *    page table)
 *  - installing *new* non-terminal entries
 *
 * Instead, we need roughly the same kind of protection as free_pgtables() or
 * mm_take_all_locks() (but only for a single VMA):
 * The mmap lock together with this VMA's rmap locks covers all paths towards
 * the page table entries we're messing with here, except for hardware page
 * table walks and lockless_pages_from_mm().
 */
static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd;
	struct mmu_notifier_range range;

	mmap_assert_write_locked(mm);
	if (vma->vm_file)
		lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
	/*
	 * All anon_vmas attached to the VMA have the same root and are
	 * therefore locked by the same lock.
	 */
	if (vma->anon_vma)
		lockdep_assert_held_write(&vma->anon_vma->root->rwsem);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
				addr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	pmd = pmdp_collapse_flush(vma, addr, pmdp);
	tlb_remove_table_sync_one();
	mmu_notifier_invalidate_range_end(&range);
	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, addr, pmd);
	pte_free(mm, pmd_pgtable(pmd));
}

27e1f827 1535/**
336e6b53
AS
1536 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1537 * address haddr.
1538 *
1539 * @mm: process address space where collapse happens
1540 * @addr: THP collapse address
34488399 1541 * @install_pmd: If a huge PMD should be installed
27e1f827
SL
1542 *
1543 * This function checks whether all the PTEs in the PMD are pointing to the
1544 * right THP. If so, retract the page table so the THP can refault in with
34488399 1545 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
27e1f827 1546 */
34488399
ZK
1547int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1548 bool install_pmd)
27e1f827
SL
1549{
1550 unsigned long haddr = addr & HPAGE_PMD_MASK;
94d815b2 1551 struct vm_area_struct *vma = vma_lookup(mm, haddr);
119a5fc1 1552 struct page *hpage;
27e1f827 1553 pte_t *start_pte, *pte;
e59a47b8 1554 pmd_t *pmd;
27e1f827 1555 spinlock_t *ptl;
58ac9a89 1556 int count = 0, result = SCAN_FAIL;
27e1f827
SL
1557 int i;
1558
58ac9a89
ZK
1559 mmap_assert_write_locked(mm);
1560
34488399 1561 /* Fast check before locking page if already PMD-mapped */
58ac9a89 1562 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
34488399
ZK
1563 if (result == SCAN_PMD_MAPPED)
1564 return result;
58ac9a89 1565
27e1f827 1566 if (!vma || !vma->vm_file ||
fef792a4 1567 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
34488399 1568 return SCAN_VMA_CHECK;
27e1f827
SL
1569
1570 /*
a7f4e6e4
ZK
1571 * If we are here, we've succeeded in replacing all the native pages
1572 * in the page cache with a single hugepage. If a mm were to fault-in
1573 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1574 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1575 * analogously elide sysfs THP settings here.
27e1f827 1576 */
a7f4e6e4 1577 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
34488399 1578 return SCAN_VMA_CHECK;
27e1f827 1579
deb4c93a
PX
1580 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1581 if (userfaultfd_wp(vma))
34488399 1582 return SCAN_PTE_UFFD_WP;
deb4c93a 1583
119a5fc1
	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return SCAN_PAGE_NULL;

	if (!PageHead(hpage)) {
		result = SCAN_FAIL;
		goto drop_hpage;
	}

	if (compound_order(hpage) != HPAGE_PMD_ORDER) {
		result = SCAN_PAGE_COMPOUND;
		goto drop_hpage;
	}

	switch (result) {
	case SCAN_SUCCEED:
		break;
	case SCAN_PMD_NONE:
		/*
		 * In MADV_COLLAPSE path, possible race with khugepaged where
		 * all pte entries have been removed and pmd cleared.  If so,
		 * skip all the pte checks and just update the pmd mapping.
		 */
		goto maybe_install_pmd;
	default:
		goto drop_hpage;
	}

	/* Lock the vma before taking i_mmap and page table locks */
	vma_start_write(vma);

	/*
	 * We need to lock the mapping so that from here on, only GUP-fast and
	 * hardware page walks can access the parts of the page tables that
	 * we're operating on.
	 * See collapse_and_free_pmd().
	 */
	i_mmap_lock_write(vma->vm_file->f_mapping);

	/*
	 * This spinlock should be unnecessary: Nobody else should be accessing
	 * the page tables under spinlock protection here, only
	 * lockless_pages_from_mm() and the hardware page walker can access page
	 * tables while all the high-level locks are held in write mode.
	 */
	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
	result = SCAN_FAIL;

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte)) {
			result = SCAN_PTE_NON_PRESENT;
			goto abort;
		}

		page = vm_normal_page(vma, addr, *pte);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			page = NULL;
		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			goto abort;
		page_remove_rmap(page, vma, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: remove pte entries */
	/* we make no change to anon, but protect concurrent anon page lookup */
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);

	collapse_and_free_pmd(mm, vma, haddr, pmd);

	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	i_mmap_unlock_write(vma->vm_file->f_mapping);

maybe_install_pmd:
	/* step 5: install pmd entry */
	result = install_pmd
			? set_huge_pmd(vma, haddr, pmd, hpage)
			: SCAN_SUCCEED;

drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return result;

abort:
	pte_unmap_unlock(start_pte, ptl);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	goto drop_hpage;
}

static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
{
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return;

	if (!mmap_write_trylock(mm))
		return;

	if (unlikely(hpage_collapse_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
}

static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
			       struct mm_struct *target_mm,
			       unsigned long target_addr, struct page *hpage,
			       struct collapse_control *cc)
{
	struct vm_area_struct *vma;
	int target_result = SCAN_FAIL;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		int result = SCAN_FAIL;
		struct mm_struct *mm = NULL;
		unsigned long addr = 0;
		pmd_t *pmd;
		bool is_target = false;

		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to.  These VMAs are likely not worth investing
		 * mmap_write_lock(mm) as the PMD-mapping is likely to be
		 * split later anyway.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set up
		 * by the fault path after the check but before we take
		 * mmap_lock.  But the page lock prevents establishing any new
		 * ptes of the page, so we are safe.
		 *
		 * An alternative would be to drop the check, but then check
		 * that the page table is clear before calling
		 * pmdp_collapse_flush() under ptl.  That has a higher chance
		 * of recovering a THP for the VMA, but also a higher cost.
		 * It would also probably require locking the anon_vma.
		 */
		if (READ_ONCE(vma->anon_vma)) {
			result = SCAN_PAGE_ANON;
			goto next;
		}
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK ||
		    vma->vm_end < addr + HPAGE_PMD_SIZE) {
			result = SCAN_VMA_CHECK;
			goto next;
		}
		mm = vma->vm_mm;
		is_target = mm == target_mm && addr == target_addr;
		result = find_pmd_or_thp_or_none(mm, addr, &pmd);
		if (result != SCAN_SUCCEED)
			goto next;
		/*
		 * We need exclusive mmap_lock to retract page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_lock while holding page lock.  Fault path does it in
		 * reverse order.  Trylock is a way to avoid deadlock.
		 *
		 * Also, it's not MADV_COLLAPSE's job to collapse other
		 * mappings - let khugepaged take care of them later.
		 */
		result = SCAN_PTE_MAPPED_HUGEPAGE;
		if ((cc->is_khugepaged || is_target) &&
		    mmap_write_trylock(mm)) {
			/* trylock for the same lock inversion as above */
			if (!vma_try_start_write(vma))
				goto unlock_next;

			/*
			 * Re-check whether we have an ->anon_vma, because
			 * collapse_and_free_pmd() requires that either no
			 * ->anon_vma exists or the anon_vma is locked.
			 * We already checked ->anon_vma above, but that check
			 * is racy because ->anon_vma can be populated under the
			 * mmap lock in read mode.
			 */
			if (vma->anon_vma) {
				result = SCAN_PAGE_ANON;
				goto unlock_next;
			}
			/*
			 * When a vma is registered with uffd-wp, we cannot
			 * recycle the pmd pgtable because there may be pte
			 * markers installed.  Only skip this vma, so the rest
			 * of the mm/vmas can still have the same file mapped
			 * hugely; however, it will always be mapped with
			 * small pages for uffd-wp registered ranges.
			 */
			if (hpage_collapse_test_exit(mm)) {
				result = SCAN_ANY_PROCESS;
				goto unlock_next;
			}
			if (userfaultfd_wp(vma)) {
				result = SCAN_PTE_UFFD_WP;
				goto unlock_next;
			}
			collapse_and_free_pmd(mm, vma, addr, pmd);
			if (!cc->is_khugepaged && is_target)
				result = set_huge_pmd(vma, addr, pmd, hpage);
			else
				result = SCAN_SUCCEED;

unlock_next:
			mmap_write_unlock(mm);
			goto next;
		}
		/*
		 * Calling context will handle target mm/addr.  Otherwise, let
		 * khugepaged try again later.
		 */
		if (!is_target) {
			khugepaged_add_pte_mapped_thp(mm, addr);
			continue;
		}
next:
		if (is_target)
			target_result = result;
	}
	i_mmap_unlock_write(mapping);
	return target_result;
}

/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
 *
 * @mm: process address space where collapse happens
 * @addr: virtual collapse start address
 * @file: file that the collapse operates on
 * @start: first page offset of the collapse range
 * @cc: collapse context and scratchpad
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache, locking old pages
 *    + swap/gup in pages if necessary;
 *  - copy data to new page
 *  - handle shmem holes
 *    + re-validate that holes weren't filled by someone else
 *    + check for userfaultfd
 *  - finalize updates to the page cache;
 *  - if replacing succeeds:
 *    + unlock huge page;
 *    + free old pages;
 *  - if replacing fails:
 *    + unlock old pages
 *    + unlock and free huge page;
 */
static int collapse_file(struct mm_struct *mm, unsigned long addr,
			 struct file *file, pgoff_t start,
			 struct collapse_control *cc)
{
	struct address_space *mapping = file->f_mapping;
	struct page *hpage;
	struct page *page;
	struct page *tmp;
	struct folio *folio;
	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);
	int nr = 0;

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out;

	__SetPageLocked(hpage);
	if (is_shmem)
		__SetPageSwapBacked(hpage);
	hpage->index = start;
	hpage->mapping = mapping;

	/*
	 * Ensure we have slots for all the pages in the range.  This is
	 * almost certainly a no-op because most of the pages must be present.
	 */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto rollback;
		}
	} while (1);

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index + 1);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_get_folio(mapping->host, index,
						    &folio, SGP_NOALLOC)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = folio_file_page(folio, index);
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write.  There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation.  We are not
				 * forcing writeback in a loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (PageWriteback(page)) {
				xas_unlock_irq(&xas);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 * This will be discovered on the first iteration.
		 */
		if (PageTransCompound(page)) {
			struct page *head = compound_head(page);

			result = compound_order(head) == HPAGE_PMD_ORDER &&
					head->index == start
					/* Maybe PMD-mapped */
					? SCAN_PTE_MAPPED_HUGEPAGE
					: SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		folio = page_folio(page);

		if (folio_mapping(folio) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && (folio_test_dirty(folio) ||
				  folio_test_writeback(folio))) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (!folio_isolate_lru(folio)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (folio_has_private(folio) &&
		    !filemap_release_folio(folio, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			folio_putback_lru(folio);
			goto out_unlock;
		}

		if (folio_mapped(folio))
			try_to_unmap(folio,
					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);

		/*
		 * We control three references to the page:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 * If those are the only references, then any new usage of the
		 * page will have to fetch it from the page cache.  That
		 * requires locking the page to handle truncate, so any new
		 * usage will be blocked until we unlock the page after
		 * collapse/during rollback.
		 */
		if (page_count(page) != 3) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Accumulate the pages that are being collapsed.
		 */
		list_add_tail(&page->lru, &pagelist);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}

	if (!is_shmem) {
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * i_writecount is up to date and the update to nr_thps is
		 * visible.  Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		smp_mb();
		if (inode_is_open_for_write(mapping->host)) {
			result = SCAN_FAIL;
			filemap_nr_thps_dec(mapping);
		}
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	/*
	 * If collapse is successful, flush must be done now before copying.
	 * If collapse is unsuccessful, does flush actually need to be done?
	 * Do it anyway, to clear the state.
	 */
	try_to_unmap_flush();

	if (result != SCAN_SUCCEED)
		goto rollback;

	/*
	 * The old pages are locked, so they won't change anymore.
	 */
	index = start;
	list_for_each_entry(page, &pagelist, lru) {
		while (index < page->index) {
			clear_highpage(hpage + (index % HPAGE_PMD_NR));
			index++;
		}
		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
			result = SCAN_COPY_MC;
			goto rollback;
		}
		index++;
	}
	while (index < end) {
		clear_highpage(hpage + (index % HPAGE_PMD_NR));
		index++;
	}

	if (nr_none) {
		struct vm_area_struct *vma;
		int nr_none_check = 0;

		i_mmap_lock_read(mapping);
		xas_lock_irq(&xas);

		xas_set(&xas, start);
		for (index = start; index < end; index++) {
			if (!xas_next(&xas)) {
				xas_store(&xas, XA_RETRY_ENTRY);
				if (xas_error(&xas)) {
					result = SCAN_STORE_FAILED;
					goto immap_locked;
				}
				nr_none_check++;
			}
		}

		if (nr_none != nr_none_check) {
			result = SCAN_PAGE_FILLED;
			goto immap_locked;
		}

		/*
		 * If userspace observed a missing page in a VMA with a
		 * MODE_MISSING userfaultfd, then it might expect a
		 * UFFD_EVENT_PAGEFAULT for that page.  If so, we need to roll
		 * back to avoid suppressing such an event.  wp/minor
		 * userfaultfds don't give userspace any guarantee that the
		 * kernel won't fill a missing page with a zero page, so they
		 * don't matter here.
		 *
		 * Any userfaultfds registered after this point will not be
		 * able to observe any missing pages due to the previously
		 * inserted retry entries.
		 */
		vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
			if (userfaultfd_missing(vma)) {
				result = SCAN_EXCEED_NONE_PTE;
				goto immap_locked;
			}
		}

immap_locked:
		i_mmap_unlock_read(mapping);
		if (result != SCAN_SUCCEED) {
			xas_set(&xas, start);
			for (index = start; index < end; index++) {
				if (xas_next(&xas) == XA_RETRY_ENTRY)
					xas_store(&xas, NULL);
			}

			xas_unlock_irq(&xas);
			goto rollback;
		}
	} else {
		xas_lock_irq(&xas);
	}
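
	/*
	 * Illustrative note, not from the original source: XA_RETRY_ENTRY
	 * is normally only observed transiently by lockless XArray walkers,
	 * which respond by retrying their lookup.  Storing it over the
	 * holes above (under xa_lock) therefore makes the holes
	 * unobservable: a concurrent RCU lookup keeps retrying until we
	 * either commit the huge page below or restore NULL on the rollback
	 * path.
	 */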

	nr = thp_nr_pages(hpage);
	if (is_shmem)
		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
	else
		__mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);

	if (nr_none) {
		__mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
		/* nr_none is always 0 for non-shmem. */
		__mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
	}

	/*
	 * Mark hpage as uptodate before inserting it into the page cache so
	 * that it isn't mistaken for a fallocated but unwritten page.
	 */
	folio = page_folio(hpage);
	folio_mark_uptodate(folio);
	folio_ref_add(folio, HPAGE_PMD_NR - 1);

	if (is_shmem)
		folio_mark_dirty(folio);
	folio_add_lru(folio);

	/* Join all the small entries into a single multi-index entry. */
	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
	xas_store(&xas, hpage);
	WARN_ON_ONCE(xas_error(&xas));
	xas_unlock_irq(&xas);
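
	/*
	 * A minimal sketch, not from the original source, of what the
	 * multi-index store above achieves: one entry now covers all
	 * HPAGE_PMD_NR indices, so a lookup anywhere in the range returns
	 * the huge page:
	 *
	 *	XA_STATE(xas2, &mapping->i_pages, start);
	 *	rcu_read_lock();
	 *	WARN_ON(xas_load(&xas2) != hpage);
	 *	xas_set(&xas2, start + HPAGE_PMD_NR - 1);
	 *	WARN_ON(xas_load(&xas2) != hpage);   <- same entry
	 *	rcu_read_unlock();
	 */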

	/*
	 * Remove pte page tables, so we can re-fault the page as huge.
	 */
	result = retract_page_tables(mapping, start, mm, addr, hpage,
				     cc);
	unlock_page(hpage);

	/*
	 * The collapse has succeeded, so free the old pages.
	 */
	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
		list_del(&page->lru);
		page->mapping = NULL;
		ClearPageActive(page);
		ClearPageUnevictable(page);
		unlock_page(page);
		folio_put_refs(page_folio(page), 3);
	}

	goto out;

rollback:
	/* Something went wrong: roll back page cache changes */
	if (nr_none) {
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;
		shmem_uncharge(mapping->host, nr_none);
		xas_unlock_irq(&xas);
	}

	list_for_each_entry_safe(page, tmp, &pagelist, lru) {
		list_del(&page->lru);
		unlock_page(page);
		putback_lru_page(page);
		put_page(page);
	}
	/*
	 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
	 * file only.  This undo is not needed unless failure is
	 * due to SCAN_COPY_MC.
	 */
	if (!is_shmem && result == SCAN_COPY_MC) {
		filemap_nr_thps_dec(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to
		 * ensure the update to nr_thps is visible.
		 */
		smp_mb();
	}

	hpage->mapping = NULL;

	unlock_page(hpage);
	put_page(hpage);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
	return result;
}

static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
				    struct file *file, pgoff_t start,
				    struct collapse_control *cc)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			++swap;
			if (cc->is_khugepaged &&
			    swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				break;
			}
			continue;
		}

		/*
		 * TODO: khugepaged should compact smaller compound pages
		 * into a PMD sized page
		 */
		if (PageTransCompound(page)) {
			struct page *head = compound_head(page);

			result = compound_order(head) == HPAGE_PMD_ORDER &&
					head->index == start
					/* Maybe PMD-mapped */
					? SCAN_PTE_MAPPED_HUGEPAGE
					: SCAN_PAGE_COMPOUND;
			/*
			 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
			 * by the caller won't touch the page cache, and so
			 * it's safe to skip LRU and refcount checks before
			 * returning.
			 */
			break;
		}

		node = page_to_nid(page);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		cc->node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}
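
		/*
		 * Note, not from the original source, on the refcount check
		 * above: an idle page-cache page is expected to hold one
		 * reference from the page cache itself, one per pte mapping
		 * it (page_mapcount()), and one more if the filesystem
		 * attached private data (page_has_private()).  Anything
		 * beyond that sum means someone else holds a transient pin
		 * (e.g. GUP), so collapsing now would race with them.
		 */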

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (cc->is_khugepaged &&
		    present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
		} else {
			result = collapse_file(mm, addr, file, start, cc);
		}
	}

	trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
	return result;
}
#else
static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
				    struct file *file, pgoff_t start,
				    struct collapse_control *cc)
{
	BUILD_BUG();
}

static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
{
}

static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr)
{
	return false;
}
#endif

static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
					    struct collapse_control *cc)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct vma_iterator vmi;
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);
	*result = SCAN_FAIL;

	if (khugepaged_scan.mm_slot) {
		mm_slot = khugepaged_scan.mm_slot;
		slot = &mm_slot->slot;
	} else {
		slot = list_entry(khugepaged_scan.mm_head.next,
				  struct mm_slot, mm_node);
		mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = slot->mm;
	/*
	 * Don't wait for the semaphore (to avoid long wait times).  Just
	 * move to the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_lock;

	progress++;
	if (unlikely(hpage_collapse_test_exit(mm)))
		goto breakouterloop;

	vma_iter_init(&vmi, mm, khugepaged_scan.address);
	for_each_vma(vmi, vma) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(hpage_collapse_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
skip:
			progress++;
			continue;
		}
		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			bool mmap_locked = true;

			cond_resched();
			if (unlikely(hpage_collapse_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				mmap_read_unlock(mm);
				*result = hpage_collapse_scan_file(mm,
								   khugepaged_scan.address,
								   file, pgoff, cc);
				mmap_locked = false;
				fput(file);
			} else {
				*result = hpage_collapse_scan_pmd(mm, vma,
								  khugepaged_scan.address,
								  &mmap_locked,
								  cc);
			}
			switch (*result) {
			case SCAN_PTE_MAPPED_HUGEPAGE: {
				pmd_t *pmd;

				*result = find_pmd_or_thp_or_none(mm,
								  khugepaged_scan.address,
								  &pmd);
				if (*result != SCAN_SUCCEED)
					break;
				if (!khugepaged_add_pte_mapped_thp(mm,
								   khugepaged_scan.address))
					break;
			} fallthrough;
			case SCAN_SUCCEED:
				++khugepaged_pages_collapsed;
				break;
			default:
				break;
			}

			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (!mmap_locked)
				/*
				 * We released mmap_lock, so break out of the
				 * loop.  Note that we drop mmap_lock before
				 * all hugepage allocations, so if allocation
				 * fails, we are guaranteed to break here and
				 * report the correct result back to the
				 * caller.
				 */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (hpage_collapse_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (slot->mm_node.next != &khugepaged_scan.mm_head) {
			slot = list_entry(slot->mm_node.next,
					  struct mm_slot, mm_node);
			khugepaged_scan.mm_slot =
				mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		hugepage_flags_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(struct collapse_control *cc)
{
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;
	int result = SCAN_SUCCEED;

	lru_add_drain_all();

	while (true) {
		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &result, cc);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);

		if (progress >= pages)
			break;

		if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
			/*
			 * If allocation fails the first time, sleep for a
			 * while.  If it fails again, cancel the scan.
			 */
			if (!wait)
				break;
			wait = false;
			khugepaged_alloc_sleep();
		}
	}
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (hugepage_flags_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct khugepaged_mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan(&khugepaged_collapse_control);
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!hugepage_flags_enabled()) {
		calculate_min_free_kbytes();
		goto update_wmarks;
	}

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types.  There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

update_wmarks:
	setup_per_zone_wmarks();
}

int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (hugepage_flags_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	set_recommended_min_free_kbytes();
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}

void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (hugepage_flags_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}

bool current_is_khugepaged(void)
{
	return kthread_func(current) == khugepaged;
}

static int madvise_collapse_errno(enum scan_result r)
{
	/*
	 * MADV_COLLAPSE breaks from existing madvise(2) conventions to
	 * provide actionable feedback to the caller, so they may take an
	 * appropriate fallback measure depending on the nature of the
	 * failure.
	 */
	switch (r) {
	case SCAN_ALLOC_HUGE_PAGE_FAIL:
		return -ENOMEM;
	case SCAN_CGROUP_CHARGE_FAIL:
	case SCAN_EXCEED_NONE_PTE:
		return -EBUSY;
	/* Resource temporarily unavailable - trying again might succeed */
	case SCAN_PAGE_COUNT:
	case SCAN_PAGE_LOCK:
	case SCAN_PAGE_LRU:
	case SCAN_DEL_PAGE_LRU:
	case SCAN_PAGE_FILLED:
		return -EAGAIN;
	/*
	 * Other: trying again is unlikely to succeed / the error is
	 * intrinsic to the specified memory range.  khugepaged likely
	 * won't be able to collapse it either.
	 */
	default:
		return -EINVAL;
	}
}

int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
		     unsigned long start, unsigned long end)
{
	struct collapse_control *cc;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long hstart, hend, addr;
	int thps = 0, last_fail = SCAN_FAIL;
	bool mmap_locked = true;

	BUG_ON(vma->vm_start > start);
	BUG_ON(vma->vm_end < end);

	*prev = vma;

	if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
		return -EINVAL;

	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
	if (!cc)
		return -ENOMEM;
	cc->is_khugepaged = false;

	mmgrab(mm);
	lru_add_drain_all();

	hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = end & HPAGE_PMD_MASK;

	for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
		int result = SCAN_FAIL;

		if (!mmap_locked) {
			cond_resched();
			mmap_read_lock(mm);
			mmap_locked = true;
			result = hugepage_vma_revalidate(mm, addr, false, &vma,
							 cc);
			if (result != SCAN_SUCCEED) {
				last_fail = result;
				goto out_nolock;
			}

			hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
		}
		mmap_assert_locked(mm);
		memset(cc->node_load, 0, sizeof(cc->node_load));
		nodes_clear(cc->alloc_nmask);
		if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
			struct file *file = get_file(vma->vm_file);
			pgoff_t pgoff = linear_page_index(vma, addr);

			mmap_read_unlock(mm);
			mmap_locked = false;
			result = hpage_collapse_scan_file(mm, addr, file, pgoff,
							  cc);
			fput(file);
		} else {
			result = hpage_collapse_scan_pmd(mm, vma, addr,
							 &mmap_locked, cc);
		}
		if (!mmap_locked)
			*prev = NULL; /* Tell caller we dropped mmap_lock */

handle_result:
		switch (result) {
		case SCAN_SUCCEED:
		case SCAN_PMD_MAPPED:
			++thps;
			break;
		case SCAN_PTE_MAPPED_HUGEPAGE:
			BUG_ON(mmap_locked);
			BUG_ON(*prev);
			mmap_write_lock(mm);
			result = collapse_pte_mapped_thp(mm, addr, true);
			mmap_write_unlock(mm);
			goto handle_result;
		/* Whitelisted set of results where continuing is OK */
		case SCAN_PMD_NULL:
		case SCAN_PTE_NON_PRESENT:
		case SCAN_PTE_UFFD_WP:
		case SCAN_PAGE_RO:
		case SCAN_LACK_REFERENCED_PAGE:
		case SCAN_PAGE_NULL:
		case SCAN_PAGE_COUNT:
		case SCAN_PAGE_LOCK:
		case SCAN_PAGE_COMPOUND:
		case SCAN_PAGE_LRU:
		case SCAN_DEL_PAGE_LRU:
			last_fail = result;
			break;
		default:
			last_fail = result;
			/* Other error, exit */
			goto out_maybelock;
		}
	}

out_maybelock:
	/* Caller expects us to hold mmap_lock on return */
	if (!mmap_locked)
		mmap_read_lock(mm);
out_nolock:
	mmap_assert_locked(mm);
	mmdrop(mm);
	kfree(cc);

	return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
			: madvise_collapse_errno(last_fail);
}