mm/khugepaged.c
1// SPDX-License-Identifier: GPL-2.0
2#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4#include <linux/mm.h>
5#include <linux/sched.h>
6#include <linux/sched/mm.h>
7#include <linux/sched/coredump.h>
8#include <linux/mmu_notifier.h>
9#include <linux/rmap.h>
10#include <linux/swap.h>
11#include <linux/mm_inline.h>
12#include <linux/kthread.h>
13#include <linux/khugepaged.h>
14#include <linux/freezer.h>
15#include <linux/mman.h>
16#include <linux/hashtable.h>
17#include <linux/userfaultfd_k.h>
18#include <linux/page_idle.h>
19#include <linux/page_table_check.h>
20#include <linux/rcupdate_wait.h>
21#include <linux/swapops.h>
22#include <linux/shmem_fs.h>
23#include <linux/ksm.h>
24
25#include <asm/tlb.h>
26#include <asm/pgalloc.h>
27#include "internal.h"
28#include "mm_slot.h"
29
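/* Result codes of a scan/collapse attempt, also reported via the huge_memory tracepoints. */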
30enum scan_result {
31 SCAN_FAIL,
32 SCAN_SUCCEED,
33 SCAN_PMD_NULL,
34 SCAN_PMD_NONE,
35 SCAN_PMD_MAPPED,
36 SCAN_EXCEED_NONE_PTE,
37 SCAN_EXCEED_SWAP_PTE,
38 SCAN_EXCEED_SHARED_PTE,
39 SCAN_PTE_NON_PRESENT,
40 SCAN_PTE_UFFD_WP,
41 SCAN_PTE_MAPPED_HUGEPAGE,
42 SCAN_PAGE_RO,
43 SCAN_LACK_REFERENCED_PAGE,
44 SCAN_PAGE_NULL,
45 SCAN_SCAN_ABORT,
46 SCAN_PAGE_COUNT,
47 SCAN_PAGE_LRU,
48 SCAN_PAGE_LOCK,
49 SCAN_PAGE_ANON,
50 SCAN_PAGE_COMPOUND,
51 SCAN_ANY_PROCESS,
52 SCAN_VMA_NULL,
53 SCAN_VMA_CHECK,
54 SCAN_ADDRESS_RANGE,
55 SCAN_DEL_PAGE_LRU,
56 SCAN_ALLOC_HUGE_PAGE_FAIL,
57 SCAN_CGROUP_CHARGE_FAIL,
58 SCAN_TRUNCATED,
59 SCAN_PAGE_HAS_PRIVATE,
60 SCAN_STORE_FAILED,
61 SCAN_COPY_MC,
62 SCAN_PAGE_FILLED,
63};
64
65#define CREATE_TRACE_POINTS
66#include <trace/events/huge_memory.h>
67
68static struct task_struct *khugepaged_thread __read_mostly;
69static DEFINE_MUTEX(khugepaged_mutex);
70
71/* default scan 8*512 ptes (or vmas) every 30 seconds */
72static unsigned int khugepaged_pages_to_scan __read_mostly;
73static unsigned int khugepaged_pages_collapsed;
74static unsigned int khugepaged_full_scans;
75static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
76/* during fragmentation poll the hugepage allocator once every minute */
77static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
78static unsigned long khugepaged_sleep_expire;
79static DEFINE_SPINLOCK(khugepaged_mm_lock);
80static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
81/*
82 * By default, collapse hugepages if there is at least one pte mapped as
83 * it would have been mapped had the vma been large enough during the
84 * page fault.
85 *
86 * Note that these are only respected if collapse was initiated by khugepaged.
87 */
88static unsigned int khugepaged_max_ptes_none __read_mostly;
89static unsigned int khugepaged_max_ptes_swap __read_mostly;
90static unsigned int khugepaged_max_ptes_shared __read_mostly;
91
92#define MM_SLOTS_HASH_BITS 10
93static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
94
95static struct kmem_cache *mm_slot_cache __ro_after_init;
96
97struct collapse_control {
98 bool is_khugepaged;
99
100 /* Num pages scanned per node */
101 u32 node_load[MAX_NUMNODES];
102
103 /* nodemask for allocation fallback */
104 nodemask_t alloc_nmask;
105};
106
107/**
108 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
109 * @slot: hash lookup from mm to mm_slot
110 */
111struct khugepaged_mm_slot {
112 struct mm_slot slot;
113};
114
115/**
116 * struct khugepaged_scan - cursor for scanning
117 * @mm_head: the head of the mm list to scan
118 * @mm_slot: the current mm_slot we are scanning
119 * @address: the next address inside that to be scanned
120 *
121 * There is only the one khugepaged_scan instance of this cursor structure.
122 */
123struct khugepaged_scan {
124 struct list_head mm_head;
125 struct khugepaged_mm_slot *mm_slot;
126 unsigned long address;
127};
128
129static struct khugepaged_scan khugepaged_scan = {
130 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
131};
132
133#ifdef CONFIG_SYSFS
134static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
135 struct kobj_attribute *attr,
136 char *buf)
137{
138 return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
139}
140
141static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
142 struct kobj_attribute *attr,
143 const char *buf, size_t count)
144{
145 unsigned int msecs;
146 int err;
147
148 err = kstrtouint(buf, 10, &msecs);
149 if (err)
150 return -EINVAL;
151
152 khugepaged_scan_sleep_millisecs = msecs;
153 khugepaged_sleep_expire = 0;
154 wake_up_interruptible(&khugepaged_wait);
155
156 return count;
157}
158static struct kobj_attribute scan_sleep_millisecs_attr =
159 __ATTR_RW(scan_sleep_millisecs);
160
161static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
162 struct kobj_attribute *attr,
163 char *buf)
164{
165 return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
166}
167
168static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
169 struct kobj_attribute *attr,
170 const char *buf, size_t count)
171{
172 unsigned int msecs;
173 int err;
174
175 err = kstrtouint(buf, 10, &msecs);
176 if (err)
177 return -EINVAL;
178
179 khugepaged_alloc_sleep_millisecs = msecs;
180 khugepaged_sleep_expire = 0;
181 wake_up_interruptible(&khugepaged_wait);
182
183 return count;
184}
185static struct kobj_attribute alloc_sleep_millisecs_attr =
186 __ATTR_RW(alloc_sleep_millisecs);
187
188static ssize_t pages_to_scan_show(struct kobject *kobj,
189 struct kobj_attribute *attr,
190 char *buf)
191{
192 return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
193}
194static ssize_t pages_to_scan_store(struct kobject *kobj,
195 struct kobj_attribute *attr,
196 const char *buf, size_t count)
197{
198 unsigned int pages;
199 int err;
200
201 err = kstrtouint(buf, 10, &pages);
202 if (err || !pages)
203 return -EINVAL;
204
205 khugepaged_pages_to_scan = pages;
206
207 return count;
208}
209static struct kobj_attribute pages_to_scan_attr =
210 __ATTR_RW(pages_to_scan);
211
212static ssize_t pages_collapsed_show(struct kobject *kobj,
213 struct kobj_attribute *attr,
214 char *buf)
215{
216 return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
217}
218static struct kobj_attribute pages_collapsed_attr =
219 __ATTR_RO(pages_collapsed);
220
221static ssize_t full_scans_show(struct kobject *kobj,
222 struct kobj_attribute *attr,
223 char *buf)
224{
225 return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
226}
227static struct kobj_attribute full_scans_attr =
228 __ATTR_RO(full_scans);
229
230static ssize_t defrag_show(struct kobject *kobj,
231 struct kobj_attribute *attr, char *buf)
232{
233 return single_hugepage_flag_show(kobj, attr, buf,
234 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
235}
236static ssize_t defrag_store(struct kobject *kobj,
237 struct kobj_attribute *attr,
238 const char *buf, size_t count)
239{
240 return single_hugepage_flag_store(kobj, attr, buf, count,
241 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
242}
243static struct kobj_attribute khugepaged_defrag_attr =
244 __ATTR_RW(defrag);
245
246/*
247 * max_ptes_none controls whether khugepaged should collapse hugepages
248 * over unmapped ptes, which potentially increases the memory
249 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
250 * reduce the available free memory in the system as it
251 * runs. Increasing max_ptes_none instead potentially reduces the
252 * free memory in the system during the khugepaged scan.
253 */
254static ssize_t max_ptes_none_show(struct kobject *kobj,
255 struct kobj_attribute *attr,
256 char *buf)
257{
258 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
259}
260static ssize_t max_ptes_none_store(struct kobject *kobj,
261 struct kobj_attribute *attr,
262 const char *buf, size_t count)
263{
264 int err;
265 unsigned long max_ptes_none;
266
267 err = kstrtoul(buf, 10, &max_ptes_none);
268 if (err || max_ptes_none > HPAGE_PMD_NR - 1)
269 return -EINVAL;
270
271 khugepaged_max_ptes_none = max_ptes_none;
272
273 return count;
274}
275static struct kobj_attribute khugepaged_max_ptes_none_attr =
276 __ATTR_RW(max_ptes_none);
277
278static ssize_t max_ptes_swap_show(struct kobject *kobj,
279 struct kobj_attribute *attr,
280 char *buf)
281{
282 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
283}
284
285static ssize_t max_ptes_swap_store(struct kobject *kobj,
286 struct kobj_attribute *attr,
287 const char *buf, size_t count)
288{
289 int err;
290 unsigned long max_ptes_swap;
291
292 err = kstrtoul(buf, 10, &max_ptes_swap);
293 if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
294 return -EINVAL;
295
296 khugepaged_max_ptes_swap = max_ptes_swap;
297
298 return count;
299}
300
301static struct kobj_attribute khugepaged_max_ptes_swap_attr =
302 __ATTR_RW(max_ptes_swap);
303
304static ssize_t max_ptes_shared_show(struct kobject *kobj,
305 struct kobj_attribute *attr,
306 char *buf)
307{
308 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
309}
310
311static ssize_t max_ptes_shared_store(struct kobject *kobj,
312 struct kobj_attribute *attr,
313 const char *buf, size_t count)
314{
315 int err;
316 unsigned long max_ptes_shared;
317
318 err = kstrtoul(buf, 10, &max_ptes_shared);
319 if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
320 return -EINVAL;
321
322 khugepaged_max_ptes_shared = max_ptes_shared;
323
324 return count;
325}
326
327static struct kobj_attribute khugepaged_max_ptes_shared_attr =
328 __ATTR_RW(max_ptes_shared);
329
330static struct attribute *khugepaged_attr[] = {
331 &khugepaged_defrag_attr.attr,
332 &khugepaged_max_ptes_none_attr.attr,
333 &khugepaged_max_ptes_swap_attr.attr,
334 &khugepaged_max_ptes_shared_attr.attr,
335 &pages_to_scan_attr.attr,
336 &pages_collapsed_attr.attr,
337 &full_scans_attr.attr,
338 &scan_sleep_millisecs_attr.attr,
339 &alloc_sleep_millisecs_attr.attr,
340 NULL,
341};
342
343struct attribute_group khugepaged_attr_group = {
344 .attrs = khugepaged_attr,
345 .name = "khugepaged",
346};
347#endif /* CONFIG_SYSFS */
348
349int hugepage_madvise(struct vm_area_struct *vma,
350 unsigned long *vm_flags, int advice)
351{
352 switch (advice) {
353 case MADV_HUGEPAGE:
354#ifdef CONFIG_S390
355 /*
356 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
357 * can't handle this properly after s390_enable_sie, so we simply
358 * ignore the madvise to prevent qemu from causing a SIGSEGV.
359 */
360 if (mm_has_pgste(vma->vm_mm))
361 return 0;
362#endif
363 *vm_flags &= ~VM_NOHUGEPAGE;
364 *vm_flags |= VM_HUGEPAGE;
365 /*
366 * If the vma becomes good for khugepaged to scan,
367 * register it here without waiting for a page fault that
368 * may not happen any time soon.
369 */
370 khugepaged_enter_vma(vma, *vm_flags);
371 break;
372 case MADV_NOHUGEPAGE:
373 *vm_flags &= ~VM_HUGEPAGE;
374 *vm_flags |= VM_NOHUGEPAGE;
375 /*
376 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
377 * this vma even if the mm remains registered in khugepaged, e.g.
378 * if it got registered before VM_NOHUGEPAGE was set.
379 */
380 break;
381 }
382
383 return 0;
384}
385
386int __init khugepaged_init(void)
387{
388 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
389 sizeof(struct khugepaged_mm_slot),
390 __alignof__(struct khugepaged_mm_slot),
391 0, NULL);
392 if (!mm_slot_cache)
393 return -ENOMEM;
394
395 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
396 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
397 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
398 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
399
400 return 0;
401}
402
403void __init khugepaged_destroy(void)
404{
405 kmem_cache_destroy(mm_slot_cache);
406}
407
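/*
 * No remaining mm_users means the mm is exiting: khugepaged must stop
 * touching it and let the page tables be torn down.
 */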
408static inline int hpage_collapse_test_exit(struct mm_struct *mm)
409{
410 return atomic_read(&mm->mm_users) == 0;
411}
412
413void __khugepaged_enter(struct mm_struct *mm)
414{
415 struct khugepaged_mm_slot *mm_slot;
416 struct mm_slot *slot;
417 int wakeup;
418
419 /* __khugepaged_exit() must not run from under us */
420 VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
421 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
422 return;
423
424 mm_slot = mm_slot_alloc(mm_slot_cache);
425 if (!mm_slot)
426 return;
427
428 slot = &mm_slot->slot;
429
430 spin_lock(&khugepaged_mm_lock);
431 mm_slot_insert(mm_slots_hash, mm, slot);
432 /*
433 * Insert just behind the scanning cursor, to let the area settle
434 * down a little.
435 */
436 wakeup = list_empty(&khugepaged_scan.mm_head);
437 list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
438 spin_unlock(&khugepaged_mm_lock);
439
440 mmgrab(mm);
441 if (wakeup)
442 wake_up_interruptible(&khugepaged_wait);
443}
444
445void khugepaged_enter_vma(struct vm_area_struct *vma,
446 unsigned long vm_flags)
447{
448 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
449 hugepage_flags_enabled()) {
450 if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
451 PMD_ORDER))
452 __khugepaged_enter(vma->vm_mm);
453 }
454}
455
456void __khugepaged_exit(struct mm_struct *mm)
457{
458 struct khugepaged_mm_slot *mm_slot;
459 struct mm_slot *slot;
460 int free = 0;
461
462 spin_lock(&khugepaged_mm_lock);
463 slot = mm_slot_lookup(mm_slots_hash, mm);
464 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
465 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
466 hash_del(&slot->hash);
467 list_del(&slot->mm_node);
468 free = 1;
469 }
470 spin_unlock(&khugepaged_mm_lock);
471
472 if (free) {
473 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
474 mm_slot_free(mm_slot_cache, mm_slot);
475 mmdrop(mm);
476 } else if (mm_slot) {
477 /*
478 * This is required to serialize against
479 * hpage_collapse_test_exit() (which is guaranteed to run
480 * under mmap_lock read mode). Stop here (after we return, all
481 * pagetables will be destroyed) until khugepaged has finished
482 * working on the pagetables under the mmap_lock.
483 */
484 mmap_write_lock(mm);
485 mmap_write_unlock(mm);
486 }
487}
488
489static void release_pte_folio(struct folio *folio)
490{
491 node_stat_mod_folio(folio,
492 NR_ISOLATED_ANON + folio_is_file_lru(folio),
493 -folio_nr_pages(folio));
494 folio_unlock(folio);
495 folio_putback_lru(folio);
496}
497
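/*
 * Undo the work of __collapse_huge_page_isolate(): put back every small folio
 * mapped by the ptes between @pte and @_pte, then every large folio collected
 * on @compound_pagelist.
 */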
498static void release_pte_pages(pte_t *pte, pte_t *_pte,
499 struct list_head *compound_pagelist)
500{
501 struct folio *folio, *tmp;
502
503 while (--_pte >= pte) {
504 pte_t pteval = ptep_get(_pte);
505 unsigned long pfn;
506
507 if (pte_none(pteval))
508 continue;
509 pfn = pte_pfn(pteval);
510 if (is_zero_pfn(pfn))
511 continue;
512 folio = pfn_folio(pfn);
513 if (folio_test_large(folio))
514 continue;
515 release_pte_folio(folio);
516 }
517
518 list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
519 list_del(&folio->lru);
520 release_pte_folio(folio);
521 }
522}
523
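/*
 * A folio is only safe to collapse when its refcount matches what we expect
 * from its page table mappings (plus the swapcache reference, if any), i.e.
 * there are no extra pins such as GUP references.
 */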
524static bool is_refcount_suitable(struct folio *folio)
525{
526 int expected_refcount;
527
528 expected_refcount = folio_mapcount(folio);
529 if (folio_test_swapcache(folio))
530 expected_refcount += folio_nr_pages(folio);
531
532 return folio_ref_count(folio) == expected_refcount;
533}
534
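/*
 * Walk the HPAGE_PMD_NR ptes at @address, enforce the max_ptes_none and
 * max_ptes_shared limits where applicable, and lock + isolate every mapped
 * folio from the LRU so it can neither be freed nor split while it is copied.
 * On failure, all folios isolated so far are released again.
 */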
535static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
536 unsigned long address,
537 pte_t *pte,
538 struct collapse_control *cc,
539 struct list_head *compound_pagelist)
540{
541 struct page *page = NULL;
542 struct folio *folio = NULL;
543 pte_t *_pte;
544 int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
545 bool writable = false;
546
547 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
548 _pte++, address += PAGE_SIZE) {
549 pte_t pteval = ptep_get(_pte);
550 if (pte_none(pteval) || (pte_present(pteval) &&
551 is_zero_pfn(pte_pfn(pteval)))) {
552 ++none_or_zero;
553 if (!userfaultfd_armed(vma) &&
554 (!cc->is_khugepaged ||
555 none_or_zero <= khugepaged_max_ptes_none)) {
556 continue;
557 } else {
558 result = SCAN_EXCEED_NONE_PTE;
559 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
560 goto out;
561 }
562 }
563 if (!pte_present(pteval)) {
564 result = SCAN_PTE_NON_PRESENT;
565 goto out;
566 }
567 if (pte_uffd_wp(pteval)) {
568 result = SCAN_PTE_UFFD_WP;
569 goto out;
570 }
571 page = vm_normal_page(vma, address, pteval);
572 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
573 result = SCAN_PAGE_NULL;
574 goto out;
575 }
576
577 folio = page_folio(page);
578 VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
579
580 if (page_mapcount(page) > 1) {
581 ++shared;
582 if (cc->is_khugepaged &&
583 shared > khugepaged_max_ptes_shared) {
584 result = SCAN_EXCEED_SHARED_PTE;
585 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
586 goto out;
587 }
588 }
589
590 if (folio_test_large(folio)) {
591 struct folio *f;
592
593 /*
594 * Check if we have dealt with the compound page
595 * already
596 */
597 list_for_each_entry(f, compound_pagelist, lru) {
598 if (folio == f)
599 goto next;
600 }
601 }
602
603 /*
604 * We can do it before isolate_lru_page because the
605 * page can't be freed from under us. NOTE: PG_lock
606 * is needed to serialize against split_huge_page
607 * when invoked from the VM.
608 */
609 if (!folio_trylock(folio)) {
610 result = SCAN_PAGE_LOCK;
611 goto out;
612 }
613
614 /*
615 * Check if the page has any GUP (or other external) pins.
616 *
617 * The page table that maps the page has been already unlinked
618 * from the page table tree and this process cannot get
619 * an additional pin on the page.
620 *
621 * New pins can come later if the page is shared across fork,
622 * but not from this process. The other process cannot write to
623 * the page, only trigger CoW.
624 */
625 if (!is_refcount_suitable(folio)) {
626 folio_unlock(folio);
627 result = SCAN_PAGE_COUNT;
628 goto out;
629 }
630
631 /*
632 * Isolate the page to avoid collapsing a hugepage
633 * currently in use by the VM.
634 */
635 if (!folio_isolate_lru(folio)) {
636 folio_unlock(folio);
637 result = SCAN_DEL_PAGE_LRU;
638 goto out;
639 }
640 node_stat_mod_folio(folio,
641 NR_ISOLATED_ANON + folio_is_file_lru(folio),
642 folio_nr_pages(folio));
643 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
644 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
645
646 if (folio_test_large(folio))
647 list_add_tail(&folio->lru, compound_pagelist);
648next:
649 /*
650 * If collapse was initiated by khugepaged, check that there is
651 * enough young ptes to justify collapsing the page.
652 */
653 if (cc->is_khugepaged &&
654 (pte_young(pteval) || folio_test_young(folio) ||
655 folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
656 address)))
657 referenced++;
658
659 if (pte_write(pteval))
660 writable = true;
661 }
662
663 if (unlikely(!writable)) {
664 result = SCAN_PAGE_RO;
665 } else if (unlikely(cc->is_khugepaged && !referenced)) {
666 result = SCAN_LACK_REFERENCED_PAGE;
667 } else {
668 result = SCAN_SUCCEED;
669 trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
670 referenced, writable, result);
671 return result;
672 }
673out:
674 release_pte_pages(pte, _pte, compound_pagelist);
675 trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
676 referenced, writable, result);
677 return result;
678}
679
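/*
 * The copy into the huge page succeeded: clear the old ptes, drop their rmap
 * references and release the LRU isolation taken by
 * __collapse_huge_page_isolate(), freeing the old pages.
 */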
680static void __collapse_huge_page_copy_succeeded(pte_t *pte,
681 struct vm_area_struct *vma,
682 unsigned long address,
683 spinlock_t *ptl,
684 struct list_head *compound_pagelist)
685{
686 struct folio *src_folio;
687 struct page *src_page;
688 struct page *tmp;
689 pte_t *_pte;
690 pte_t pteval;
691
692 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
693 _pte++, address += PAGE_SIZE) {
694 pteval = ptep_get(_pte);
695 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
696 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
697 if (is_zero_pfn(pte_pfn(pteval))) {
698 /*
699 * ptl mostly unnecessary.
700 */
701 spin_lock(ptl);
702 ptep_clear(vma->vm_mm, address, _pte);
703 spin_unlock(ptl);
704 ksm_might_unmap_zero_page(vma->vm_mm, pteval);
705 }
706 } else {
707 src_page = pte_page(pteval);
708 src_folio = page_folio(src_page);
709 if (!folio_test_large(src_folio))
710 release_pte_folio(src_folio);
711 /*
712 * ptl mostly unnecessary, but preempt has to
713 * be disabled to update the per-cpu stats
714 * inside folio_remove_rmap_pte().
715 */
716 spin_lock(ptl);
717 ptep_clear(vma->vm_mm, address, _pte);
718 folio_remove_rmap_pte(src_folio, src_page, vma);
719 spin_unlock(ptl);
720 free_page_and_swap_cache(src_page);
721 }
722 }
723
724 list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
725 list_del(&src_page->lru);
726 mod_node_page_state(page_pgdat(src_page),
727 NR_ISOLATED_ANON + page_is_file_lru(src_page),
728 -compound_nr(src_page));
729 unlock_page(src_page);
730 free_swap_cache(src_page);
731 putback_lru_page(src_page);
732 }
733}
734
735static void __collapse_huge_page_copy_failed(pte_t *pte,
736 pmd_t *pmd,
737 pmd_t orig_pmd,
738 struct vm_area_struct *vma,
739 struct list_head *compound_pagelist)
740{
741 spinlock_t *pmd_ptl;
742
743 /*
744 * Re-establish the PMD to point to the original page table
745 * entry. Restoring PMD needs to be done prior to releasing
746 * pages. Since pages are still isolated and locked here,
747 * acquiring anon_vma_lock_write is unnecessary.
748 */
749 pmd_ptl = pmd_lock(vma->vm_mm, pmd);
750 pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
751 spin_unlock(pmd_ptl);
752 /*
753 * Release both raw and compound pages isolated
754 * in __collapse_huge_page_isolate.
755 */
756 release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
757}
758
759/*
760 * __collapse_huge_page_copy - attempts to copy memory contents from raw
761 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
762 * otherwise restores the original page table and releases isolated raw pages.
763 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
764 *
765 * @pte: starting of the PTEs to copy from
766 * @page: the new hugepage to copy contents to
767 * @pmd: pointer to the new hugepage's PMD
768 * @orig_pmd: the original raw pages' PMD
769 * @vma: the original raw pages' virtual memory area
770 * @address: starting address to copy
771 * @ptl: lock on raw pages' PTEs
772 * @compound_pagelist: list that stores compound pages
773 */
774static int __collapse_huge_page_copy(pte_t *pte,
775 struct page *page,
776 pmd_t *pmd,
777 pmd_t orig_pmd,
778 struct vm_area_struct *vma,
779 unsigned long address,
780 spinlock_t *ptl,
781 struct list_head *compound_pagelist)
782{
783 struct page *src_page;
784 pte_t *_pte;
785 pte_t pteval;
786 unsigned long _address;
787 int result = SCAN_SUCCEED;
788
789 /*
790 * Copying pages' contents is subject to memory poison at any iteration.
791 */
792 for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
793 _pte++, page++, _address += PAGE_SIZE) {
794 pteval = ptep_get(_pte);
795 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
796 clear_user_highpage(page, _address);
797 continue;
798 }
799 src_page = pte_page(pteval);
800 if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
801 result = SCAN_COPY_MC;
802 break;
803 }
804 }
805
806 if (likely(result == SCAN_SUCCEED))
807 __collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
808 compound_pagelist);
809 else
810 __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
811 compound_pagelist);
812
813 return result;
814}
815
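/*
 * Interruptible, freezable sleep for khugepaged_alloc_sleep_millisecs; may be
 * cut short by a wakeup on khugepaged_wait.
 */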
816static void khugepaged_alloc_sleep(void)
817{
818 DEFINE_WAIT(wait);
819
820 add_wait_queue(&khugepaged_wait, &wait);
821 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
822 schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
823 remove_wait_queue(&khugepaged_wait, &wait);
824}
825
826struct collapse_control khugepaged_collapse_control = {
827 .is_khugepaged = true,
828};
829
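/*
 * Returns true when the scan should be aborted for NUMA locality: pages have
 * already been counted on a node further than node_reclaim_distance from @nid.
 */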
830static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
831{
832 int i;
833
834 /*
835 * If node_reclaim_mode is disabled, then no extra effort is made to
836 * allocate memory locally.
837 */
838 if (!node_reclaim_enabled())
839 return false;
840
841 /* If there is a count for this node already, it must be acceptable */
842 if (cc->node_load[nid])
843 return false;
844
845 for (i = 0; i < MAX_NUMNODES; i++) {
846 if (!cc->node_load[i])
847 continue;
848 if (node_distance(nid, i) > node_reclaim_distance)
849 return true;
850 }
851 return false;
852}
853
854#define khugepaged_defrag() \
855 (transparent_hugepage_flags & \
856 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
857
858/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
859static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
860{
861 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
862}
863
864#ifdef CONFIG_NUMA
865static int hpage_collapse_find_target_node(struct collapse_control *cc)
866{
867 int nid, target_node = 0, max_value = 0;
868
869 /* find first node with max normal pages hit */
870 for (nid = 0; nid < MAX_NUMNODES; nid++)
871 if (cc->node_load[nid] > max_value) {
872 max_value = cc->node_load[nid];
873 target_node = nid;
874 }
875
876 for_each_online_node(nid) {
877 if (max_value == cc->node_load[nid])
878 node_set(nid, cc->alloc_nmask);
879 }
880
881 return target_node;
882}
883#else
884static int hpage_collapse_find_target_node(struct collapse_control *cc)
885{
886 return 0;
887}
888#endif
889
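/* Allocate the PMD-order destination folio on @node (within @nmask) and account the event. */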
890static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
891 nodemask_t *nmask)
892{
893 *folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
894
895 if (unlikely(!*folio)) {
896 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
897 return false;
898 }
899
900 count_vm_event(THP_COLLAPSE_ALLOC);
901 return true;
902}
903
904/*
905 * If the mmap_lock was temporarily dropped, revalidate the vma
906 * once the lock has been re-taken, before using the vma again.
907 * Returns enum scan_result value.
908 */
909
910static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
911 bool expect_anon,
912 struct vm_area_struct **vmap,
913 struct collapse_control *cc)
914{
915 struct vm_area_struct *vma;
916
917 if (unlikely(hpage_collapse_test_exit(mm)))
918 return SCAN_ANY_PROCESS;
919
920 *vmap = vma = find_vma(mm, address);
921 if (!vma)
922 return SCAN_VMA_NULL;
923
924 if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
925 return SCAN_ADDRESS_RANGE;
926 if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
927 cc->is_khugepaged, PMD_ORDER))
928 return SCAN_VMA_CHECK;
929 /*
930 * Anon VMA expected, the address may be unmapped then
931 * remapped to file after khugepaged re-acquired the mmap_lock.
932 *
933 * thp_vma_allowable_order may return true for qualified file
934 * vmas.
935 */
936 if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
937 return SCAN_PAGE_ANON;
938 return SCAN_SUCCEED;
939}
940
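/*
 * Look up the pmd covering @address: SCAN_SUCCEED if it points to a page
 * table, SCAN_PMD_MAPPED if a huge page is already installed, SCAN_PMD_NONE
 * if the pmd is none, and SCAN_PMD_NULL for missing/non-present/devmap/bad
 * pmds.
 */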
941static int find_pmd_or_thp_or_none(struct mm_struct *mm,
942 unsigned long address,
943 pmd_t **pmd)
944{
945 pmd_t pmde;
946
947 *pmd = mm_find_pmd(mm, address);
948 if (!*pmd)
949 return SCAN_PMD_NULL;
950
951 pmde = pmdp_get_lockless(*pmd);
952 if (pmd_none(pmde))
953 return SCAN_PMD_NONE;
954 if (!pmd_present(pmde))
955 return SCAN_PMD_NULL;
956 if (pmd_trans_huge(pmde))
957 return SCAN_PMD_MAPPED;
958 if (pmd_devmap(pmde))
959 return SCAN_PMD_NULL;
960 if (pmd_bad(pmde))
961 return SCAN_PMD_NULL;
962 return SCAN_SUCCEED;
963}
964
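/* Confirm that the pmd found earlier is still the one in place, e.g. after the mmap_lock was dropped and re-taken. */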
965static int check_pmd_still_valid(struct mm_struct *mm,
966 unsigned long address,
967 pmd_t *pmd)
968{
969 pmd_t *new_pmd;
970 int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
971
972 if (result != SCAN_SUCCEED)
973 return result;
974 if (new_pmd != pmd)
975 return SCAN_FAIL;
976 return SCAN_SUCCEED;
977}
978
979/*
980 * Bring missing pages in from swap, to complete THP collapse.
981 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
982 *
983 * Called and returns without pte mapped or spinlocks held.
984 * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
985 */
986static int __collapse_huge_page_swapin(struct mm_struct *mm,
987 struct vm_area_struct *vma,
988 unsigned long haddr, pmd_t *pmd,
989 int referenced)
990{
991 int swapped_in = 0;
992 vm_fault_t ret = 0;
993 unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
994 int result;
995 pte_t *pte = NULL;
996 spinlock_t *ptl;
997
998 for (address = haddr; address < end; address += PAGE_SIZE) {
999 struct vm_fault vmf = {
1000 .vma = vma,
1001 .address = address,
1002 .pgoff = linear_page_index(vma, address),
1003 .flags = FAULT_FLAG_ALLOW_RETRY,
1004 .pmd = pmd,
1005 };
1006
1007 if (!pte++) {
1008 pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
1009 if (!pte) {
1010 mmap_read_unlock(mm);
1011 result = SCAN_PMD_NULL;
1012 goto out;
1013 }
1014 }
1015
1016 vmf.orig_pte = ptep_get_lockless(pte);
1017 if (!is_swap_pte(vmf.orig_pte))
1018 continue;
1019
1020 vmf.pte = pte;
1021 vmf.ptl = ptl;
1022 ret = do_swap_page(&vmf);
1023 /* Which unmaps pte (after perhaps re-checking the entry) */
1024 pte = NULL;
1025
1026 /*
1027 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
1028 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
1029 * we do not retry here and the swap entry will remain in the
1030 * pagetable, resulting in a later failure.
1031 */
1032 if (ret & VM_FAULT_RETRY) {
1033 /* Likely, but not guaranteed, that page lock failed */
1034 result = SCAN_PAGE_LOCK;
1035 goto out;
1036 }
1037 if (ret & VM_FAULT_ERROR) {
1038 mmap_read_unlock(mm);
1039 result = SCAN_FAIL;
1040 goto out;
1041 }
1042 swapped_in++;
1043 }
1044
1045 if (pte)
1046 pte_unmap(pte);
1047
1048 /* Drain LRU cache to remove extra pin on the swapped in pages */
1049 if (swapped_in)
1050 lru_add_drain();
1051
1052 result = SCAN_SUCCEED;
1053out:
1054 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
1055 return result;
1056}
1057
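/*
 * Allocate the destination huge page on the preferred node and charge it to
 * @mm's memcg; returns SCAN_SUCCEED or the reason for failure.
 */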
1058static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
1059 struct collapse_control *cc)
1060{
1061 gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
1062 GFP_TRANSHUGE);
1063 int node = hpage_collapse_find_target_node(cc);
1064 struct folio *folio;
1065
1066 if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
1067 *hpage = NULL;
1068 return SCAN_ALLOC_HUGE_PAGE_FAIL;
1069 }
1070
1071 if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
1072 folio_put(folio);
1073 *hpage = NULL;
1074 return SCAN_CGROUP_CHARGE_FAIL;
1075 }
1076
1077 count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
1078
1079 *hpage = folio_page(folio, 0);
1080 return SCAN_SUCCEED;
1081}
1082
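/*
 * Collapse the PMD-sized anonymous range at @address into a newly allocated
 * huge page. Entered with the mmap_lock held for read; the lock is always
 * released by the time this returns.
 */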
1083static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
1084 int referenced, int unmapped,
1085 struct collapse_control *cc)
1086{
1087 LIST_HEAD(compound_pagelist);
1088 pmd_t *pmd, _pmd;
1089 pte_t *pte;
1090 pgtable_t pgtable;
1091 struct folio *folio;
1092 struct page *hpage;
1093 spinlock_t *pmd_ptl, *pte_ptl;
1094 int result = SCAN_FAIL;
1095 struct vm_area_struct *vma;
1096 struct mmu_notifier_range range;
1097
1098 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1099
1100 /*
1101 * Before allocating the hugepage, release the mmap_lock read lock.
1102 * The allocation can take potentially a long time if it involves
1103 * sync compaction, and we do not need to hold the mmap_lock during
1104 * that. We will recheck the vma after taking it again in write mode.
1105 */
1106 mmap_read_unlock(mm);
1107
1108 result = alloc_charge_hpage(&hpage, mm, cc);
1109 if (result != SCAN_SUCCEED)
1110 goto out_nolock;
1111
1112 mmap_read_lock(mm);
1113 result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1114 if (result != SCAN_SUCCEED) {
1115 mmap_read_unlock(mm);
1116 goto out_nolock;
1117 }
1118
1119 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1120 if (result != SCAN_SUCCEED) {
1121 mmap_read_unlock(mm);
1122 goto out_nolock;
1123 }
1124
1125 if (unmapped) {
1126 /*
1127 * __collapse_huge_page_swapin will return with mmap_lock
1128 * released when it fails. So we jump to out_nolock directly in
1129 * that case. Continuing to collapse would cause inconsistency.
1130 */
1131 result = __collapse_huge_page_swapin(mm, vma, address, pmd,
1132 referenced);
1133 if (result != SCAN_SUCCEED)
1134 goto out_nolock;
1135 }
1136
1137 mmap_read_unlock(mm);
1138 /*
1139 * Prevent all access to pagetables with the exception of
1140 * gup_fast later handled by the ptep_clear_flush and the VM
1141 * handled by the anon_vma lock + PG_lock.
1142 *
1143 * UFFDIO_MOVE is prevented from racing as well thanks to the
1144 * mmap_lock.
1145 */
1146 mmap_write_lock(mm);
1147 result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1148 if (result != SCAN_SUCCEED)
1149 goto out_up_write;
1150 /* check if the pmd is still valid */
1151 result = check_pmd_still_valid(mm, address, pmd);
1152 if (result != SCAN_SUCCEED)
1153 goto out_up_write;
1154
1155 vma_start_write(vma);
1156 anon_vma_lock_write(vma->anon_vma);
1157
1158 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
1159 address + HPAGE_PMD_SIZE);
1160 mmu_notifier_invalidate_range_start(&range);
1161
1162 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1163 /*
1164 * This removes any huge TLB entry from the CPU so we won't allow
1165 * huge and small TLB entries for the same virtual address, to
1166 * avoid the risk of CPU bugs in that area.
1167 *
1168 * Parallel fast GUP is fine since fast GUP will back off when
1169 * it detects the PMD has changed.
1170 */
1171 _pmd = pmdp_collapse_flush(vma, address, pmd);
1172 spin_unlock(pmd_ptl);
1173 mmu_notifier_invalidate_range_end(&range);
1174 tlb_remove_table_sync_one();
1175
1176 pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
1177 if (pte) {
1178 result = __collapse_huge_page_isolate(vma, address, pte, cc,
1179 &compound_pagelist);
1180 spin_unlock(pte_ptl);
1181 } else {
1182 result = SCAN_PMD_NULL;
1183 }
1184
1185 if (unlikely(result != SCAN_SUCCEED)) {
1186 if (pte)
1187 pte_unmap(pte);
1188 spin_lock(pmd_ptl);
1189 BUG_ON(!pmd_none(*pmd));
1190 /*
1191 * We can only use set_pmd_at when establishing
1192 * hugepmds and never for establishing regular pmds that
1193 * point to regular pagetables. Use pmd_populate for that.
1194 */
1195 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1196 spin_unlock(pmd_ptl);
1197 anon_vma_unlock_write(vma->anon_vma);
1198 goto out_up_write;
1199 }
1200
1201 /*
1202 * All pages are isolated and locked so anon_vma rmap
1203 * can't run anymore.
1204 */
1205 anon_vma_unlock_write(vma->anon_vma);
1206
1207 result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
1208 vma, address, pte_ptl,
1209 &compound_pagelist);
1210 pte_unmap(pte);
1211 if (unlikely(result != SCAN_SUCCEED))
1212 goto out_up_write;
1213
1214 folio = page_folio(hpage);
1215 /*
1216 * The smp_wmb() inside __folio_mark_uptodate() ensures the
1217 * copy_huge_page writes become visible before the set_pmd_at()
1218 * write.
1219 */
1220 __folio_mark_uptodate(folio);
1221 pgtable = pmd_pgtable(_pmd);
1222
1223 _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
1224 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1225
1226 spin_lock(pmd_ptl);
1227 BUG_ON(!pmd_none(*pmd));
1228 folio_add_new_anon_rmap(folio, vma, address);
1229 folio_add_lru_vma(folio, vma);
1230 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1231 set_pmd_at(mm, address, pmd, _pmd);
1232 update_mmu_cache_pmd(vma, address, pmd);
1233 spin_unlock(pmd_ptl);
1234
1235 hpage = NULL;
1236
1237 result = SCAN_SUCCEED;
1238out_up_write:
1239 mmap_write_unlock(mm);
1240out_nolock:
1241 if (hpage)
1242 put_page(hpage);
1243 trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
1244 return result;
1245}
1246
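/*
 * Scan the ptes of the PMD-sized range at @address and decide whether the
 * range is worth collapsing; if so, call collapse_huge_page(), which drops
 * the mmap_lock (reported back through *mmap_locked).
 */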
1247static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1248 struct vm_area_struct *vma,
1249 unsigned long address, bool *mmap_locked,
1250 struct collapse_control *cc)
1251{
1252 pmd_t *pmd;
1253 pte_t *pte, *_pte;
1254 int result = SCAN_FAIL, referenced = 0;
1255 int none_or_zero = 0, shared = 0;
1256 struct page *page = NULL;
1257 struct folio *folio = NULL;
1258 unsigned long _address;
1259 spinlock_t *ptl;
1260 int node = NUMA_NO_NODE, unmapped = 0;
1261 bool writable = false;
1262
1263 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1264
1265 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1266 if (result != SCAN_SUCCEED)
1267 goto out;
1268
1269 memset(cc->node_load, 0, sizeof(cc->node_load));
1270 nodes_clear(cc->alloc_nmask);
1271 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1272 if (!pte) {
1273 result = SCAN_PMD_NULL;
1274 goto out;
1275 }
1276
1277 for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1278 _pte++, _address += PAGE_SIZE) {
1279 pte_t pteval = ptep_get(_pte);
1280 if (is_swap_pte(pteval)) {
1281 ++unmapped;
1282 if (!cc->is_khugepaged ||
1283 unmapped <= khugepaged_max_ptes_swap) {
1284 /*
1285 * Always be strict with uffd-wp
1286 * enabled swap entries. Please see
1287 * comment below for pte_uffd_wp().
1288 */
1289 if (pte_swp_uffd_wp_any(pteval)) {
1290 result = SCAN_PTE_UFFD_WP;
1291 goto out_unmap;
1292 }
1293 continue;
1294 } else {
1295 result = SCAN_EXCEED_SWAP_PTE;
1296 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1297 goto out_unmap;
1298 }
1299 }
1300 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1301 ++none_or_zero;
1302 if (!userfaultfd_armed(vma) &&
1303 (!cc->is_khugepaged ||
1304 none_or_zero <= khugepaged_max_ptes_none)) {
1305 continue;
1306 } else {
1307 result = SCAN_EXCEED_NONE_PTE;
1308 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1309 goto out_unmap;
1310 }
1311 }
1312 if (pte_uffd_wp(pteval)) {
1313 /*
1314 * Don't collapse the page if any of the small
1315 * PTEs are armed with uffd write protection.
1316 * Here we can also mark the new huge pmd as
1317 * write protected if any of the small ones is
1318 * marked, but that could bring unknown
1319 * userfault messages that fall outside of
1320 * the registered range. So, just keep it simple.
1321 */
1322 result = SCAN_PTE_UFFD_WP;
1323 goto out_unmap;
1324 }
1325 if (pte_write(pteval))
1326 writable = true;
1327
1328 page = vm_normal_page(vma, _address, pteval);
1329 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1330 result = SCAN_PAGE_NULL;
1331 goto out_unmap;
1332 }
1333
1334 if (page_mapcount(page) > 1) {
1335 ++shared;
1336 if (cc->is_khugepaged &&
1337 shared > khugepaged_max_ptes_shared) {
1338 result = SCAN_EXCEED_SHARED_PTE;
1339 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1340 goto out_unmap;
1341 }
1342 }
1343
1344 folio = page_folio(page);
1345 /*
1346 * Record which node the original page is from and save this
1347 * information to cc->node_load[].
1348 * Khugepaged will allocate the hugepage from the node that has
1349 * the max hit record.
1350 */
1351 node = folio_nid(folio);
1352 if (hpage_collapse_scan_abort(node, cc)) {
1353 result = SCAN_SCAN_ABORT;
1354 goto out_unmap;
1355 }
1356 cc->node_load[node]++;
1357 if (!folio_test_lru(folio)) {
1358 result = SCAN_PAGE_LRU;
1359 goto out_unmap;
1360 }
1361 if (folio_test_locked(folio)) {
1362 result = SCAN_PAGE_LOCK;
1363 goto out_unmap;
1364 }
1365 if (!folio_test_anon(folio)) {
1366 result = SCAN_PAGE_ANON;
1367 goto out_unmap;
1368 }
1369
1370 /*
1371 * Check if the page has any GUP (or other external) pins.
1372 *
1373 * Here the check may be racy:
1374 * it may see total_mapcount > refcount in some cases.
1375 * But such cases are ephemeral and we can always retry the
1376 * collapse later. However it may report a false positive if the
1377 * page has excessive GUP pins (i.e. 512). Anyway the same check
1378 * will be done again later, so the risk seems low.
1379 */
1380 if (!is_refcount_suitable(folio)) {
1381 result = SCAN_PAGE_COUNT;
1382 goto out_unmap;
1383 }
1384
1385 /*
1386 * If collapse was initiated by khugepaged, check that there is
1387 * enough young ptes to justify collapsing the page.
1388 */
1389 if (cc->is_khugepaged &&
1390 (pte_young(pteval) || folio_test_young(folio) ||
1391 folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
1392 address)))
1393 referenced++;
1394 }
1395 if (!writable) {
1396 result = SCAN_PAGE_RO;
1397 } else if (cc->is_khugepaged &&
1398 (!referenced ||
1399 (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1400 result = SCAN_LACK_REFERENCED_PAGE;
1401 } else {
1402 result = SCAN_SUCCEED;
1403 }
1404out_unmap:
1405 pte_unmap_unlock(pte, ptl);
1406 if (result == SCAN_SUCCEED) {
1407 result = collapse_huge_page(mm, address, referenced,
1408 unmapped, cc);
1409 /* collapse_huge_page will return with the mmap_lock released */
1410 *mmap_locked = false;
1411 }
1412out:
1413 trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
1414 none_or_zero, result, unmapped);
1415 return result;
1416}
1417
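/*
 * If the mm behind @mm_slot has exited, unhash the slot, unlink it from the
 * scan list, free it and drop the mm reference. Called with
 * khugepaged_mm_lock held.
 */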
1418static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1419{
1420 struct mm_slot *slot = &mm_slot->slot;
1421 struct mm_struct *mm = slot->mm;
1422
1423 lockdep_assert_held(&khugepaged_mm_lock);
1424
1425 if (hpage_collapse_test_exit(mm)) {
1426 /* free mm_slot */
1427 hash_del(&slot->hash);
1428 list_del(&slot->mm_node);
1429
1430 /*
1431 * Not strictly needed because the mm exited already.
1432 *
1433 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1434 */
1435
1436 /* khugepaged_mm_lock actually not necessary for the below */
1437 mm_slot_free(mm_slot_cache, mm_slot);
1438 mmdrop(mm);
1439 }
1440}
1441
1442#ifdef CONFIG_SHMEM
1443/* hpage must be locked, and mmap_lock must be held */
1444static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1445 pmd_t *pmdp, struct page *hpage)
1446{
1447 struct vm_fault vmf = {
1448 .vma = vma,
1449 .address = addr,
1450 .flags = 0,
1451 .pmd = pmdp,
1452 };
1453
1454 VM_BUG_ON(!PageTransHuge(hpage));
1455 mmap_assert_locked(vma->vm_mm);
1456
1457 if (do_set_pmd(&vmf, hpage))
1458 return SCAN_FAIL;
1459
1460 get_page(hpage);
1461 return SCAN_SUCCEED;
1462}
1463
1464/**
1465 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1466 * address haddr.
1467 *
1468 * @mm: process address space where collapse happens
1469 * @addr: THP collapse address
1470 * @install_pmd: If a huge PMD should be installed
1471 *
1472 * This function checks whether all the PTEs in the PMD are pointing to the
1473 * right THP. If so, retract the page table so the THP can refault in
1474 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
1475 */
1476int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1477 bool install_pmd)
1478{
1479 struct mmu_notifier_range range;
1480 bool notified = false;
1481 unsigned long haddr = addr & HPAGE_PMD_MASK;
1482 struct vm_area_struct *vma = vma_lookup(mm, haddr);
1483 struct folio *folio;
1484 pte_t *start_pte, *pte;
1485 pmd_t *pmd, pgt_pmd;
1486 spinlock_t *pml = NULL, *ptl;
1487 int nr_ptes = 0, result = SCAN_FAIL;
1488 int i;
1489
1490 mmap_assert_locked(mm);
1491
1492 /* First check VMA found, in case page tables are being torn down */
1493 if (!vma || !vma->vm_file ||
1494 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1495 return SCAN_VMA_CHECK;
1496
1497 /* Fast check before locking page if already PMD-mapped */
1498 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1499 if (result == SCAN_PMD_MAPPED)
1500 return result;
1501
1502 /*
1503 * If we are here, we've succeeded in replacing all the native pages
1504 * in the page cache with a single hugepage. If a mm were to fault-in
1505 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1506 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1507 * analogously elide sysfs THP settings here.
1508 */
1509 if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
1510 PMD_ORDER))
1511 return SCAN_VMA_CHECK;
1512
1513 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1514 if (userfaultfd_wp(vma))
1515 return SCAN_PTE_UFFD_WP;
1516
1517 folio = filemap_lock_folio(vma->vm_file->f_mapping,
1518 linear_page_index(vma, haddr));
1519 if (IS_ERR(folio))
1520 return SCAN_PAGE_NULL;
1521
1522 if (folio_order(folio) != HPAGE_PMD_ORDER) {
1523 result = SCAN_PAGE_COMPOUND;
1524 goto drop_folio;
1525 }
1526
1527 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1528 switch (result) {
1529 case SCAN_SUCCEED:
1530 break;
1531 case SCAN_PMD_NONE:
1532 /*
1533 * All pte entries have been removed and pmd cleared.
1534 * Skip all the pte checks and just update the pmd mapping.
1535 */
1536 goto maybe_install_pmd;
1537 default:
1538 goto drop_folio;
1539 }
1540
1541 result = SCAN_FAIL;
1542 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1543 if (!start_pte) /* mmap_lock + page lock should prevent this */
1544 goto drop_folio;
1545
1546 /* step 1: check all mapped PTEs are to the right huge page */
1547 for (i = 0, addr = haddr, pte = start_pte;
1548 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1549 struct page *page;
1550 pte_t ptent = ptep_get(pte);
1551
1552 /* empty pte, skip */
1553 if (pte_none(ptent))
1554 continue;
1555
1556 /* page swapped out, abort */
1557 if (!pte_present(ptent)) {
1558 result = SCAN_PTE_NON_PRESENT;
1559 goto abort;
1560 }
1561
1562 page = vm_normal_page(vma, addr, ptent);
1563 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1564 page = NULL;
1565 /*
1566 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1567 * page table, but the new page will not be a subpage of hpage.
1568 */
1569 if (folio_page(folio, i) != page)
1570 goto abort;
1571 }
1572
1573 pte_unmap_unlock(start_pte, ptl);
1574 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1575 haddr, haddr + HPAGE_PMD_SIZE);
1576 mmu_notifier_invalidate_range_start(&range);
1577 notified = true;
1578
1579 /*
1580 * pmd_lock covers a wider range than ptl, and (if split from mm's
1581 * page_table_lock) ptl nests inside pml. The less time we hold pml,
1582 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
1583 * inserts a valid as-if-COWed PTE without even looking up page cache.
1584 * So the folio's page lock does not protect against it: we must not drop
1585 * ptl before pgt_pmd is removed, so uffd private needs pml taken now.
1586 */
1587 if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
1588 pml = pmd_lock(mm, pmd);
1589
1590 start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
1591 if (!start_pte) /* mmap_lock + page lock should prevent this */
1592 goto abort;
1593 if (!pml)
1594 spin_lock(ptl);
1595 else if (ptl != pml)
1596 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1597
1598 /* step 2: clear page table and adjust rmap */
1599 for (i = 0, addr = haddr, pte = start_pte;
1600 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1601 struct page *page;
1602 pte_t ptent = ptep_get(pte);
1603
1604 if (pte_none(ptent))
1605 continue;
1606 /*
1607 * We dropped ptl after the first scan, to do the mmu_notifier:
1608 * page lock stops more PTEs of the folio being faulted in, but
1609 * does not stop write faults COWing anon copies from existing
1610 * PTEs; and does not stop those being swapped out or migrated.
1611 */
1612 if (!pte_present(ptent)) {
1613 result = SCAN_PTE_NON_PRESENT;
1614 goto abort;
1615 }
1616 page = vm_normal_page(vma, addr, ptent);
1617 if (folio_page(folio, i) != page)
1618 goto abort;
1619
1620 /*
1621 * Must clear entry, or a racing truncate may re-remove it.
1622 * TLB flush can be left until pmdp_collapse_flush() does it.
1623 * PTE dirty? Shmem page is already dirty; file is read-only.
1624 */
1625 ptep_clear(mm, addr, pte);
1626 folio_remove_rmap_pte(folio, page, vma);
1627 nr_ptes++;
1628 }
1629
1630 pte_unmap(start_pte);
1631 if (!pml)
1632 spin_unlock(ptl);
1633
1634 /* step 3: set proper refcount and mm_counters. */
1635 if (nr_ptes) {
1636 folio_ref_sub(folio, nr_ptes);
1637 add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
1638 }
1639
1640 /* step 4: remove empty page table */
1641 if (!pml) {
1642 pml = pmd_lock(mm, pmd);
1643 if (ptl != pml)
1644 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1645 }
1646 pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1647 pmdp_get_lockless_sync();
1648 if (ptl != pml)
1649 spin_unlock(ptl);
1650 spin_unlock(pml);
1651
1652 mmu_notifier_invalidate_range_end(&range);
1653
1654 mm_dec_nr_ptes(mm);
1655 page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
1656 pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1657
1658maybe_install_pmd:
1659 /* step 5: install pmd entry */
1660 result = install_pmd
1661 ? set_huge_pmd(vma, haddr, pmd, &folio->page)
1662 : SCAN_SUCCEED;
1663 goto drop_folio;
1664abort:
1665 if (nr_ptes) {
1666 flush_tlb_mm(mm);
1667 folio_ref_sub(folio, nr_ptes);
1668 add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
1669 }
1670 if (start_pte)
1671 pte_unmap_unlock(start_pte, ptl);
1672 if (pml && pml != ptl)
1673 spin_unlock(pml);
1674 if (notified)
1675 mmu_notifier_invalidate_range_end(&range);
1676drop_folio:
1677 folio_unlock(folio);
1678 folio_put(folio);
1679 return result;
1680}
1681
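/*
 * Walk every VMA that maps @pgoff of @mapping and, where it is safe (no
 * anon_vma, not uffd-wp armed, PMD-aligned range fully inside the VMA),
 * withdraw the now-unneeded page table so the file THP can later refault as
 * a PMD mapping.
 */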
1682static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1683{
1684 struct vm_area_struct *vma;
1685
1686 i_mmap_lock_read(mapping);
1687 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1688 struct mmu_notifier_range range;
1689 struct mm_struct *mm;
1690 unsigned long addr;
1691 pmd_t *pmd, pgt_pmd;
1692 spinlock_t *pml;
1693 spinlock_t *ptl;
1694 bool skipped_uffd = false;
1695
1696 /*
1697 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1698 * got written to. These VMAs are likely not worth removing
1699 * page tables from, as PMD-mapping is likely to be split later.
1700 */
1701 if (READ_ONCE(vma->anon_vma))
1702 continue;
1703
1704 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1705 if (addr & ~HPAGE_PMD_MASK ||
1706 vma->vm_end < addr + HPAGE_PMD_SIZE)
1707 continue;
1708
1709 mm = vma->vm_mm;
1710 if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
1711 continue;
1712
1713 if (hpage_collapse_test_exit(mm))
1714 continue;
1715 /*
1716 * When a vma is registered with uffd-wp, we cannot recycle
1717 * the page table because there may be pte markers installed.
1718 * Other vmas can still have the same file mapped hugely, but
1719 * skip this one: it will always be mapped in small page size
1720 * for uffd-wp registered ranges.
1721 */
1722 if (userfaultfd_wp(vma))
1723 continue;
1724
1725 /* PTEs were notified when unmapped; but now for the PMD? */
1726 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1727 addr, addr + HPAGE_PMD_SIZE);
1728 mmu_notifier_invalidate_range_start(&range);
1729
1730 pml = pmd_lock(mm, pmd);
1731 ptl = pte_lockptr(mm, pmd);
1732 if (ptl != pml)
1733 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1734
1735 /*
1736 * Huge page lock is still held, so normally the page table
1737 * must remain empty; and we have already skipped anon_vma
1738 * and userfaultfd_wp() vmas. But since the mmap_lock is not
1739 * held, it is still possible for a racing userfaultfd_ioctl()
1740 * to have inserted ptes or markers. Now that we hold ptlock,
1741 * repeating the anon_vma check protects from one category,
1742 * and repeating the userfaultfd_wp() check from another.
1743 */
1744 if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
1745 skipped_uffd = true;
1746 } else {
1747 pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
1748 pmdp_get_lockless_sync();
1749 }
1750
1751 if (ptl != pml)
1752 spin_unlock(ptl);
1753 spin_unlock(pml);
1754
1755 mmu_notifier_invalidate_range_end(&range);
1756
1757 if (!skipped_uffd) {
1758 mm_dec_nr_ptes(mm);
1759 page_table_check_pte_clear_range(mm, addr, pgt_pmd);
1760 pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1761 }
1762 }
1763 i_mmap_unlock_read(mapping);
1764}
1765
1766/**
1767 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1768 *
1769 * @mm: process address space where collapse happens
1770 * @addr: virtual collapse start address
1771 * @file: the file the collapse operates on
1772 * @start: collapse start address
1773 * @cc: collapse context and scratchpad
1774 *
1775 * Basic scheme is simple, details are more complex:
1776 * - allocate and lock a new huge page;
1777 * - scan page cache, locking old pages
1778 * + swap/gup in pages if necessary;
1779 * - copy data to new page
1780 * - handle shmem holes
1781 * + re-validate that holes weren't filled by someone else
1782 * + check for userfaultfd
1783 * - finalize updates to the page cache;
1784 * - if replacing succeeds:
1785 * + unlock huge page;
1786 * + free old pages;
1787 * - if replacing failed:
1788 * + unlock old pages
1789 * + unlock and free huge page;
1790 */
1791static int collapse_file(struct mm_struct *mm, unsigned long addr,
1792 struct file *file, pgoff_t start,
1793 struct collapse_control *cc)
1794{
1795 struct address_space *mapping = file->f_mapping;
1796 struct page *hpage;
1797 struct page *page;
1798 struct page *tmp;
1799 struct folio *folio;
1800 pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1801 LIST_HEAD(pagelist);
1802 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1803 int nr_none = 0, result = SCAN_SUCCEED;
1804 bool is_shmem = shmem_file(file);
1805 int nr = 0;
1806
1807 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1808 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1809
1810 result = alloc_charge_hpage(&hpage, mm, cc);
1811 if (result != SCAN_SUCCEED)
1812 goto out;
1813
1814 __SetPageLocked(hpage);
1815 if (is_shmem)
1816 __SetPageSwapBacked(hpage);
1817 hpage->index = start;
1818 hpage->mapping = mapping;
1819
1820 /*
1821 * Ensure we have slots for all the pages in the range. This is
1822 * almost certainly a no-op because most of the pages must be present.
1823 */
1824 do {
1825 xas_lock_irq(&xas);
1826 xas_create_range(&xas);
1827 if (!xas_error(&xas))
1828 break;
1829 xas_unlock_irq(&xas);
1830 if (!xas_nomem(&xas, GFP_KERNEL)) {
1831 result = SCAN_FAIL;
1832 goto rollback;
1833 }
1834 } while (1);
1835
1836 for (index = start; index < end; index++) {
1837 xas_set(&xas, index);
1838 page = xas_load(&xas);
1839
1840 VM_BUG_ON(index != xas.xa_index);
1841 if (is_shmem) {
1842 if (!page) {
1843 /*
1844 * Stop if extent has been truncated or
1845 * hole-punched, and is now completely
1846 * empty.
1847 */
1848 if (index == start) {
1849 if (!xas_next_entry(&xas, end - 1)) {
1850 result = SCAN_TRUNCATED;
1851 goto xa_locked;
1852 }
1853 }
1854 nr_none++;
1855 continue;
1856 }
1857
1858 if (xa_is_value(page) || !PageUptodate(page)) {
1859 xas_unlock_irq(&xas);
1860 /* swap in or instantiate fallocated page */
1861 if (shmem_get_folio(mapping->host, index,
1862 &folio, SGP_NOALLOC)) {
1863 result = SCAN_FAIL;
1864 goto xa_unlocked;
1865 }
1866 /* drain lru cache to help isolate_lru_page() */
1867 lru_add_drain();
1868 page = folio_file_page(folio, index);
1869 } else if (trylock_page(page)) {
1870 get_page(page);
1871 xas_unlock_irq(&xas);
1872 } else {
1873 result = SCAN_PAGE_LOCK;
1874 goto xa_locked;
1875 }
1876 } else { /* !is_shmem */
1877 if (!page || xa_is_value(page)) {
1878 xas_unlock_irq(&xas);
1879 page_cache_sync_readahead(mapping, &file->f_ra,
1880 file, index,
1881 end - index);
1882 /* drain lru cache to help isolate_lru_page() */
1883 lru_add_drain();
1884 page = find_lock_page(mapping, index);
1885 if (unlikely(page == NULL)) {
1886 result = SCAN_FAIL;
1887 goto xa_unlocked;
1888 }
1889 } else if (PageDirty(page)) {
1890 /*
1891 * khugepaged only works on read-only fd,
1892 * so this page is dirty because it hasn't
1893 * been flushed since first write. There
1894 * won't be new dirty pages.
1895 *
1896 * Trigger async flush here and hope the
1897 * writeback is done when khugepaged
1898 * revisits this page.
1899 *
1900 * This is a one-off situation. We are not
1901 * forcing writeback in a loop.
1902 */
1903 xas_unlock_irq(&xas);
1904 filemap_flush(mapping);
1905 result = SCAN_FAIL;
1906 goto xa_unlocked;
1907 } else if (PageWriteback(page)) {
1908 xas_unlock_irq(&xas);
1909 result = SCAN_FAIL;
1910 goto xa_unlocked;
1911 } else if (trylock_page(page)) {
1912 get_page(page);
1913 xas_unlock_irq(&xas);
1914 } else {
1915 result = SCAN_PAGE_LOCK;
1916 goto xa_locked;
1917 }
1918 }
1919
1920 /*
1921 * The page must be locked, so we can drop the i_pages lock
1922 * without racing with truncate.
1923 */
1924 VM_BUG_ON_PAGE(!PageLocked(page), page);
1925
1926 /* make sure the page is up to date */
1927 if (unlikely(!PageUptodate(page))) {
1928 result = SCAN_FAIL;
1929 goto out_unlock;
1930 }
1931
1932 /*
1933 * If file was truncated then extended, or hole-punched, before
1934 * we locked the first page, then a THP might be there already.
1935 * This will be discovered on the first iteration.
1936 */
1937 if (PageTransCompound(page)) {
1938 struct page *head = compound_head(page);
1939
1940 result = compound_order(head) == HPAGE_PMD_ORDER &&
1941 head->index == start
1942 /* Maybe PMD-mapped */
1943 ? SCAN_PTE_MAPPED_HUGEPAGE
1944 : SCAN_PAGE_COMPOUND;
1945 goto out_unlock;
1946 }
1947
1948 folio = page_folio(page);
1949
1950 if (folio_mapping(folio) != mapping) {
1951 result = SCAN_TRUNCATED;
1952 goto out_unlock;
1953 }
1954
1955 if (!is_shmem && (folio_test_dirty(folio) ||
1956 folio_test_writeback(folio))) {
1957 /*
1958 * khugepaged only works on read-only fds, so this
1959 * page is dirty because it hasn't been flushed
1960 * since the first write.
1961 */
1962 result = SCAN_FAIL;
1963 goto out_unlock;
1964 }
1965
1966 if (!folio_isolate_lru(folio)) {
1967 result = SCAN_DEL_PAGE_LRU;
1968 goto out_unlock;
1969 }
1970
1971 if (!filemap_release_folio(folio, GFP_KERNEL)) {
1972 result = SCAN_PAGE_HAS_PRIVATE;
1973 folio_putback_lru(folio);
1974 goto out_unlock;
1975 }
1976
1977 if (folio_mapped(folio))
1978 try_to_unmap(folio,
1979 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1980
1981 xas_lock_irq(&xas);
1982
1983 VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page);
1984
1985 /*
1986 * We control three references to the page:
1987 * - we hold a pin on it;
1988 * - one reference from the page cache;
1989 * - one from folio_isolate_lru();
1990 * If those are the only references, then any new usage of the
1991 * page will have to fetch it from the page cache. That requires
1992 * locking the page to handle truncate, so any new usage will be
1993 * blocked until we unlock the page after collapse/during rollback.
1994 */
1995 if (page_count(page) != 3) {
1996 result = SCAN_PAGE_COUNT;
1997 xas_unlock_irq(&xas);
1998 putback_lru_page(page);
1999 goto out_unlock;
2000 }
2001
2002 /*
2003 * Accumulate the pages that are being collapsed.
2004 */
2005 list_add_tail(&page->lru, &pagelist);
2006 continue;
2007out_unlock:
2008 unlock_page(page);
2009 put_page(page);
2010 goto xa_unlocked;
2011 }
2012
2013 if (!is_shmem) {
2014 filemap_nr_thps_inc(mapping);
2015 /*
2016 * Paired with smp_mb() in do_dentry_open() to ensure
2017 * i_writecount is up to date and the update to nr_thps is
2018 * visible. This ensures the page cache will be truncated if
2019 * the file is opened writable.
2020 */
2021 smp_mb();
2022 if (inode_is_open_for_write(mapping->host)) {
2023 result = SCAN_FAIL;
2024 filemap_nr_thps_dec(mapping);
2025 }
2026 }
2027
2028xa_locked:
2029 xas_unlock_irq(&xas);
2030xa_unlocked:
2031
2032 /*
2033 * If collapse is successful, flush must be done now before copying.
2034 * If collapse is unsuccessful, does flush actually need to be done?
2035 * Do it anyway, to clear the state.
2036 */
2037 try_to_unmap_flush();
2038
2039 if (result == SCAN_SUCCEED && nr_none &&
2040 !shmem_charge(mapping->host, nr_none))
2041 result = SCAN_FAIL;
2042 if (result != SCAN_SUCCEED) {
2043 nr_none = 0;
2044 goto rollback;
2045 }
2046
2047 /*
2048 * The old pages are locked, so they won't change anymore.
2049 */
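/*
 * Copy each old page into its slot of the new hugepage: since start is
 * HPAGE_PMD_NR-aligned, "index % HPAGE_PMD_NR" picks the subpage of hpage
 * backing file offset "index", and holes (the nr_none entries) are
 * zero-filled with clear_highpage().  copy_mc_highpage() is the
 * machine-check-safe copy; a non-zero return means poisoned memory was hit,
 * so bail out with SCAN_COPY_MC rather than install a corrupted hugepage.
 */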
2050 index = start;
2051 list_for_each_entry(page, &pagelist, lru) {
2052 while (index < page->index) {
2053 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2054 index++;
2055 }
2056 if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
2057 result = SCAN_COPY_MC;
2058 goto rollback;
2059 }
2060 index++;
2061 }
2062 while (index < end) {
2063 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2064 index++;
2065 }
2066
2067 if (nr_none) {
2068 struct vm_area_struct *vma;
2069 int nr_none_check = 0;
2070
2071 i_mmap_lock_read(mapping);
2072 xas_lock_irq(&xas);
2073
2074 xas_set(&xas, start);
2075 for (index = start; index < end; index++) {
2076 if (!xas_next(&xas)) {
2077 xas_store(&xas, XA_RETRY_ENTRY);
2078 if (xas_error(&xas)) {
2079 result = SCAN_STORE_FAILED;
2080 goto immap_locked;
2081 }
2082 nr_none_check++;
2083 }
2084 }
2085
2086 if (nr_none != nr_none_check) {
2087 result = SCAN_PAGE_FILLED;
2088 goto immap_locked;
2089 }
2090
2091 /*
2092 * If userspace observed a missing page in a VMA with a MODE_MISSING
2093 * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that
2094 * page. If so, we need to roll back to avoid suppressing such an
2095 * event. Wp/minor userfaultfds don't give userspace any
2096 * guarantee that the kernel won't fill a missing page with a
2097 * zero page, so they don't matter here.
2098 *
2099 * Any userfaultfds registered after this point will not be able to
2100 * observe any missing pages due to the previously inserted retry
2101 * entries.
2102 */
2103 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2104 if (userfaultfd_missing(vma)) {
2105 result = SCAN_EXCEED_NONE_PTE;
2106 goto immap_locked;
2107 }
2108 }
2109
2110immap_locked:
2111 i_mmap_unlock_read(mapping);
2112 if (result != SCAN_SUCCEED) {
2113 xas_set(&xas, start);
2114 for (index = start; index < end; index++) {
2115 if (xas_next(&xas) == XA_RETRY_ENTRY)
2116 xas_store(&xas, NULL);
2117 }
2118
2119 xas_unlock_irq(&xas);
2120 goto rollback;
2121 }
2122 } else {
2123 xas_lock_irq(&xas);
2124 }
2125
2126 folio = page_folio(hpage);
2127 nr = folio_nr_pages(folio);
2128 if (is_shmem)
2129 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
2130 else
2131 __lruvec_stat_mod_folio(folio, NR_FILE_THPS, nr);
2132
2133 if (nr_none) {
2134 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_none);
2135 /* nr_none is always 0 for non-shmem. */
2136 __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_none);
2137 }
2138
2139 /*
2140 * Mark hpage as uptodate before inserting it into the page cache so
2141 * that it isn't mistaken for a fallocated but unwritten page.
2142 */
2143 folio_mark_uptodate(folio);
2144 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2145
2146 if (is_shmem)
2147 folio_mark_dirty(folio);
2148 folio_add_lru(folio);
2149
2150 /* Join all the small entries into a single multi-index entry. */
2151 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2152 xas_store(&xas, folio);
2153 WARN_ON_ONCE(xas_error(&xas));
2154 xas_unlock_irq(&xas);
2155
2156 /*
2157 * Remove pte page tables, so we can re-fault the page as huge.
2158 * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
2159 */
2160 retract_page_tables(mapping, start);
2161 if (cc && !cc->is_khugepaged)
2162 result = SCAN_PTE_MAPPED_HUGEPAGE;
2163 folio_unlock(folio);
2164
2165 /*
2166 * The collapse has succeeded, so free the old pages.
2167 */
2168 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2169 list_del(&page->lru);
2170 page->mapping = NULL;
2171 ClearPageActive(page);
2172 ClearPageUnevictable(page);
2173 unlock_page(page);
2174 folio_put_refs(page_folio(page), 3);
2175 }
2176
2177 goto out;
2178
2179rollback:
2180 /* Something went wrong: roll back page cache changes */
2181 if (nr_none) {
2182 xas_lock_irq(&xas);
2183 mapping->nrpages -= nr_none;
2184 xas_unlock_irq(&xas);
2185 shmem_uncharge(mapping->host, nr_none);
2186 }
2187
2188 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2189 list_del(&page->lru);
2190 unlock_page(page);
2191 putback_lru_page(page);
2192 put_page(page);
2193 }
2194 /*
2195 * Undo the update of filemap_nr_thps_inc() for non-SHMEM
2196 * files only. This undo is needed only when the failure
2197 * is due to SCAN_COPY_MC.
2198 */
2199 if (!is_shmem && result == SCAN_COPY_MC) {
2200 filemap_nr_thps_dec(mapping);
2201 /*
2202 * Paired with smp_mb() in do_dentry_open() to
2203 * ensure the update to nr_thps is visible.
2204 */
2205 smp_mb();
2206 }
2207
2208 hpage->mapping = NULL;
2209
2210 unlock_page(hpage);
2211 put_page(hpage);
2212out:
2213 VM_BUG_ON(!list_empty(&pagelist));
2214 trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
2215 return result;
2216}
2217
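/*
 * Lightweight, read-only check of the page cache range
 * [start, start + HPAGE_PMD_NR) before attempting a file/shmem collapse.
 * The XArray is walked under RCU: swap entries are counted against
 * khugepaged_max_ptes_swap, compound pages end the walk early, and each page
 * gets NUMA-node, LRU and reference-count sanity checks.  collapse_file() is
 * only called if the walk succeeds and (for khugepaged) at most
 * khugepaged_max_ptes_none entries are absent.
 */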
2218static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2219 struct file *file, pgoff_t start,
2220 struct collapse_control *cc)
2221{
2222 struct page *page = NULL;
2223 struct address_space *mapping = file->f_mapping;
2224 XA_STATE(xas, &mapping->i_pages, start);
2225 int present, swap;
2226 int node = NUMA_NO_NODE;
2227 int result = SCAN_SUCCEED;
2228
2229 present = 0;
2230 swap = 0;
2231 memset(cc->node_load, 0, sizeof(cc->node_load));
2232 nodes_clear(cc->alloc_nmask);
2233 rcu_read_lock();
2234 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2235 if (xas_retry(&xas, page))
2236 continue;
2237
2238 if (xa_is_value(page)) {
2239 ++swap;
2240 if (cc->is_khugepaged &&
2241 swap > khugepaged_max_ptes_swap) {
2242 result = SCAN_EXCEED_SWAP_PTE;
2243 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2244 break;
2245 }
2246 continue;
2247 }
2248
2249 /*
2250 * TODO: khugepaged should compact smaller compound pages
2251 * into a PMD-sized page
2252 */
2253 if (PageTransCompound(page)) {
2254 struct page *head = compound_head(page);
2255
2256 result = compound_order(head) == HPAGE_PMD_ORDER &&
2257 head->index == start
2258 /* Maybe PMD-mapped */
2259 ? SCAN_PTE_MAPPED_HUGEPAGE
2260 : SCAN_PAGE_COMPOUND;
2261 /*
2262 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2263 * by the caller won't touch the page cache, and so
2264 * it's safe to skip LRU and refcount checks before
2265 * returning.
2266 */
2267 break;
2268 }
2269
2270 node = page_to_nid(page);
2271 if (hpage_collapse_scan_abort(node, cc)) {
2272 result = SCAN_SCAN_ABORT;
2273 break;
2274 }
2275 cc->node_load[node]++;
2276
2277 if (!PageLRU(page)) {
2278 result = SCAN_PAGE_LRU;
2279 break;
2280 }
2281
2282 if (page_count(page) !=
2283 1 + page_mapcount(page) + page_has_private(page)) {
2284 result = SCAN_PAGE_COUNT;
2285 break;
2286 }
2287
2288 /*
2289 * We probably should check if the page is referenced here, but
2290 * nobody would transfer pte_young() to PageReferenced() for us.
2291 * And rmap walk here is just too costly...
2292 */
2293
2294 present++;
2295
2296 if (need_resched()) {
2297 xas_pause(&xas);
2298 cond_resched_rcu();
2299 }
2300 }
2301 rcu_read_unlock();
2302
2303 if (result == SCAN_SUCCEED) {
2304 if (cc->is_khugepaged &&
2305 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2306 result = SCAN_EXCEED_NONE_PTE;
2307 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2308 } else {
2309 result = collapse_file(mm, addr, file, start, cc);
2310 }
2311 }
2312
2313 trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
2314 return result;
2315}
2316#else
2317static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2318 struct file *file, pgoff_t start,
2319 struct collapse_control *cc)
2320{
2321 BUILD_BUG();
2322}
2323#endif
2324
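/*
 * Scan up to 'pages' worth of ptes, resuming from the saved cursor
 * (khugepaged_scan.mm_slot / khugepaged_scan.address).  khugepaged_mm_lock is
 * dropped while an mm is being scanned and re-taken before returning.  If an
 * mm's mmap_lock cannot be acquired with a trylock, the cursor simply moves
 * on to the next mm.  Returns the amount of progress made and stores the
 * last scan status in *result.
 */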
2325static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2326 struct collapse_control *cc)
2327 __releases(&khugepaged_mm_lock)
2328 __acquires(&khugepaged_mm_lock)
2329{
2330 struct vma_iterator vmi;
2331 struct khugepaged_mm_slot *mm_slot;
2332 struct mm_slot *slot;
2333 struct mm_struct *mm;
2334 struct vm_area_struct *vma;
2335 int progress = 0;
2336
2337 VM_BUG_ON(!pages);
2338 lockdep_assert_held(&khugepaged_mm_lock);
2339 *result = SCAN_FAIL;
2340
2341 if (khugepaged_scan.mm_slot) {
2342 mm_slot = khugepaged_scan.mm_slot;
2343 slot = &mm_slot->slot;
2344 } else {
2345 slot = list_entry(khugepaged_scan.mm_head.next,
2346 struct mm_slot, mm_node);
2347 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2348 khugepaged_scan.address = 0;
2349 khugepaged_scan.mm_slot = mm_slot;
2350 }
2351 spin_unlock(&khugepaged_mm_lock);
2352
2353 mm = slot->mm;
2354 /*
2355 * Don't wait for the semaphore (to avoid long wait times). Just move to
2356 * the next mm on the list.
2357 */
2358 vma = NULL;
2359 if (unlikely(!mmap_read_trylock(mm)))
2360 goto breakouterloop_mmap_lock;
2361
2362 progress++;
2363 if (unlikely(hpage_collapse_test_exit(mm)))
2364 goto breakouterloop;
2365
2366 vma_iter_init(&vmi, mm, khugepaged_scan.address);
2367 for_each_vma(vmi, vma) {
2368 unsigned long hstart, hend;
2369
2370 cond_resched();
2371 if (unlikely(hpage_collapse_test_exit(mm))) {
2372 progress++;
2373 break;
2374 }
2375 if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
2376 true, PMD_ORDER)) {
2377skip:
2378 progress++;
2379 continue;
2380 }
2381 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2382 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2383 if (khugepaged_scan.address > hend)
2384 goto skip;
2385 if (khugepaged_scan.address < hstart)
2386 khugepaged_scan.address = hstart;
2387 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2388
2389 while (khugepaged_scan.address < hend) {
2390 bool mmap_locked = true;
2391
2392 cond_resched();
2393 if (unlikely(hpage_collapse_test_exit(mm)))
2394 goto breakouterloop;
2395
2396 VM_BUG_ON(khugepaged_scan.address < hstart ||
2397 khugepaged_scan.address + HPAGE_PMD_SIZE >
2398 hend);
2399 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2400 struct file *file = get_file(vma->vm_file);
2401 pgoff_t pgoff = linear_page_index(vma,
2402 khugepaged_scan.address);
2403
2404 mmap_read_unlock(mm);
2405 mmap_locked = false;
2406 *result = hpage_collapse_scan_file(mm,
2407 khugepaged_scan.address, file, pgoff, cc);
2408 fput(file);
2409 if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
2410 mmap_read_lock(mm);
2411 if (hpage_collapse_test_exit(mm))
2412 goto breakouterloop;
2413 *result = collapse_pte_mapped_thp(mm,
2414 khugepaged_scan.address, false);
2415 if (*result == SCAN_PMD_MAPPED)
2416 *result = SCAN_SUCCEED;
2417 mmap_read_unlock(mm);
2418 }
2419 } else {
2420 *result = hpage_collapse_scan_pmd(mm, vma,
2421 khugepaged_scan.address, &mmap_locked, cc);
2422 }
2423
2424 if (*result == SCAN_SUCCEED)
2425 ++khugepaged_pages_collapsed;
2426
2427 /* move to next address */
2428 khugepaged_scan.address += HPAGE_PMD_SIZE;
2429 progress += HPAGE_PMD_NR;
2430 if (!mmap_locked)
2431 /*
2432 * We released mmap_lock so break the loop. Note
2433 * that we drop mmap_lock before all hugepage
2434 * allocations, so if allocation fails, we are
2435 * guaranteed to break here and report the
2436 * correct result back to the caller.
2437 */
2438 goto breakouterloop_mmap_lock;
2439 if (progress >= pages)
2440 goto breakouterloop;
2441 }
2442 }
2443breakouterloop:
2444 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2445breakouterloop_mmap_lock:
2446
2447 spin_lock(&khugepaged_mm_lock);
2448 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2449 /*
2450 * Release the current mm_slot if this mm is about to die, or
2451 * if we scanned all vmas of this mm.
2452 */
2453 if (hpage_collapse_test_exit(mm) || !vma) {
2454 /*
2455 * Make sure that if mm_users is reaching zero while
2456 * khugepaged runs here, khugepaged_exit will find
2457 * mm_slot not pointing to the exiting mm.
2458 */
2459 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2460 slot = list_entry(slot->mm_node.next,
2461 struct mm_slot, mm_node);
2462 khugepaged_scan.mm_slot =
2463 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2464 khugepaged_scan.address = 0;
2465 } else {
2466 khugepaged_scan.mm_slot = NULL;
2467 khugepaged_full_scans++;
2468 }
2469
2470 collect_mm_slot(mm_slot);
2471 }
2472
2473 return progress;
2474}
2475
2476static int khugepaged_has_work(void)
2477{
2478 return !list_empty(&khugepaged_scan.mm_head) &&
2479 hugepage_flags_enabled();
2480}
2481
2482static int khugepaged_wait_event(void)
2483{
2484 return !list_empty(&khugepaged_scan.mm_head) ||
2485 kthread_should_stop();
2486}
2487
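/*
 * One scan pass of the khugepaged thread: drain the LRU caches, then keep
 * calling khugepaged_scan_mm_slot() under khugepaged_mm_lock until
 * khugepaged_pages_to_scan ptes have been covered, the mm list has been fully
 * walked, or the thread is asked to stop.  A hugepage allocation failure is
 * tolerated once, after sleeping in khugepaged_alloc_sleep(); a second
 * failure ends the pass.
 */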
2488static void khugepaged_do_scan(struct collapse_control *cc)
2489{
2490 unsigned int progress = 0, pass_through_head = 0;
2491 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2492 bool wait = true;
2493 int result = SCAN_SUCCEED;
2494
2495 lru_add_drain_all();
2496
2497 while (true) {
2498 cond_resched();
2499
2500 if (unlikely(kthread_should_stop()))
2501 break;
2502
2503 spin_lock(&khugepaged_mm_lock);
2504 if (!khugepaged_scan.mm_slot)
2505 pass_through_head++;
2506 if (khugepaged_has_work() &&
2507 pass_through_head < 2)
2508 progress += khugepaged_scan_mm_slot(pages - progress,
2509 &result, cc);
2510 else
2511 progress = pages;
2512 spin_unlock(&khugepaged_mm_lock);
2513
2514 if (progress >= pages)
2515 break;
2516
2517 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2518 /*
2519 * If allocation fails the first time, sleep for a
2520 * while. If it fails again, cancel the scan.
2521 */
2522 if (!wait)
2523 break;
2524 wait = false;
2525 khugepaged_alloc_sleep();
2526 }
2527 }
2528}
2529
2530static bool khugepaged_should_wakeup(void)
2531{
2532 return kthread_should_stop() ||
2533 time_after_eq(jiffies, khugepaged_sleep_expire);
2534}
2535
2536static void khugepaged_wait_work(void)
2537{
2538 if (khugepaged_has_work()) {
2539 const unsigned long scan_sleep_jiffies =
2540 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2541
2542 if (!scan_sleep_jiffies)
2543 return;
2544
2545 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2546 wait_event_freezable_timeout(khugepaged_wait,
2547 khugepaged_should_wakeup(),
2548 scan_sleep_jiffies);
2549 return;
2550 }
2551
2552 if (hugepage_flags_enabled())
2553 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2554}
2555
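/*
 * Main loop of the khugepaged kernel thread: a freezable kthread running at
 * MAX_NICE that alternates between khugepaged_do_scan() and
 * khugepaged_wait_work() until it is asked to stop, then releases any mm_slot
 * the scan cursor still points at.
 */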
2556static int khugepaged(void *none)
2557{
2558 struct khugepaged_mm_slot *mm_slot;
2559
2560 set_freezable();
2561 set_user_nice(current, MAX_NICE);
2562
2563 while (!kthread_should_stop()) {
2564 khugepaged_do_scan(&khugepaged_collapse_control);
2565 khugepaged_wait_work();
2566 }
2567
2568 spin_lock(&khugepaged_mm_lock);
2569 mm_slot = khugepaged_scan.mm_slot;
2570 khugepaged_scan.mm_slot = NULL;
2571 if (mm_slot)
2572 collect_mm_slot(mm_slot);
2573 spin_unlock(&khugepaged_mm_lock);
2574 return 0;
2575}
2576
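/*
 * Bump min_free_kbytes so fragmentation avoidance keeps working while THP is
 * enabled.  Per eligible zone the reserve is
 *
 *	pageblock_nr_pages * 2                                  (two free pageblocks)
 *	+ pageblock_nr_pages * MIGRATE_PCPTYPES * MIGRATE_PCPTYPES  (fallback headroom)
 *
 * capped at 5% of lowmem.  A rough worked example, assuming 4 KiB pages,
 * 2 MiB pageblocks (pageblock_nr_pages == 512), MIGRATE_PCPTYPES == 3 and a
 * single eligible zone: 512 * 2 + 512 * 9 = 5632 pages, i.e.
 * 5632 << (PAGE_SHIFT - 10) = 22528 kB, provided that stays under the cap.
 */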
2577static void set_recommended_min_free_kbytes(void)
2578{
2579 struct zone *zone;
2580 int nr_zones = 0;
2581 unsigned long recommended_min;
2582
2583 if (!hugepage_flags_enabled()) {
2584 calculate_min_free_kbytes();
2585 goto update_wmarks;
2586 }
2587
2588 for_each_populated_zone(zone) {
2589 /*
2590 * We don't need to worry about fragmentation of
2591 * ZONE_MOVABLE since it only has movable pages.
2592 */
2593 if (zone_idx(zone) > gfp_zone(GFP_USER))
2594 continue;
2595
2596 nr_zones++;
2597 }
2598
2599 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2600 recommended_min = pageblock_nr_pages * nr_zones * 2;
2601
2602 /*
2603 * Make sure that on average at least two pageblocks are almost free
2604 * of another type, one for a migratetype to fall back to and a
2605 * second to avoid subsequent fallbacks of other types. There are 3
2606 * MIGRATE_TYPES we care about.
2607 */
2608 recommended_min += pageblock_nr_pages * nr_zones *
2609 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2610
2611 /* don't ever allow reserving more than 5% of lowmem */
2612 recommended_min = min(recommended_min,
2613 (unsigned long) nr_free_buffer_pages() / 20);
2614 recommended_min <<= (PAGE_SHIFT-10);
2615
2616 if (recommended_min > min_free_kbytes) {
2617 if (user_min_free_kbytes >= 0)
2618 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2619 min_free_kbytes, recommended_min);
2620
2621 min_free_kbytes = recommended_min;
2622 }
2623
2624update_wmarks:
2625 setup_per_zone_wmarks();
2626}
2627
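/*
 * Start or stop the khugepaged thread to match the current THP enabled
 * setting, then recompute min_free_kbytes.  Serialized by khugepaged_mutex so
 * concurrent callers cannot race the thread's creation and teardown.
 */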
2628int start_stop_khugepaged(void)
2629{
2630 int err = 0;
2631
2632 mutex_lock(&khugepaged_mutex);
2633 if (hugepage_flags_enabled()) {
2634 if (!khugepaged_thread)
2635 khugepaged_thread = kthread_run(khugepaged, NULL,
2636 "khugepaged");
2637 if (IS_ERR(khugepaged_thread)) {
2638 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2639 err = PTR_ERR(khugepaged_thread);
2640 khugepaged_thread = NULL;
2641 goto fail;
2642 }
2643
2644 if (!list_empty(&khugepaged_scan.mm_head))
2645 wake_up_interruptible(&khugepaged_wait);
2646 } else if (khugepaged_thread) {
2647 kthread_stop(khugepaged_thread);
2648 khugepaged_thread = NULL;
2649 }
2650 set_recommended_min_free_kbytes();
2651fail:
2652 mutex_unlock(&khugepaged_mutex);
2653 return err;
2654}
2655
2656void khugepaged_min_free_kbytes_update(void)
2657{
2658 mutex_lock(&khugepaged_mutex);
2659 if (hugepage_flags_enabled() && khugepaged_thread)
2660 set_recommended_min_free_kbytes();
2661 mutex_unlock(&khugepaged_mutex);
2662}
2663
2664bool current_is_khugepaged(void)
2665{
2666 return kthread_func(current) == khugepaged;
2667}
2668
2669static int madvise_collapse_errno(enum scan_result r)
2670{
2671 /*
2672 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2673 * actionable feedback to the caller, so they may take an appropriate
2674 * fallback measure depending on the nature of the failure.
2675 */
2676 switch (r) {
2677 case SCAN_ALLOC_HUGE_PAGE_FAIL:
2678 return -ENOMEM;
2679 case SCAN_CGROUP_CHARGE_FAIL:
2680 case SCAN_EXCEED_NONE_PTE:
2681 return -EBUSY;
2682 /* Resource temporarily unavailable - trying again might succeed */
2683 case SCAN_PAGE_COUNT:
2684 case SCAN_PAGE_LOCK:
2685 case SCAN_PAGE_LRU:
2686 case SCAN_DEL_PAGE_LRU:
2687 case SCAN_PAGE_FILLED:
2688 return -EAGAIN;
2689 /*
2690 * Other: trying again is unlikely to succeed, or the error is
2691 * intrinsic to the specified memory range. khugepaged likely
2692 * won't be able to collapse it either.
2693 */
2694 default:
2695 return -EINVAL;
2696 }
2697}
2698
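/*
 * madvise_collapse() backs madvise(MADV_COLLAPSE).  A minimal (hypothetical)
 * userspace sketch of how this path is reached, assuming a 2 MiB PMD size:
 *
 *	buf = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(buf, 1, 2UL << 20);                   // populate the ptes
 *	if (madvise(buf, 2UL << 20, MADV_COLLAPSE))  // request collapse
 *		perror("madvise(MADV_COLLAPSE)");
 *
 * Returns 0 only when every PMD-aligned, PMD-sized region within
 * [start, end) ends up (or already was) PMD-mapped; otherwise the errno
 * derived from the last failure via madvise_collapse_errno().
 */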
2699int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2700 unsigned long start, unsigned long end)
2701{
2702 struct collapse_control *cc;
2703 struct mm_struct *mm = vma->vm_mm;
2704 unsigned long hstart, hend, addr;
2705 int thps = 0, last_fail = SCAN_FAIL;
2706 bool mmap_locked = true;
2707
2708 BUG_ON(vma->vm_start > start);
2709 BUG_ON(vma->vm_end < end);
2710
2711 *prev = vma;
2712
2713 if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
2714 PMD_ORDER))
2715 return -EINVAL;
2716
2717 cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2718 if (!cc)
2719 return -ENOMEM;
2720 cc->is_khugepaged = false;
2721
2722 mmgrab(mm);
2723 lru_add_drain_all();
2724
2725 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2726 hend = end & HPAGE_PMD_MASK;
2727
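/*
 * Only PMD-aligned, PMD-sized regions lying entirely inside [start, end) are
 * attempted.  E.g. (hypothetical addresses, assuming 2 MiB huge pages)
 * start = 0x201000 and end = 0x800000 give hstart = 0x400000 and
 * hend = 0x800000, i.e. two candidate regions.
 */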
2728 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2729 int result = SCAN_FAIL;
2730
2731 if (!mmap_locked) {
2732 cond_resched();
2733 mmap_read_lock(mm);
2734 mmap_locked = true;
2735 result = hugepage_vma_revalidate(mm, addr, false, &vma,
2736 cc);
2737 if (result != SCAN_SUCCEED) {
2738 last_fail = result;
2739 goto out_nolock;
2740 }
2741
2742 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2743 }
2744 mmap_assert_locked(mm);
2745 memset(cc->node_load, 0, sizeof(cc->node_load));
2746 nodes_clear(cc->alloc_nmask);
2747 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2748 struct file *file = get_file(vma->vm_file);
2749 pgoff_t pgoff = linear_page_index(vma, addr);
2750
2751 mmap_read_unlock(mm);
2752 mmap_locked = false;
2753 result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2754 cc);
2755 fput(file);
2756 } else {
2757 result = hpage_collapse_scan_pmd(mm, vma, addr,
2758 &mmap_locked, cc);
2759 }
2760 if (!mmap_locked)
2761 *prev = NULL; /* Tell caller we dropped mmap_lock */
2762
2763handle_result:
2764 switch (result) {
2765 case SCAN_SUCCEED:
2766 case SCAN_PMD_MAPPED:
2767 ++thps;
2768 break;
2769 case SCAN_PTE_MAPPED_HUGEPAGE:
2770 BUG_ON(mmap_locked);
2771 BUG_ON(*prev);
2772 mmap_read_lock(mm);
2773 result = collapse_pte_mapped_thp(mm, addr, true);
2774 mmap_read_unlock(mm);
2775 goto handle_result;
2776 /* Whitelisted set of results where continuing is OK */
2777 case SCAN_PMD_NULL:
2778 case SCAN_PTE_NON_PRESENT:
2779 case SCAN_PTE_UFFD_WP:
2780 case SCAN_PAGE_RO:
2781 case SCAN_LACK_REFERENCED_PAGE:
2782 case SCAN_PAGE_NULL:
2783 case SCAN_PAGE_COUNT:
2784 case SCAN_PAGE_LOCK:
2785 case SCAN_PAGE_COMPOUND:
2786 case SCAN_PAGE_LRU:
2787 case SCAN_DEL_PAGE_LRU:
2788 last_fail = result;
2789 break;
2790 default:
2791 last_fail = result;
2792 /* Other error, exit */
2793 goto out_maybelock;
2794 }
2795 }
2796
2797out_maybelock:
2798 /* Caller expects us to hold mmap_lock on return */
2799 if (!mmap_locked)
2800 mmap_read_lock(mm);
2801out_nolock:
2802 mmap_assert_locked(mm);
2803 mmdrop(mm);
2804 kfree(cc);
2805
2806 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2807 : madvise_collapse_errno(last_fail);
2808}