1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/page_table_check.h>
20 #include <linux/swapops.h>
21 #include <linux/shmem_fs.h>
24 #include <asm/pgalloc.h>
36 SCAN_EXCEED_SHARED_PTE,
39 SCAN_PTE_MAPPED_HUGEPAGE,
41 SCAN_LACK_REFERENCED_PAGE,
54 SCAN_ALLOC_HUGE_PAGE_FAIL,
55 SCAN_CGROUP_CHARGE_FAIL,
57 SCAN_PAGE_HAS_PRIVATE,
60 #define CREATE_TRACE_POINTS
61 #include <trace/events/huge_memory.h>
63 static struct task_struct *khugepaged_thread __read_mostly;
64 static DEFINE_MUTEX(khugepaged_mutex);
66 /* default scan 8*512 ptes (or vmas) every 10 seconds */
67 static unsigned int khugepaged_pages_to_scan __read_mostly;
68 static unsigned int khugepaged_pages_collapsed;
69 static unsigned int khugepaged_full_scans;
70 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
71 /* during fragmentation poll the hugepage allocator once every minute */
72 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
73 static unsigned long khugepaged_sleep_expire;
74 static DEFINE_SPINLOCK(khugepaged_mm_lock);
75 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
77 * default collapse hugepages if there is at least one pte mapped like
78 * it would have happened if the vma was large enough during page fault.
81 * Note that these are only respected if collapse was initiated by khugepaged.
83 static unsigned int khugepaged_max_ptes_none __read_mostly;
84 static unsigned int khugepaged_max_ptes_swap __read_mostly;
85 static unsigned int khugepaged_max_ptes_shared __read_mostly;
87 #define MM_SLOTS_HASH_BITS 10
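/*
 * Hash from an mm_struct to its khugepaged_mm_slot; insertions and
 * removals are serialized by khugepaged_mm_lock.
 */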
88 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
90 static struct kmem_cache *mm_slot_cache __read_mostly;
92 #define MAX_PTE_MAPPED_THP 8
94 struct collapse_control {
97 /* Num pages scanned per node */
98 u32 node_load[MAX_NUMNODES];
100 /* nodemask for allocation fallback */
101 nodemask_t alloc_nmask;
105 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
106 * @slot: hash lookup from mm to mm_slot
107 * @nr_pte_mapped_thp: number of pte-mapped THPs
108 * @pte_mapped_thp: array of addresses of the pte-mapped THPs
110 struct khugepaged_mm_slot {
113 /* pte-mapped THP in this mm */
114 int nr_pte_mapped_thp;
115 unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
119 * struct khugepaged_scan - cursor for scanning
120 * @mm_head: the head of the mm list to scan
121 * @mm_slot: the current mm_slot we are scanning
122 * @address: the next address inside that mm_slot to be scanned
124 * There is only one khugepaged_scan instance of this cursor structure.
126 struct khugepaged_scan {
127 struct list_head mm_head;
128 struct khugepaged_mm_slot *mm_slot;
129 unsigned long address;
132 static struct khugepaged_scan khugepaged_scan = {
133 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
137 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
138 struct kobj_attribute *attr,
141 return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
144 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
145 struct kobj_attribute *attr,
146 const char *buf, size_t count)
151 err = kstrtouint(buf, 10, &msecs);
155 khugepaged_scan_sleep_millisecs = msecs;
156 khugepaged_sleep_expire = 0;
157 wake_up_interruptible(&khugepaged_wait);
161 static struct kobj_attribute scan_sleep_millisecs_attr =
162 __ATTR_RW(scan_sleep_millisecs);
164 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
165 struct kobj_attribute *attr,
168 return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
171 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
172 struct kobj_attribute *attr,
173 const char *buf, size_t count)
178 err = kstrtouint(buf, 10, &msecs);
182 khugepaged_alloc_sleep_millisecs = msecs;
183 khugepaged_sleep_expire = 0;
184 wake_up_interruptible(&khugepaged_wait);
188 static struct kobj_attribute alloc_sleep_millisecs_attr =
189 __ATTR_RW(alloc_sleep_millisecs);
191 static ssize_t pages_to_scan_show(struct kobject *kobj,
192 struct kobj_attribute *attr,
195 return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
197 static ssize_t pages_to_scan_store(struct kobject *kobj,
198 struct kobj_attribute *attr,
199 const char *buf, size_t count)
204 err = kstrtouint(buf, 10, &pages);
208 khugepaged_pages_to_scan = pages;
212 static struct kobj_attribute pages_to_scan_attr =
213 __ATTR_RW(pages_to_scan);
215 static ssize_t pages_collapsed_show(struct kobject *kobj,
216 struct kobj_attribute *attr,
219 return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
221 static struct kobj_attribute pages_collapsed_attr =
222 __ATTR_RO(pages_collapsed);
224 static ssize_t full_scans_show(struct kobject *kobj,
225 struct kobj_attribute *attr,
228 return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
230 static struct kobj_attribute full_scans_attr =
231 __ATTR_RO(full_scans);
233 static ssize_t defrag_show(struct kobject *kobj,
234 struct kobj_attribute *attr, char *buf)
236 return single_hugepage_flag_show(kobj, attr, buf,
237 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
239 static ssize_t defrag_store(struct kobject *kobj,
240 struct kobj_attribute *attr,
241 const char *buf, size_t count)
243 return single_hugepage_flag_store(kobj, attr, buf, count,
244 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
246 static struct kobj_attribute khugepaged_defrag_attr =
250 * max_ptes_none controls whether khugepaged should collapse hugepages over
251 * any unmapped ptes, which in turn potentially increases the memory
252 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
253 * reduce the available free memory in the system as it
254 * runs. Increasing max_ptes_none will instead potentially reduce the
255 * free memory in the system during the khugepaged scan.
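 *
 * For example (a sketch, assuming the usual sysfs location and 4K pages):
 *   echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 * makes khugepaged collapse only ranges with no none/zero ptes, while the
 * default of HPAGE_PMD_NR - 1 (511) lets it collapse around any number of
 * unmapped ptes.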
257 static ssize_t max_ptes_none_show(struct kobject *kobj,
258 struct kobj_attribute *attr,
261 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
263 static ssize_t max_ptes_none_store(struct kobject *kobj,
264 struct kobj_attribute *attr,
265 const char *buf, size_t count)
268 unsigned long max_ptes_none;
270 err = kstrtoul(buf, 10, &max_ptes_none);
271 if (err || max_ptes_none > HPAGE_PMD_NR - 1)
274 khugepaged_max_ptes_none = max_ptes_none;
278 static struct kobj_attribute khugepaged_max_ptes_none_attr =
279 __ATTR_RW(max_ptes_none);
281 static ssize_t max_ptes_swap_show(struct kobject *kobj,
282 struct kobj_attribute *attr,
285 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
288 static ssize_t max_ptes_swap_store(struct kobject *kobj,
289 struct kobj_attribute *attr,
290 const char *buf, size_t count)
293 unsigned long max_ptes_swap;
295 err = kstrtoul(buf, 10, &max_ptes_swap);
296 if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
299 khugepaged_max_ptes_swap = max_ptes_swap;
304 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
305 __ATTR_RW(max_ptes_swap);
307 static ssize_t max_ptes_shared_show(struct kobject *kobj,
308 struct kobj_attribute *attr,
311 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
314 static ssize_t max_ptes_shared_store(struct kobject *kobj,
315 struct kobj_attribute *attr,
316 const char *buf, size_t count)
319 unsigned long max_ptes_shared;
321 err = kstrtoul(buf, 10, &max_ptes_shared);
322 if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
325 khugepaged_max_ptes_shared = max_ptes_shared;
330 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
331 __ATTR_RW(max_ptes_shared);
333 static struct attribute *khugepaged_attr[] = {
334 &khugepaged_defrag_attr.attr,
335 &khugepaged_max_ptes_none_attr.attr,
336 &khugepaged_max_ptes_swap_attr.attr,
337 &khugepaged_max_ptes_shared_attr.attr,
338 &pages_to_scan_attr.attr,
339 &pages_collapsed_attr.attr,
340 &full_scans_attr.attr,
341 &scan_sleep_millisecs_attr.attr,
342 &alloc_sleep_millisecs_attr.attr,
346 struct attribute_group khugepaged_attr_group = {
347 .attrs = khugepaged_attr,
348 .name = "khugepaged",
350 #endif /* CONFIG_SYSFS */
352 int hugepage_madvise(struct vm_area_struct *vma,
353 unsigned long *vm_flags, int advice)
359 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
360 * can't handle this properly after s390_enable_sie, so we simply
361 * ignore the madvise to prevent qemu from causing a SIGSEGV.
363 if (mm_has_pgste(vma->vm_mm))
366 *vm_flags &= ~VM_NOHUGEPAGE;
367 *vm_flags |= VM_HUGEPAGE;
369 * If the vma becomes good for khugepaged to scan,
370 * register it here without waiting for a page fault that
371 * may not happen any time soon.
373 khugepaged_enter_vma(vma, *vm_flags);
375 case MADV_NOHUGEPAGE:
376 *vm_flags &= ~VM_HUGEPAGE;
377 *vm_flags |= VM_NOHUGEPAGE;
379 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
380 * this vma, even if the mm stays registered in khugepaged because
381 * it got registered before VM_NOHUGEPAGE was set.
389 int __init khugepaged_init(void)
391 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
392 sizeof(struct khugepaged_mm_slot),
393 __alignof__(struct khugepaged_mm_slot),
398 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
399 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
400 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
401 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
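	/*
	 * With 4K pages (HPAGE_PMD_NR == 512) this means scanning up to
	 * 4096 ptes per pass and, per candidate range, tolerating up to
	 * 511 none/zero ptes, 64 swapped-out ptes and 256 shared ptes.
	 */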
406 void __init khugepaged_destroy(void)
408 kmem_cache_destroy(mm_slot_cache);
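/*
 * An mm counts as exiting once its last user reference is gone
 * (mm_users == 0); khugepaged must then stop touching its page tables.
 */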
411 static inline int hpage_collapse_test_exit(struct mm_struct *mm)
413 return atomic_read(&mm->mm_users) == 0;
416 void __khugepaged_enter(struct mm_struct *mm)
418 struct khugepaged_mm_slot *mm_slot;
419 struct mm_slot *slot;
422 mm_slot = mm_slot_alloc(mm_slot_cache);
426 slot = &mm_slot->slot;
428 /* __khugepaged_exit() must not run from under us */
429 VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
430 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
431 mm_slot_free(mm_slot_cache, mm_slot);
435 spin_lock(&khugepaged_mm_lock);
436 mm_slot_insert(mm_slots_hash, mm, slot);
438 * Insert just behind the scanning cursor, to let the area settle down first.
441 wakeup = list_empty(&khugepaged_scan.mm_head);
442 list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
443 spin_unlock(&khugepaged_mm_lock);
447 wake_up_interruptible(&khugepaged_wait);
450 void khugepaged_enter_vma(struct vm_area_struct *vma,
451 unsigned long vm_flags)
453 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
454 hugepage_flags_enabled()) {
455 if (hugepage_vma_check(vma, vm_flags, false, false, true))
456 __khugepaged_enter(vma->vm_mm);
460 void __khugepaged_exit(struct mm_struct *mm)
462 struct khugepaged_mm_slot *mm_slot;
463 struct mm_slot *slot;
466 spin_lock(&khugepaged_mm_lock);
467 slot = mm_slot_lookup(mm_slots_hash, mm);
468 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
469 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
470 hash_del(&slot->hash);
471 list_del(&slot->mm_node);
474 spin_unlock(&khugepaged_mm_lock);
477 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
478 mm_slot_free(mm_slot_cache, mm_slot);
480 } else if (mm_slot) {
482 * This is required to serialize against
483 * hpage_collapse_test_exit() (which is guaranteed to run
484 * under mmap sem read mode). Stop here (after we return, all
485 * pagetables will be destroyed) until khugepaged has finished
486 * working on the pagetables under the mmap_lock.
489 mmap_write_unlock(mm);
493 static void release_pte_folio(struct folio *folio)
495 node_stat_mod_folio(folio,
496 NR_ISOLATED_ANON + folio_is_file_lru(folio),
497 -folio_nr_pages(folio));
499 folio_putback_lru(folio);
502 static void release_pte_page(struct page *page)
504 release_pte_folio(page_folio(page));
507 static void release_pte_pages(pte_t *pte, pte_t *_pte,
508 struct list_head *compound_pagelist)
510 struct folio *folio, *tmp;
512 while (--_pte >= pte) {
513 pte_t pteval = *_pte;
516 if (pte_none(pteval))
518 pfn = pte_pfn(pteval);
519 if (is_zero_pfn(pfn))
521 folio = pfn_folio(pfn);
522 if (folio_test_large(folio))
524 release_pte_folio(folio);
527 list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
528 list_del(&folio->lru);
529 release_pte_folio(folio);
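/*
 * A page is only safe to collapse when all of its references are accounted
 * for by its mappings (plus one per subpage for the swap cache, if any);
 * any extra reference indicates a GUP pin or some other external user.
 */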
533 static bool is_refcount_suitable(struct page *page)
535 int expected_refcount;
537 expected_refcount = total_mapcount(page);
538 if (PageSwapCache(page))
539 expected_refcount += compound_nr(page);
541 return page_count(page) == expected_refcount;
544 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
545 unsigned long address,
547 struct collapse_control *cc,
548 struct list_head *compound_pagelist)
550 struct page *page = NULL;
552 int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
553 bool writable = false;
555 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
556 _pte++, address += PAGE_SIZE) {
557 pte_t pteval = *_pte;
558 if (pte_none(pteval) || (pte_present(pteval) &&
559 is_zero_pfn(pte_pfn(pteval)))) {
561 if (!userfaultfd_armed(vma) &&
562 (!cc->is_khugepaged ||
563 none_or_zero <= khugepaged_max_ptes_none)) {
566 result = SCAN_EXCEED_NONE_PTE;
567 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
571 if (!pte_present(pteval)) {
572 result = SCAN_PTE_NON_PRESENT;
575 page = vm_normal_page(vma, address, pteval);
576 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
577 result = SCAN_PAGE_NULL;
581 VM_BUG_ON_PAGE(!PageAnon(page), page);
583 if (page_mapcount(page) > 1) {
585 if (cc->is_khugepaged &&
586 shared > khugepaged_max_ptes_shared) {
587 result = SCAN_EXCEED_SHARED_PTE;
588 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
593 if (PageCompound(page)) {
595 page = compound_head(page);
598 * Check if we have dealt with the compound page already.
601 list_for_each_entry(p, compound_pagelist, lru) {
608 * We can do it before isolate_lru_page because the
609 * page can't be freed from under us. NOTE: PG_lock
610 * is needed to serialize against split_huge_page
611 * when invoked from the VM.
613 if (!trylock_page(page)) {
614 result = SCAN_PAGE_LOCK;
619 * Check if the page has any GUP (or other external) pins.
621 * The page table that maps the page has been already unlinked
622 * from the page table tree and this process cannot get
623 * an additional pin on the page.
625 * New pins can come later if the page is shared across fork,
626 * but not from this process. The other process cannot write to
627 * the page, only trigger CoW.
629 if (!is_refcount_suitable(page)) {
631 result = SCAN_PAGE_COUNT;
636 * Isolate the page to avoid collapsing a hugepage
637 * currently in use by the VM.
639 if (!isolate_lru_page(page)) {
641 result = SCAN_DEL_PAGE_LRU;
644 mod_node_page_state(page_pgdat(page),
645 NR_ISOLATED_ANON + page_is_file_lru(page),
647 VM_BUG_ON_PAGE(!PageLocked(page), page);
648 VM_BUG_ON_PAGE(PageLRU(page), page);
650 if (PageCompound(page))
651 list_add_tail(&page->lru, compound_pagelist);
654 * If collapse was initiated by khugepaged, check that there are
655 * enough young ptes to justify collapsing the page
657 if (cc->is_khugepaged &&
658 (pte_young(pteval) || page_is_young(page) ||
659 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
663 if (pte_write(pteval))
667 if (unlikely(!writable)) {
668 result = SCAN_PAGE_RO;
669 } else if (unlikely(cc->is_khugepaged && !referenced)) {
670 result = SCAN_LACK_REFERENCED_PAGE;
672 result = SCAN_SUCCEED;
673 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
674 referenced, writable, result);
678 release_pte_pages(pte, _pte, compound_pagelist);
679 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
680 referenced, writable, result);
684 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
685 struct vm_area_struct *vma,
686 unsigned long address,
688 struct list_head *compound_pagelist)
690 struct page *src_page, *tmp;
692 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
693 _pte++, page++, address += PAGE_SIZE) {
694 pte_t pteval = *_pte;
696 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
697 clear_user_highpage(page, address);
698 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
699 if (is_zero_pfn(pte_pfn(pteval))) {
701 * ptl mostly unnecessary.
704 ptep_clear(vma->vm_mm, address, _pte);
708 src_page = pte_page(pteval);
709 copy_user_highpage(page, src_page, address, vma);
710 if (!PageCompound(src_page))
711 release_pte_page(src_page);
713 * ptl mostly unnecessary, but preempt has to
714 * be disabled to update the per-cpu stats
715 * inside page_remove_rmap().
718 ptep_clear(vma->vm_mm, address, _pte);
719 page_remove_rmap(src_page, vma, false);
721 free_page_and_swap_cache(src_page);
725 list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
726 list_del(&src_page->lru);
727 mod_node_page_state(page_pgdat(src_page),
728 NR_ISOLATED_ANON + page_is_file_lru(src_page),
729 -compound_nr(src_page));
730 unlock_page(src_page);
731 free_swap_cache(src_page);
732 putback_lru_page(src_page);
736 static void khugepaged_alloc_sleep(void)
740 add_wait_queue(&khugepaged_wait, &wait);
741 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
742 schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
743 remove_wait_queue(&khugepaged_wait, &wait);
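/*
 * Control block used when collapse is initiated by khugepaged itself;
 * is_khugepaged == true makes the scanners honour the max_ptes_* limits
 * above.
 */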
746 struct collapse_control khugepaged_collapse_control = {
747 .is_khugepaged = true,
750 static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
755 * If node_reclaim_mode is disabled, then no extra effort is made to
756 * allocate memory locally.
758 if (!node_reclaim_enabled())
761 /* If there is a count for this node already, it must be acceptable */
762 if (cc->node_load[nid])
765 for (i = 0; i < MAX_NUMNODES; i++) {
766 if (!cc->node_load[i])
768 if (node_distance(nid, i) > node_reclaim_distance)
774 #define khugepaged_defrag() \
775 (transparent_hugepage_flags & \
776 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
778 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
779 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
781 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
785 static int hpage_collapse_find_target_node(struct collapse_control *cc)
787 int nid, target_node = 0, max_value = 0;
789 /* find first node with max normal pages hit */
790 for (nid = 0; nid < MAX_NUMNODES; nid++)
791 if (cc->node_load[nid] > max_value) {
792 max_value = cc->node_load[nid];
796 for_each_online_node(nid) {
797 if (max_value == cc->node_load[nid])
798 node_set(nid, cc->alloc_nmask);
804 static int hpage_collapse_find_target_node(struct collapse_control *cc)
810 static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
813 *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
814 if (unlikely(!*hpage)) {
815 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
819 prep_transhuge_page(*hpage);
820 count_vm_event(THP_COLLAPSE_ALLOC);
825 * If mmap_lock temporarily dropped, revalidate vma
826 * before taking mmap_lock.
827 * Returns enum scan_result value.
830 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
832 struct vm_area_struct **vmap,
833 struct collapse_control *cc)
835 struct vm_area_struct *vma;
837 if (unlikely(hpage_collapse_test_exit(mm)))
838 return SCAN_ANY_PROCESS;
840 *vmap = vma = find_vma(mm, address);
842 return SCAN_VMA_NULL;
844 if (!transhuge_vma_suitable(vma, address))
845 return SCAN_ADDRESS_RANGE;
846 if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
848 return SCAN_VMA_CHECK;
850 * Anon VMA expected, the address may be unmapped then
851 * remapped to file after khugepaged reacquired the mmap_lock.
853 * hugepage_vma_check may return true for qualified file vmas.
856 if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
857 return SCAN_PAGE_ANON;
862 * See pmd_trans_unstable() for how the result may change out from
863 * underneath us, even if we hold mmap_lock in read.
865 static int find_pmd_or_thp_or_none(struct mm_struct *mm,
866 unsigned long address,
871 *pmd = mm_find_pmd(mm, address);
873 return SCAN_PMD_NULL;
875 pmde = pmdp_get_lockless(*pmd);
877 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
878 /* See comments in pmd_none_or_trans_huge_or_clear_bad() */
882 return SCAN_PMD_NONE;
883 if (!pmd_present(pmde))
884 return SCAN_PMD_NULL;
885 if (pmd_trans_huge(pmde))
886 return SCAN_PMD_MAPPED;
887 if (pmd_devmap(pmde))
888 return SCAN_PMD_NULL;
890 return SCAN_PMD_NULL;
894 static int check_pmd_still_valid(struct mm_struct *mm,
895 unsigned long address,
899 int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
901 if (result != SCAN_SUCCEED)
909 * Bring missing pages in from swap, to complete THP collapse.
910 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
912 * Called and returns without pte mapped or spinlocks held.
913 * Note that if the result is not SCAN_SUCCEED, mmap_lock will be released.
916 static int __collapse_huge_page_swapin(struct mm_struct *mm,
917 struct vm_area_struct *vma,
918 unsigned long haddr, pmd_t *pmd,
923 unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
925 for (address = haddr; address < end; address += PAGE_SIZE) {
926 struct vm_fault vmf = {
929 .pgoff = linear_page_index(vma, haddr),
930 .flags = FAULT_FLAG_ALLOW_RETRY,
934 vmf.pte = pte_offset_map(pmd, address);
935 vmf.orig_pte = *vmf.pte;
936 if (!is_swap_pte(vmf.orig_pte)) {
940 ret = do_swap_page(&vmf);
943 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
944 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
945 * we do not retry here and the swap entry will remain in the
946 * pagetable, resulting in later failure.
948 if (ret & VM_FAULT_RETRY) {
949 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
950 /* Likely, but not guaranteed, that page lock failed */
951 return SCAN_PAGE_LOCK;
953 if (ret & VM_FAULT_ERROR) {
954 mmap_read_unlock(mm);
955 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
961 /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
965 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
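/*
 * Allocate the huge page on the preferred node and charge it to the mm's
 * memcg; allocation and charge failures are reported as distinct SCAN_*
 * results so callers can back off appropriately.
 */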
969 static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
970 struct collapse_control *cc)
972 gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
974 int node = hpage_collapse_find_target_node(cc);
977 if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
978 return SCAN_ALLOC_HUGE_PAGE_FAIL;
980 folio = page_folio(*hpage);
981 if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
984 return SCAN_CGROUP_CHARGE_FAIL;
986 count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
991 static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
992 int referenced, int unmapped,
993 struct collapse_control *cc)
995 LIST_HEAD(compound_pagelist);
1000 spinlock_t *pmd_ptl, *pte_ptl;
1001 int result = SCAN_FAIL;
1002 struct vm_area_struct *vma;
1003 struct mmu_notifier_range range;
1005 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1008 * Before allocating the hugepage, release the mmap_lock read lock.
1009 * The allocation can take potentially a long time if it involves
1010 * sync compaction, and we do not need to hold the mmap_lock during
1011 * that. We will recheck the vma after taking it again in write mode.
1013 mmap_read_unlock(mm);
1015 result = alloc_charge_hpage(&hpage, mm, cc);
1016 if (result != SCAN_SUCCEED)
1020 result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1021 if (result != SCAN_SUCCEED) {
1022 mmap_read_unlock(mm);
1026 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1027 if (result != SCAN_SUCCEED) {
1028 mmap_read_unlock(mm);
1034 * __collapse_huge_page_swapin will return with mmap_lock
1035 * released when it fails. So we jump out_nolock directly in
1036 * that case. Continuing to collapse causes inconsistency.
1038 result = __collapse_huge_page_swapin(mm, vma, address, pmd,
1040 if (result != SCAN_SUCCEED)
1044 mmap_read_unlock(mm);
1046 * Prevent all access to the pagetables, with the exception of
1047 * gup_fast (handled later by the ptep_clear_flush) and the VM
1048 * (handled by the anon_vma lock + PG_lock).
1050 mmap_write_lock(mm);
1051 result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1052 if (result != SCAN_SUCCEED)
1054 /* check if the pmd is still valid */
1055 result = check_pmd_still_valid(mm, address, pmd);
1056 if (result != SCAN_SUCCEED)
1059 vma_start_write(vma);
1060 anon_vma_lock_write(vma->anon_vma);
1062 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
1063 address + HPAGE_PMD_SIZE);
1064 mmu_notifier_invalidate_range_start(&range);
1066 pte = pte_offset_map(pmd, address);
1067 pte_ptl = pte_lockptr(mm, pmd);
1069 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1071 * This removes any huge TLB entry from the CPU, so we won't allow
1072 * huge and small TLB entries for the same virtual address, to
1073 * avoid the risk of CPU bugs in that area.
1075 * Parallel fast GUP is fine since fast GUP will back off when
1076 * it detects PMD is changed.
1078 _pmd = pmdp_collapse_flush(vma, address, pmd);
1079 spin_unlock(pmd_ptl);
1080 mmu_notifier_invalidate_range_end(&range);
1081 tlb_remove_table_sync_one();
1084 result = __collapse_huge_page_isolate(vma, address, pte, cc,
1085 &compound_pagelist);
1086 spin_unlock(pte_ptl);
1088 if (unlikely(result != SCAN_SUCCEED)) {
1091 BUG_ON(!pmd_none(*pmd));
1093 * We can only use set_pmd_at when establishing
1094 * hugepmds and never for establishing regular pmds that
1095 * point to regular pagetables. Use pmd_populate for that.
1097 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1098 spin_unlock(pmd_ptl);
1099 anon_vma_unlock_write(vma->anon_vma);
1104 * All pages are isolated and locked so anon_vma rmap
1105 * can't run anymore.
1107 anon_vma_unlock_write(vma->anon_vma);
1109 __collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
1110 &compound_pagelist);
1113 * spin_lock() below is not the equivalent of smp_wmb(), but
1114 * the smp_wmb() inside __SetPageUptodate() can be reused to
1115 * avoid the copy_huge_page writes becoming visible after
1116 * the set_pmd_at() write.
1118 __SetPageUptodate(hpage);
1119 pgtable = pmd_pgtable(_pmd);
1121 _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
1122 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1125 BUG_ON(!pmd_none(*pmd));
1126 page_add_new_anon_rmap(hpage, vma, address);
1127 lru_cache_add_inactive_or_unevictable(hpage, vma);
1128 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1129 set_pmd_at(mm, address, pmd, _pmd);
1130 update_mmu_cache_pmd(vma, address, pmd);
1131 spin_unlock(pmd_ptl);
1135 result = SCAN_SUCCEED;
1137 mmap_write_unlock(mm);
1141 trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
1145 static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1146 struct vm_area_struct *vma,
1147 unsigned long address, bool *mmap_locked,
1148 struct collapse_control *cc)
1152 int result = SCAN_FAIL, referenced = 0;
1153 int none_or_zero = 0, shared = 0;
1154 struct page *page = NULL;
1155 unsigned long _address;
1157 int node = NUMA_NO_NODE, unmapped = 0;
1158 bool writable = false;
1160 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1162 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1163 if (result != SCAN_SUCCEED)
1166 memset(cc->node_load, 0, sizeof(cc->node_load));
1167 nodes_clear(cc->alloc_nmask);
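	/* node_load[] and alloc_nmask now only reflect pages seen in this scan. */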
1168 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1169 for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1170 _pte++, _address += PAGE_SIZE) {
1171 pte_t pteval = *_pte;
1172 if (is_swap_pte(pteval)) {
1174 if (!cc->is_khugepaged ||
1175 unmapped <= khugepaged_max_ptes_swap) {
1177 * Always be strict with uffd-wp
1178 * enabled swap entries. Please see
1179 * comment below for pte_uffd_wp().
1181 if (pte_swp_uffd_wp_any(pteval)) {
1182 result = SCAN_PTE_UFFD_WP;
1187 result = SCAN_EXCEED_SWAP_PTE;
1188 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1192 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1194 if (!userfaultfd_armed(vma) &&
1195 (!cc->is_khugepaged ||
1196 none_or_zero <= khugepaged_max_ptes_none)) {
1199 result = SCAN_EXCEED_NONE_PTE;
1200 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1204 if (pte_uffd_wp(pteval)) {
1206 * Don't collapse the page if any of the small
1207 * PTEs are armed with uffd write protection.
1208 * Here we can also mark the new huge pmd as
1209 * write protected if any of the small ones is
1210 * marked, but that could bring unknown
1211 * userfault messages that fall outside of
1212 * the registered range. So, just keep it simple.
1214 result = SCAN_PTE_UFFD_WP;
1217 if (pte_write(pteval))
1220 page = vm_normal_page(vma, _address, pteval);
1221 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1222 result = SCAN_PAGE_NULL;
1226 if (page_mapcount(page) > 1) {
1228 if (cc->is_khugepaged &&
1229 shared > khugepaged_max_ptes_shared) {
1230 result = SCAN_EXCEED_SHARED_PTE;
1231 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1236 page = compound_head(page);
1239 * Record which node the original page is from and save this
1240 * information to cc->node_load[].
1241 * Khugepaged will allocate the hugepage from the node with the max hit count.
1244 node = page_to_nid(page);
1245 if (hpage_collapse_scan_abort(node, cc)) {
1246 result = SCAN_SCAN_ABORT;
1249 cc->node_load[node]++;
1250 if (!PageLRU(page)) {
1251 result = SCAN_PAGE_LRU;
1254 if (PageLocked(page)) {
1255 result = SCAN_PAGE_LOCK;
1258 if (!PageAnon(page)) {
1259 result = SCAN_PAGE_ANON;
1264 * Check if the page has any GUP (or other external) pins.
1266 * Here the check may be racy:
1267 * it may see total_mapcount > refcount in some cases.
1268 * But such a case is ephemeral and we can always retry the collapse
1269 * later. However it may report a false positive if the page
1270 * has excessive GUP pins (e.g. 512). Anyway, the same check
1271 * will be done again later and the risk seems low.
1273 if (!is_refcount_suitable(page)) {
1274 result = SCAN_PAGE_COUNT;
1279 * If collapse was initiated by khugepaged, check that there are
1280 * enough young ptes to justify collapsing the page
1282 if (cc->is_khugepaged &&
1283 (pte_young(pteval) || page_is_young(page) ||
1284 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1289 result = SCAN_PAGE_RO;
1290 } else if (cc->is_khugepaged &&
1292 (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1293 result = SCAN_LACK_REFERENCED_PAGE;
1295 result = SCAN_SUCCEED;
1298 pte_unmap_unlock(pte, ptl);
1299 if (result == SCAN_SUCCEED) {
1300 result = collapse_huge_page(mm, address, referenced,
1302 /* collapse_huge_page will return with the mmap_lock released */
1303 *mmap_locked = false;
1306 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1307 none_or_zero, result, unmapped);
1311 static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1313 struct mm_slot *slot = &mm_slot->slot;
1314 struct mm_struct *mm = slot->mm;
1316 lockdep_assert_held(&khugepaged_mm_lock);
1318 if (hpage_collapse_test_exit(mm)) {
1320 hash_del(&slot->hash);
1321 list_del(&slot->mm_node);
1324 * Not strictly needed because the mm exited already.
1326 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1329 /* khugepaged_mm_lock actually not necessary for the below */
1330 mm_slot_free(mm_slot_cache, mm_slot);
1337 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1338 * khugepaged should try to collapse the page table.
1340 * Note that following race exists:
1341 * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
1342 * emptying the A's ->pte_mapped_thp[] array.
1343 * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
1344 * retract_page_tables() finds a VMA in mm_struct A mapping the same extent
1345 * (at virtual address X) and adds an entry (for X) into mm_struct A's
1346 * ->pte_mapped_thp[] array.
1347 * (3) khugepaged calls khugepaged_collapse_scan_file() for mm_struct A at X,
1348 * sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
1349 * (for X) into mm_struct A's ->pte_mapped_thp[] array.
1350 * Thus, it's possible the same address is added multiple times for the same
1351 * mm_struct. Should this happen, we'll simply attempt
1352 * collapse_pte_mapped_thp() multiple times for the same address, under the same
1353 * exclusive mmap_lock, and assuming the first call is successful, subsequent
1354 * attempts will return quickly (without grabbing any additional locks) when
1355 * a huge pmd is found in find_pmd_or_thp_or_none(). Since this is a cheap
1356 * check, and since this is a rare occurrence, the cost of preventing this
1357 * "multiple-add" is thought to be more expensive than just handling it, should
1360 static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1363 struct khugepaged_mm_slot *mm_slot;
1364 struct mm_slot *slot;
1367 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1369 spin_lock(&khugepaged_mm_lock);
1370 slot = mm_slot_lookup(mm_slots_hash, mm);
1371 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
1372 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
1373 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1376 spin_unlock(&khugepaged_mm_lock);
1380 /* hpage must be locked, and mmap_lock must be held in write */
1381 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1382 pmd_t *pmdp, struct page *hpage)
1384 struct vm_fault vmf = {
1391 VM_BUG_ON(!PageTransHuge(hpage));
1392 mmap_assert_write_locked(vma->vm_mm);
1394 if (do_set_pmd(&vmf, hpage))
1398 return SCAN_SUCCEED;
1402 * A note about locking:
1403 * Trying to take the page table spinlocks would be useless here because those
1404 * are only used to synchronize:
1406 * - modifying terminal entries (ones that point to a data page, not to another page table)
1408 * - installing *new* non-terminal entries
1410 * Instead, we need roughly the same kind of protection as free_pgtables() or
1411 * mm_take_all_locks() (but only for a single VMA):
1412 * The mmap lock together with this VMA's rmap locks covers all paths towards
1413 * the page table entries we're messing with here, except for hardware page
1414 * table walks and lockless_pages_from_mm().
1416 static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1417 unsigned long addr, pmd_t *pmdp)
1420 struct mmu_notifier_range range;
1422 mmap_assert_write_locked(mm);
1424 lockdep_assert_held_write(&vma->vm_file->f_mapping->i_mmap_rwsem);
1426 * All anon_vmas attached to the VMA have the same root and are
1427 * therefore locked by the same lock.
1430 lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
1432 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
1433 addr + HPAGE_PMD_SIZE);
1434 mmu_notifier_invalidate_range_start(&range);
1435 pmd = pmdp_collapse_flush(vma, addr, pmdp);
1436 tlb_remove_table_sync_one();
1437 mmu_notifier_invalidate_range_end(&range);
1439 page_table_check_pte_clear_range(mm, addr, pmd);
1440 pte_free(mm, pmd_pgtable(pmd));
1444 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at @addr.
1447 * @mm: process address space where collapse happens
1448 * @addr: THP collapse address
1449 * @install_pmd: If a huge PMD should be installed
1451 * This function checks whether all the PTEs in the PMD are pointing to the
1452 * right THP. If so, retract the page table so the THP can refault in
1453 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
1455 int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1458 unsigned long haddr = addr & HPAGE_PMD_MASK;
1459 struct vm_area_struct *vma = vma_lookup(mm, haddr);
1461 pte_t *start_pte, *pte;
1464 int count = 0, result = SCAN_FAIL;
1467 mmap_assert_write_locked(mm);
1469 /* Fast check before locking page if already PMD-mapped */
1470 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1471 if (result == SCAN_PMD_MAPPED)
1474 if (!vma || !vma->vm_file ||
1475 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1476 return SCAN_VMA_CHECK;
1479 * If we are here, we've succeeded in replacing all the native pages
1480 * in the page cache with a single hugepage. If a mm were to fault-in
1481 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1482 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1483 * analogously elide sysfs THP settings here.
1485 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
1486 return SCAN_VMA_CHECK;
1488 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1489 if (userfaultfd_wp(vma))
1490 return SCAN_PTE_UFFD_WP;
1492 hpage = find_lock_page(vma->vm_file->f_mapping,
1493 linear_page_index(vma, haddr));
1495 return SCAN_PAGE_NULL;
1497 if (!PageHead(hpage)) {
1502 if (compound_order(hpage) != HPAGE_PMD_ORDER) {
1503 result = SCAN_PAGE_COMPOUND;
1512 * In MADV_COLLAPSE path, possible race with khugepaged where
1513 * all pte entries have been removed and pmd cleared. If so,
1514 * skip all the pte checks and just update the pmd mapping.
1516 goto maybe_install_pmd;
1521 /* Lock the vma before taking i_mmap and page table locks */
1522 vma_start_write(vma);
1525 * We need to lock the mapping so that from here on, only GUP-fast and
1526 * hardware page walks can access the parts of the page tables that
1527 * we're operating on.
1528 * See collapse_and_free_pmd().
1530 i_mmap_lock_write(vma->vm_file->f_mapping);
1533 * This spinlock should be unnecessary: Nobody else should be accessing
1534 * the page tables under spinlock protection here, only
1535 * lockless_pages_from_mm() and the hardware page walker can access page
1536 * tables while all the high-level locks are held in write mode.
1538 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1541 /* step 1: check all mapped PTEs are to the right huge page */
1542 for (i = 0, addr = haddr, pte = start_pte;
1543 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1546 /* empty pte, skip */
1550 /* page swapped out, abort */
1551 if (!pte_present(*pte)) {
1552 result = SCAN_PTE_NON_PRESENT;
1556 page = vm_normal_page(vma, addr, *pte);
1557 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1560 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1561 * page table, but the new page will not be a subpage of hpage.
1563 if (hpage + i != page)
1568 /* step 2: adjust rmap */
1569 for (i = 0, addr = haddr, pte = start_pte;
1570 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1575 page = vm_normal_page(vma, addr, *pte);
1576 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1578 page_remove_rmap(page, vma, false);
1581 pte_unmap_unlock(start_pte, ptl);
1583 /* step 3: set proper refcount and mm_counters. */
1585 page_ref_sub(hpage, count);
1586 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1589 /* step 4: remove pte entries */
1590 /* we make no change to anon, but protect concurrent anon page lookup */
1592 anon_vma_lock_write(vma->anon_vma);
1594 collapse_and_free_pmd(mm, vma, haddr, pmd);
1597 anon_vma_unlock_write(vma->anon_vma);
1598 i_mmap_unlock_write(vma->vm_file->f_mapping);
1601 /* step 5: install pmd entry */
1602 result = install_pmd
1603 ? set_huge_pmd(vma, haddr, pmd, hpage)
1612 pte_unmap_unlock(start_pte, ptl);
1613 i_mmap_unlock_write(vma->vm_file->f_mapping);
1617 static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
1619 struct mm_slot *slot = &mm_slot->slot;
1620 struct mm_struct *mm = slot->mm;
1623 if (likely(mm_slot->nr_pte_mapped_thp == 0))
1626 if (!mmap_write_trylock(mm))
1629 if (unlikely(hpage_collapse_test_exit(mm)))
1632 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1633 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
1636 mm_slot->nr_pte_mapped_thp = 0;
1637 mmap_write_unlock(mm);
1640 static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
1641 struct mm_struct *target_mm,
1642 unsigned long target_addr, struct page *hpage,
1643 struct collapse_control *cc)
1645 struct vm_area_struct *vma;
1646 int target_result = SCAN_FAIL;
1648 i_mmap_lock_write(mapping);
1649 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1650 int result = SCAN_FAIL;
1651 struct mm_struct *mm = NULL;
1652 unsigned long addr = 0;
1654 bool is_target = false;
1657 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1658 * got written to. These VMAs are likely not worth investing
1659 * mmap_write_lock(mm), as the PMD-mapping is likely to be split later.
1662 * Note that vma->anon_vma check is racy: it can be set up after
1663 * the check but before we took mmap_lock by the fault path.
1664 * But page lock would prevent establishing any new ptes of the
1665 * page, so we are safe.
1667 * An alternative would be to drop the check, but check that the page
1668 * table is clear before calling pmdp_collapse_flush() under
1669 * ptl. It has a higher chance to recover a THP for the VMA, but
1670 * also has a higher cost. It would also probably require locking the anon_vma.
1673 if (READ_ONCE(vma->anon_vma)) {
1674 result = SCAN_PAGE_ANON;
1677 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1678 if (addr & ~HPAGE_PMD_MASK ||
1679 vma->vm_end < addr + HPAGE_PMD_SIZE) {
1680 result = SCAN_VMA_CHECK;
1684 is_target = mm == target_mm && addr == target_addr;
1685 result = find_pmd_or_thp_or_none(mm, addr, &pmd);
1686 if (result != SCAN_SUCCEED)
1689 * We need exclusive mmap_lock to retract the page table.
1691 * We use trylock due to lock inversion: we need to acquire
1692 * mmap_lock while holding page lock. Fault path does it in
1693 * reverse order. Trylock is a way to avoid deadlock.
1695 * Also, it's not MADV_COLLAPSE's job to collapse other
1696 * mappings - let khugepaged take care of them later.
1698 result = SCAN_PTE_MAPPED_HUGEPAGE;
1699 if ((cc->is_khugepaged || is_target) &&
1700 mmap_write_trylock(mm)) {
1701 /* trylock for the same lock inversion as above */
1702 if (!vma_try_start_write(vma))
1706 * Re-check whether we have an ->anon_vma, because
1707 * collapse_and_free_pmd() requires that either no
1708 * ->anon_vma exists or the anon_vma is locked.
1709 * We already checked ->anon_vma above, but that check
1710 * is racy because ->anon_vma can be populated under the
1711 * mmap lock in read mode.
1713 if (vma->anon_vma) {
1714 result = SCAN_PAGE_ANON;
1718 * When a vma is registered with uffd-wp, we can't
1719 * recycle the pmd pgtable because there can be pte
1720 * markers installed. Only skip it, so the rest of the mm/vma
1721 * can still have the same file mapped hugely; however,
1722 * it'll always be mapped in small page sizes for uffd-wp
1723 * registered ranges.
1725 if (hpage_collapse_test_exit(mm)) {
1726 result = SCAN_ANY_PROCESS;
1729 if (userfaultfd_wp(vma)) {
1730 result = SCAN_PTE_UFFD_WP;
1733 collapse_and_free_pmd(mm, vma, addr, pmd);
1734 if (!cc->is_khugepaged && is_target)
1735 result = set_huge_pmd(vma, addr, pmd, hpage);
1737 result = SCAN_SUCCEED;
1740 mmap_write_unlock(mm);
1744 * Calling context will handle target mm/addr. Otherwise, let
1745 * khugepaged try again later.
1748 khugepaged_add_pte_mapped_thp(mm, addr);
1753 target_result = result;
1755 i_mmap_unlock_write(mapping);
1756 return target_result;
1760 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1762 * @mm: process address space where collapse happens
1763 * @addr: virtual collapse start address
1764 * @file: file that the collapse operates on
1765 * @start: collapse start address
1766 * @cc: collapse context and scratchpad
1768 * Basic scheme is simple, details are more complex:
1769 * - allocate and lock a new huge page;
1770 * - scan page cache replacing old pages with the new one
1771 * + swap/gup in pages if necessary;
1773 * + keep old pages around in case rollback is required;
1774 * - if replacing succeeds:
1777 * + unlock huge page;
1778 * - if replacing failed:
1779 * + put all pages back and unfreeze them;
1780 * + restore gaps in the page cache;
1781 * + unlock and free huge page;
1783 static int collapse_file(struct mm_struct *mm, unsigned long addr,
1784 struct file *file, pgoff_t start,
1785 struct collapse_control *cc)
1787 struct address_space *mapping = file->f_mapping;
1789 pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1790 LIST_HEAD(pagelist);
1791 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1792 int nr_none = 0, result = SCAN_SUCCEED;
1793 bool is_shmem = shmem_file(file);
1796 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1797 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1799 result = alloc_charge_hpage(&hpage, mm, cc);
1800 if (result != SCAN_SUCCEED)
1804 * Ensure we have slots for all the pages in the range. This is
1805 * almost certainly a no-op because most of the pages must be present
1809 xas_create_range(&xas);
1810 if (!xas_error(&xas))
1812 xas_unlock_irq(&xas);
1813 if (!xas_nomem(&xas, GFP_KERNEL)) {
1819 __SetPageLocked(hpage);
1821 __SetPageSwapBacked(hpage);
1822 hpage->index = start;
1823 hpage->mapping = mapping;
1826 * At this point the hpage is locked and not up-to-date.
1827 * It's safe to insert it into the page cache, because nobody would
1828 * be able to map it or use it in another way until we unlock it.
1831 xas_set(&xas, start);
1832 for (index = start; index < end; index++) {
1833 struct page *page = xas_next(&xas);
1834 struct folio *folio;
1836 VM_BUG_ON(index != xas.xa_index);
1840 * Stop if extent has been truncated or
1841 * hole-punched, and is now completely empty.
1844 if (index == start) {
1845 if (!xas_next_entry(&xas, end - 1)) {
1846 result = SCAN_TRUNCATED;
1849 xas_set(&xas, index);
1851 if (!shmem_charge(mapping->host, 1)) {
1855 xas_store(&xas, hpage);
1860 if (xa_is_value(page) || !PageUptodate(page)) {
1861 xas_unlock_irq(&xas);
1862 /* swap in or instantiate fallocated page */
1863 if (shmem_get_folio(mapping->host, index,
1864 &folio, SGP_NOALLOC)) {
1868 page = folio_file_page(folio, index);
1869 } else if (trylock_page(page)) {
1871 xas_unlock_irq(&xas);
1873 result = SCAN_PAGE_LOCK;
1876 } else { /* !is_shmem */
1877 if (!page || xa_is_value(page)) {
1878 xas_unlock_irq(&xas);
1879 page_cache_sync_readahead(mapping, &file->f_ra,
1882 /* drain pagevecs to help isolate_lru_page() */
1884 page = find_lock_page(mapping, index);
1885 if (unlikely(page == NULL)) {
1889 } else if (PageDirty(page)) {
1891 * khugepaged only works on read-only fd,
1892 * so this page is dirty because it hasn't
1893 * been flushed since first write. There
1894 * won't be new dirty pages.
1896 * Trigger async flush here and hope the
1897 * writeback is done when khugepaged
1898 * revisits this page.
1900 * This is a one-off situation. We are not
1901 * forcing writeback in a loop.
1903 xas_unlock_irq(&xas);
1904 filemap_flush(mapping);
1907 } else if (PageWriteback(page)) {
1908 xas_unlock_irq(&xas);
1911 } else if (trylock_page(page)) {
1913 xas_unlock_irq(&xas);
1915 result = SCAN_PAGE_LOCK;
1921 * The page must be locked, so we can drop the i_pages lock
1922 * without racing with truncate.
1924 VM_BUG_ON_PAGE(!PageLocked(page), page);
1926 /* make sure the page is up to date */
1927 if (unlikely(!PageUptodate(page))) {
1933 * If file was truncated then extended, or hole-punched, before
1934 * we locked the first page, then a THP might be there already.
1935 * This will be discovered on the first iteration.
1937 if (PageTransCompound(page)) {
1938 struct page *head = compound_head(page);
1940 result = compound_order(head) == HPAGE_PMD_ORDER &&
1941 head->index == start
1942 /* Maybe PMD-mapped */
1943 ? SCAN_PTE_MAPPED_HUGEPAGE
1944 : SCAN_PAGE_COMPOUND;
1948 folio = page_folio(page);
1950 if (folio_mapping(folio) != mapping) {
1951 result = SCAN_TRUNCATED;
1955 if (!is_shmem && (folio_test_dirty(folio) ||
1956 folio_test_writeback(folio))) {
1958 * khugepaged only works on read-only fd, so this
1959 * page is dirty because it hasn't been flushed
1960 * since first write.
1966 if (!folio_isolate_lru(folio)) {
1967 result = SCAN_DEL_PAGE_LRU;
1971 if (folio_has_private(folio) &&
1972 !filemap_release_folio(folio, GFP_KERNEL)) {
1973 result = SCAN_PAGE_HAS_PRIVATE;
1974 folio_putback_lru(folio);
1978 if (folio_mapped(folio))
1980 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1983 xas_set(&xas, index);
1985 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1988 * The page is expected to have page_count() == 3:
1989 * - we hold a pin on it;
1990 * - one reference from page cache;
1991 * - one from isolate_lru_page;
1993 if (!page_ref_freeze(page, 3)) {
1994 result = SCAN_PAGE_COUNT;
1995 xas_unlock_irq(&xas);
1996 putback_lru_page(page);
2001 * Add the page to the list to be able to undo the collapse if
2002 * something goes wrong.
2004 list_add_tail(&page->lru, &pagelist);
2006 /* Finally, replace with the new page. */
2007 xas_store(&xas, hpage);
2014 nr = thp_nr_pages(hpage);
2017 __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
2019 __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
2020 filemap_nr_thps_inc(mapping);
2022 * Paired with smp_mb() in do_dentry_open() to ensure
2023 * i_writecount is up to date and the update to nr_thps is
2024 * visible. Ensures the page cache will be truncated if the
2025 * file is opened writable.
2028 if (inode_is_open_for_write(mapping->host)) {
2030 __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
2031 filemap_nr_thps_dec(mapping);
2037 __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
2038 /* nr_none is always 0 for non-shmem. */
2039 __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
2042 /* Join all the small entries into a single multi-index entry */
2043 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2044 xas_store(&xas, hpage);
2046 xas_unlock_irq(&xas);
2050 * If collapse is successful, flush must be done now before copying.
2051 * If collapse is unsuccessful, does flush actually need to be done?
2052 * Do it anyway, to clear the state.
2054 try_to_unmap_flush();
2056 if (result == SCAN_SUCCEED) {
2057 struct page *page, *tmp;
2058 struct folio *folio;
2061 * Replacing the old pages with the new one has succeeded, now we
2062 * need to copy the content and free the old pages.
2065 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2066 while (index < page->index) {
2067 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2070 copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
2072 list_del(&page->lru);
2073 page->mapping = NULL;
2074 page_ref_unfreeze(page, 1);
2075 ClearPageActive(page);
2076 ClearPageUnevictable(page);
2081 while (index < end) {
2082 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2086 folio = page_folio(hpage);
2087 folio_mark_uptodate(folio);
2088 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2091 folio_mark_dirty(folio);
2092 folio_add_lru(folio);
2095 * Remove pte page tables, so we can re-fault the page as huge.
2097 result = retract_page_tables(mapping, start, mm, addr, hpage,
2104 /* Something went wrong: roll back page cache changes */
2107 mapping->nrpages -= nr_none;
2108 shmem_uncharge(mapping->host, nr_none);
2111 xas_set(&xas, start);
2112 xas_for_each(&xas, page, end - 1) {
2113 page = list_first_entry_or_null(&pagelist,
2115 if (!page || xas.xa_index < page->index) {
2119 /* Put holes back where they were */
2120 xas_store(&xas, NULL);
2124 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
2126 /* Unfreeze the page. */
2127 list_del(&page->lru);
2128 page_ref_unfreeze(page, 2);
2129 xas_store(&xas, page);
2131 xas_unlock_irq(&xas);
2133 putback_lru_page(page);
2137 xas_unlock_irq(&xas);
2139 hpage->mapping = NULL;
2145 VM_BUG_ON(!list_empty(&pagelist));
2149 trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
2153 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2154 struct file *file, pgoff_t start,
2155 struct collapse_control *cc)
2157 struct page *page = NULL;
2158 struct address_space *mapping = file->f_mapping;
2159 XA_STATE(xas, &mapping->i_pages, start);
2161 int node = NUMA_NO_NODE;
2162 int result = SCAN_SUCCEED;
2166 memset(cc->node_load, 0, sizeof(cc->node_load));
2167 nodes_clear(cc->alloc_nmask);
2169 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2170 if (xas_retry(&xas, page))
2173 if (xa_is_value(page)) {
2175 if (cc->is_khugepaged &&
2176 swap > khugepaged_max_ptes_swap) {
2177 result = SCAN_EXCEED_SWAP_PTE;
2178 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2185 * TODO: khugepaged should compact smaller compound pages
2186 * into a PMD sized page
2188 if (PageTransCompound(page)) {
2189 struct page *head = compound_head(page);
2191 result = compound_order(head) == HPAGE_PMD_ORDER &&
2192 head->index == start
2193 /* Maybe PMD-mapped */
2194 ? SCAN_PTE_MAPPED_HUGEPAGE
2195 : SCAN_PAGE_COMPOUND;
2197 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2198 * by the caller won't touch the page cache, and so
2199 * it's safe to skip LRU and refcount checks before returning.
2205 node = page_to_nid(page);
2206 if (hpage_collapse_scan_abort(node, cc)) {
2207 result = SCAN_SCAN_ABORT;
2210 cc->node_load[node]++;
2212 if (!PageLRU(page)) {
2213 result = SCAN_PAGE_LRU;
2217 if (page_count(page) !=
2218 1 + page_mapcount(page) + page_has_private(page)) {
2219 result = SCAN_PAGE_COUNT;
2224 * We probably should check if the page is referenced here, but
2225 * nobody would transfer pte_young() to PageReferenced() for us.
2226 * And rmap walk here is just too costly...
2231 if (need_resched()) {
2238 if (result == SCAN_SUCCEED) {
2239 if (cc->is_khugepaged &&
2240 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2241 result = SCAN_EXCEED_NONE_PTE;
2242 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2244 result = collapse_file(mm, addr, file, start, cc);
2248 trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
2252 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2253 struct file *file, pgoff_t start,
2254 struct collapse_control *cc)
2259 static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
2263 static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
2270 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2271 struct collapse_control *cc)
2272 __releases(&khugepaged_mm_lock)
2273 __acquires(&khugepaged_mm_lock)
2275 struct vma_iterator vmi;
2276 struct khugepaged_mm_slot *mm_slot;
2277 struct mm_slot *slot;
2278 struct mm_struct *mm;
2279 struct vm_area_struct *vma;
2283 lockdep_assert_held(&khugepaged_mm_lock);
2284 *result = SCAN_FAIL;
2286 if (khugepaged_scan.mm_slot) {
2287 mm_slot = khugepaged_scan.mm_slot;
2288 slot = &mm_slot->slot;
2290 slot = list_entry(khugepaged_scan.mm_head.next,
2291 struct mm_slot, mm_node);
2292 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2293 khugepaged_scan.address = 0;
2294 khugepaged_scan.mm_slot = mm_slot;
2296 spin_unlock(&khugepaged_mm_lock);
2297 khugepaged_collapse_pte_mapped_thps(mm_slot);
2301 * Don't wait for semaphore (to avoid long wait times). Just move to
2302 * the next mm on the list.
2305 if (unlikely(!mmap_read_trylock(mm)))
2306 goto breakouterloop_mmap_lock;
2309 if (unlikely(hpage_collapse_test_exit(mm)))
2310 goto breakouterloop;
2312 vma_iter_init(&vmi, mm, khugepaged_scan.address);
2313 for_each_vma(vmi, vma) {
2314 unsigned long hstart, hend;
2317 if (unlikely(hpage_collapse_test_exit(mm))) {
2321 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
2326 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2327 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
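		/* Only PMD-aligned, PMD-sized ranges fully inside the vma are candidates. */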
2328 if (khugepaged_scan.address > hend)
2330 if (khugepaged_scan.address < hstart)
2331 khugepaged_scan.address = hstart;
2332 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2334 while (khugepaged_scan.address < hend) {
2335 bool mmap_locked = true;
2338 if (unlikely(hpage_collapse_test_exit(mm)))
2339 goto breakouterloop;
2341 VM_BUG_ON(khugepaged_scan.address < hstart ||
2342 khugepaged_scan.address + HPAGE_PMD_SIZE >
2344 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2345 struct file *file = get_file(vma->vm_file);
2346 pgoff_t pgoff = linear_page_index(vma,
2347 khugepaged_scan.address);
2349 mmap_read_unlock(mm);
2350 *result = hpage_collapse_scan_file(mm,
2351 khugepaged_scan.address,
2353 mmap_locked = false;
2356 *result = hpage_collapse_scan_pmd(mm, vma,
2357 khugepaged_scan.address,
2362 case SCAN_PTE_MAPPED_HUGEPAGE: {
2365 *result = find_pmd_or_thp_or_none(mm,
2366 khugepaged_scan.address,
2368 if (*result != SCAN_SUCCEED)
2370 if (!khugepaged_add_pte_mapped_thp(mm,
2371 khugepaged_scan.address))
2375 ++khugepaged_pages_collapsed;
2381 /* move to next address */
2382 khugepaged_scan.address += HPAGE_PMD_SIZE;
2383 progress += HPAGE_PMD_NR;
2386 * We released mmap_lock, so break the loop. Note
2387 * that we drop mmap_lock before all hugepage
2388 * allocations, so if allocation fails, we are
2389 * guaranteed to break here and report the
2390 * correct result back to caller.
2392 goto breakouterloop_mmap_lock;
2393 if (progress >= pages)
2394 goto breakouterloop;
2398 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2399 breakouterloop_mmap_lock:
2401 spin_lock(&khugepaged_mm_lock);
2402 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2404 * Release the current mm_slot if this mm is about to die, or
2405 * if we scanned all vmas of this mm.
2407 if (hpage_collapse_test_exit(mm) || !vma) {
2409 * Make sure that if mm_users reaches zero while
2410 * khugepaged runs here, khugepaged_exit will find
2411 * mm_slot not pointing to the exiting mm.
2413 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2414 slot = list_entry(slot->mm_node.next,
2415 struct mm_slot, mm_node);
2416 khugepaged_scan.mm_slot =
2417 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2418 khugepaged_scan.address = 0;
2420 khugepaged_scan.mm_slot = NULL;
2421 khugepaged_full_scans++;
2424 collect_mm_slot(mm_slot);
2430 static int khugepaged_has_work(void)
2432 return !list_empty(&khugepaged_scan.mm_head) &&
2433 hugepage_flags_enabled();
2436 static int khugepaged_wait_event(void)
2438 return !list_empty(&khugepaged_scan.mm_head) ||
2439 kthread_should_stop();
2442 static void khugepaged_do_scan(struct collapse_control *cc)
2444 unsigned int progress = 0, pass_through_head = 0;
2445 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2447 int result = SCAN_SUCCEED;
2449 lru_add_drain_all();
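	/*
	 * Scan in batches: keep calling khugepaged_scan_mm_slot() until the
	 * `pages` budget is used up or the head of the mm list has been
	 * reached a second time (pass_through_head == 2), i.e. one full pass
	 * over all registered mms has been completed.
	 */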
2454 if (unlikely(kthread_should_stop() || try_to_freeze()))
2457 spin_lock(&khugepaged_mm_lock);
2458 if (!khugepaged_scan.mm_slot)
2459 pass_through_head++;
2460 if (khugepaged_has_work() &&
2461 pass_through_head < 2)
2462 progress += khugepaged_scan_mm_slot(pages - progress,
2466 spin_unlock(&khugepaged_mm_lock);
2468 if (progress >= pages)
2471 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2473 * If we fail to allocate the first time, try to sleep for
2474 * a while. If allocation fails again, cancel the scan.
2479 khugepaged_alloc_sleep();
2484 static bool khugepaged_should_wakeup(void)
2486 return kthread_should_stop() ||
2487 time_after_eq(jiffies, khugepaged_sleep_expire);
2490 static void khugepaged_wait_work(void)
2492 if (khugepaged_has_work()) {
2493 const unsigned long scan_sleep_jiffies =
2494 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2496 if (!scan_sleep_jiffies)
2499 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2500 wait_event_freezable_timeout(khugepaged_wait,
2501 khugepaged_should_wakeup(),
2502 scan_sleep_jiffies);
2506 if (hugepage_flags_enabled())
2507 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2510 static int khugepaged(void *none)
2512 struct khugepaged_mm_slot *mm_slot;
2515 set_user_nice(current, MAX_NICE);
2517 while (!kthread_should_stop()) {
2518 khugepaged_do_scan(&khugepaged_collapse_control);
2519 khugepaged_wait_work();
2522 spin_lock(&khugepaged_mm_lock);
2523 mm_slot = khugepaged_scan.mm_slot;
2524 khugepaged_scan.mm_slot = NULL;
2526 collect_mm_slot(mm_slot);
2527 spin_unlock(&khugepaged_mm_lock);
2531 static void set_recommended_min_free_kbytes(void)
2535 unsigned long recommended_min;
2537 if (!hugepage_flags_enabled()) {
2538 calculate_min_free_kbytes();
2542 for_each_populated_zone(zone) {
2544 * We don't need to worry about fragmentation of
2545 * ZONE_MOVABLE since it only has movable pages.
2547 if (zone_idx(zone) > gfp_zone(GFP_USER))
2553 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2554 recommended_min = pageblock_nr_pages * nr_zones * 2;
2557 * Make sure that on average at least two pageblocks are almost free
2558 * of another type, one for a migratetype to fall back to and a
2559 * second to avoid subsequent fallbacks of other types. There are 3
2560 * MIGRATE_TYPES we care about.
2562 recommended_min += pageblock_nr_pages * nr_zones *
2563 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
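	/*
	 * Rough worked example (assuming typical x86-64 defaults: 4 KiB base
	 * pages and 2 MiB pageblocks, i.e. pageblock_nr_pages == 512, with
	 * MIGRATE_PCPTYPES == 3): a single populated zone asks for
	 * (2 + 3 * 3) * 512 = 5632 pages, roughly 22 MiB, before the
	 * 5%-of-lowmem clamp applied below.
	 */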
2565 /* don't ever allow reserving more than 5% of the lowmem */
2566 recommended_min = min(recommended_min,
2567 (unsigned long) nr_free_buffer_pages() / 20);
2568 recommended_min <<= (PAGE_SHIFT-10);
2570 if (recommended_min > min_free_kbytes) {
2571 if (user_min_free_kbytes >= 0)
2572 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2573 min_free_kbytes, recommended_min);
2575 min_free_kbytes = recommended_min;
2579 setup_per_zone_wmarks();
2582 int start_stop_khugepaged(void)
2586 mutex_lock(&khugepaged_mutex);
2587 if (hugepage_flags_enabled()) {
2588 if (!khugepaged_thread)
2589 khugepaged_thread = kthread_run(khugepaged, NULL,
2591 if (IS_ERR(khugepaged_thread)) {
2592 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2593 err = PTR_ERR(khugepaged_thread);
2594 khugepaged_thread = NULL;
2598 if (!list_empty(&khugepaged_scan.mm_head))
2599 wake_up_interruptible(&khugepaged_wait);
2600 } else if (khugepaged_thread) {
2601 kthread_stop(khugepaged_thread);
2602 khugepaged_thread = NULL;
2604 set_recommended_min_free_kbytes();
2606 mutex_unlock(&khugepaged_mutex);
2610 void khugepaged_min_free_kbytes_update(void)
2612 mutex_lock(&khugepaged_mutex);
2613 if (hugepage_flags_enabled() && khugepaged_thread)
2614 set_recommended_min_free_kbytes();
2615 mutex_unlock(&khugepaged_mutex);
2618 bool current_is_khugepaged(void)
2620 return kthread_func(current) == khugepaged;
2623 static int madvise_collapse_errno(enum scan_result r)
2626 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2627 * actionable feedback to the caller, so it may take an appropriate
2628 * fallback measure depending on the nature of the failure.
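 *
 * A hypothetical caller sketch (not part of this file; madvise(2) returns
 * -1 with errno set on failure, and schedule_retry() is just a made-up
 * placeholder): treat EAGAIN as transient and retry later, and treat any
 * other error as permanent for the given range:
 *
 *	if (madvise(addr, len, MADV_COLLAPSE) && errno == EAGAIN)
 *		schedule_retry(addr, len);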
2631 case SCAN_ALLOC_HUGE_PAGE_FAIL:
2633 case SCAN_CGROUP_CHARGE_FAIL:
2635 /* Resource temporarily unavailable - trying again might succeed */
2636 case SCAN_PAGE_COUNT:
2637 case SCAN_PAGE_LOCK:
2639 case SCAN_DEL_PAGE_LRU:
2642 * Other: trying again is unlikely to succeed / the error is intrinsic
2643 * to the specified memory range. khugepaged likely won't be able to collapse
2651 int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2652 unsigned long start, unsigned long end)
2654 struct collapse_control *cc;
2655 struct mm_struct *mm = vma->vm_mm;
2656 unsigned long hstart, hend, addr;
2657 int thps = 0, last_fail = SCAN_FAIL;
2658 bool mmap_locked = true;
2660 BUG_ON(vma->vm_start > start);
2661 BUG_ON(vma->vm_end < end);
2665 if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
2668 cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2671 cc->is_khugepaged = false;
2674 lru_add_drain_all();
2676 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2677 hend = end & HPAGE_PMD_MASK;
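	/*
	 * Clamp the request to whole, naturally aligned PMD-sized ranges:
	 * round start up and end down to HPAGE_PMD_SIZE. With 2 MiB huge
	 * pages, for example, start = 0x201000 becomes hstart = 0x400000.
	 */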
2679 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2680 int result = SCAN_FAIL;
2686 result = hugepage_vma_revalidate(mm, addr, false, &vma,
2688 if (result != SCAN_SUCCEED) {
2693 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2695 mmap_assert_locked(mm);
2696 memset(cc->node_load, 0, sizeof(cc->node_load));
2697 nodes_clear(cc->alloc_nmask);
2698 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2699 struct file *file = get_file(vma->vm_file);
2700 pgoff_t pgoff = linear_page_index(vma, addr);
2702 mmap_read_unlock(mm);
2703 mmap_locked = false;
2704 result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2708 result = hpage_collapse_scan_pmd(mm, vma, addr,
2712 *prev = NULL; /* Tell caller we dropped mmap_lock */
2717 case SCAN_PMD_MAPPED:
2720 case SCAN_PTE_MAPPED_HUGEPAGE:
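		/*
		 * The range is already backed by a huge page in the page cache
		 * but is pte-mapped in this mm. Unlike khugepaged, which defers
		 * this case, MADV_COLLAPSE fixes it up synchronously under the
		 * mmap write lock.
		 */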
2721 BUG_ON(mmap_locked);
2723 mmap_write_lock(mm);
2724 result = collapse_pte_mapped_thp(mm, addr, true);
2725 mmap_write_unlock(mm);
2727 /* Whitelisted set of results where continuing is OK */
2729 case SCAN_PTE_NON_PRESENT:
2730 case SCAN_PTE_UFFD_WP:
2732 case SCAN_LACK_REFERENCED_PAGE:
2733 case SCAN_PAGE_NULL:
2734 case SCAN_PAGE_COUNT:
2735 case SCAN_PAGE_LOCK:
2736 case SCAN_PAGE_COMPOUND:
2738 case SCAN_DEL_PAGE_LRU:
2743 /* Other error, exit */
2749 /* Caller expects us to hold mmap_lock on return */
2753 mmap_assert_locked(mm);
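	/*
	 * Report success only if every PMD-sized range in [hstart, hend) is
	 * now backed by a huge page; otherwise translate the last recorded
	 * failure into an errno for the caller.
	 */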
2757 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2758 : madvise_collapse_errno(last_fail);