1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/page_table_check.h>
20 #include <linux/rcupdate_wait.h>
21 #include <linux/swapops.h>
22 #include <linux/shmem_fs.h>
23 #include <linux/ksm.h>
26 #include <asm/pgalloc.h>
38 SCAN_EXCEED_SHARED_PTE,
41 SCAN_PTE_MAPPED_HUGEPAGE,
43 SCAN_LACK_REFERENCED_PAGE,
56 SCAN_ALLOC_HUGE_PAGE_FAIL,
57 SCAN_CGROUP_CHARGE_FAIL,
59 SCAN_PAGE_HAS_PRIVATE,
65 #define CREATE_TRACE_POINTS
66 #include <trace/events/huge_memory.h>
68 static struct task_struct *khugepaged_thread __read_mostly;
69 static DEFINE_MUTEX(khugepaged_mutex);
71 /* default scan 8*512 ptes (or vmas) every 10 seconds */
72 static unsigned int khugepaged_pages_to_scan __read_mostly;
73 static unsigned int khugepaged_pages_collapsed;
74 static unsigned int khugepaged_full_scans;
75 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
76 /* during fragmentation poll the hugepage allocator once every minute */
77 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
78 static unsigned long khugepaged_sleep_expire;
79 static DEFINE_SPINLOCK(khugepaged_mm_lock);
80 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
82 * By default, collapse hugepages if there is at least one pte mapped like
83 * it would have happened had the vma been large enough during page fault.
86 * Note that these are only respected if collapse was initiated by khugepaged.
88 static unsigned int khugepaged_max_ptes_none __read_mostly;
89 static unsigned int khugepaged_max_ptes_swap __read_mostly;
90 static unsigned int khugepaged_max_ptes_shared __read_mostly;
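/*
 * The defaults are set in khugepaged_init() below; e.g. with 4K pages and a
 * 2M PMD (HPAGE_PMD_NR == 512) that is 511 none, 64 swap and 256 shared ptes.
 * All three limits are runtime-tunable through the khugepaged sysfs group
 * defined further down in this file.
 */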
92 #define MM_SLOTS_HASH_BITS 10
93 static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
95 static struct kmem_cache *mm_slot_cache __ro_after_init;
97 struct collapse_control {
100 /* Num pages scanned per node */
101 u32 node_load[MAX_NUMNODES];
103 /* nodemask for allocation fallback */
104 nodemask_t alloc_nmask;
108 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
109 * @slot: hash lookup from mm to mm_slot
111 struct khugepaged_mm_slot {
116 * struct khugepaged_scan - cursor for scanning
117 * @mm_head: the head of the mm list to scan
118 * @mm_slot: the current mm_slot we are scanning
119 * @address: the next address inside that to be scanned
121 * There is only the one khugepaged_scan instance of this cursor structure.
123 struct khugepaged_scan {
124 struct list_head mm_head;
125 struct khugepaged_mm_slot *mm_slot;
126 unsigned long address;
129 static struct khugepaged_scan khugepaged_scan = {
130 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
134 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
135 struct kobj_attribute *attr,
138 return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
141 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
142 struct kobj_attribute *attr,
143 const char *buf, size_t count)
148 err = kstrtouint(buf, 10, &msecs);
152 khugepaged_scan_sleep_millisecs = msecs;
153 khugepaged_sleep_expire = 0;
154 wake_up_interruptible(&khugepaged_wait);
158 static struct kobj_attribute scan_sleep_millisecs_attr =
159 __ATTR_RW(scan_sleep_millisecs);
161 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
162 struct kobj_attribute *attr,
165 return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
168 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
169 struct kobj_attribute *attr,
170 const char *buf, size_t count)
175 err = kstrtouint(buf, 10, &msecs);
179 khugepaged_alloc_sleep_millisecs = msecs;
180 khugepaged_sleep_expire = 0;
181 wake_up_interruptible(&khugepaged_wait);
185 static struct kobj_attribute alloc_sleep_millisecs_attr =
186 __ATTR_RW(alloc_sleep_millisecs);
188 static ssize_t pages_to_scan_show(struct kobject *kobj,
189 struct kobj_attribute *attr,
192 return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
194 static ssize_t pages_to_scan_store(struct kobject *kobj,
195 struct kobj_attribute *attr,
196 const char *buf, size_t count)
201 err = kstrtouint(buf, 10, &pages);
205 khugepaged_pages_to_scan = pages;
209 static struct kobj_attribute pages_to_scan_attr =
210 __ATTR_RW(pages_to_scan);
212 static ssize_t pages_collapsed_show(struct kobject *kobj,
213 struct kobj_attribute *attr,
216 return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
218 static struct kobj_attribute pages_collapsed_attr =
219 __ATTR_RO(pages_collapsed);
221 static ssize_t full_scans_show(struct kobject *kobj,
222 struct kobj_attribute *attr,
225 return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
227 static struct kobj_attribute full_scans_attr =
228 __ATTR_RO(full_scans);
230 static ssize_t defrag_show(struct kobject *kobj,
231 struct kobj_attribute *attr, char *buf)
233 return single_hugepage_flag_show(kobj, attr, buf,
234 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
236 static ssize_t defrag_store(struct kobject *kobj,
237 struct kobj_attribute *attr,
238 const char *buf, size_t count)
240 return single_hugepage_flag_store(kobj, attr, buf, count,
241 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
243 static struct kobj_attribute khugepaged_defrag_attr =
247 * max_ptes_none controls if khugepaged should collapse hugepages over
248 * any unmapped ptes in turn potentially increasing the memory
249 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
250 * reduce the available free memory in the system as it
251 * runs. Increasing max_ptes_none will instead potentially reduce the
252 * free memory in the system during the khugepaged scan.
254 static ssize_t max_ptes_none_show(struct kobject *kobj,
255 struct kobj_attribute *attr,
258 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
260 static ssize_t max_ptes_none_store(struct kobject *kobj,
261 struct kobj_attribute *attr,
262 const char *buf, size_t count)
265 unsigned long max_ptes_none;
267 err = kstrtoul(buf, 10, &max_ptes_none);
268 if (err || max_ptes_none > HPAGE_PMD_NR - 1)
271 khugepaged_max_ptes_none = max_ptes_none;
275 static struct kobj_attribute khugepaged_max_ptes_none_attr =
276 __ATTR_RW(max_ptes_none);
278 static ssize_t max_ptes_swap_show(struct kobject *kobj,
279 struct kobj_attribute *attr,
282 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
285 static ssize_t max_ptes_swap_store(struct kobject *kobj,
286 struct kobj_attribute *attr,
287 const char *buf, size_t count)
290 unsigned long max_ptes_swap;
292 err = kstrtoul(buf, 10, &max_ptes_swap);
293 if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
296 khugepaged_max_ptes_swap = max_ptes_swap;
301 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
302 __ATTR_RW(max_ptes_swap);
304 static ssize_t max_ptes_shared_show(struct kobject *kobj,
305 struct kobj_attribute *attr,
308 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
311 static ssize_t max_ptes_shared_store(struct kobject *kobj,
312 struct kobj_attribute *attr,
313 const char *buf, size_t count)
316 unsigned long max_ptes_shared;
318 err = kstrtoul(buf, 10, &max_ptes_shared);
319 if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
322 khugepaged_max_ptes_shared = max_ptes_shared;
327 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
328 __ATTR_RW(max_ptes_shared);
330 static struct attribute *khugepaged_attr[] = {
331 &khugepaged_defrag_attr.attr,
332 &khugepaged_max_ptes_none_attr.attr,
333 &khugepaged_max_ptes_swap_attr.attr,
334 &khugepaged_max_ptes_shared_attr.attr,
335 &pages_to_scan_attr.attr,
336 &pages_collapsed_attr.attr,
337 &full_scans_attr.attr,
338 &scan_sleep_millisecs_attr.attr,
339 &alloc_sleep_millisecs_attr.attr,
343 struct attribute_group khugepaged_attr_group = {
344 .attrs = khugepaged_attr,
345 .name = "khugepaged",
347 #endif /* CONFIG_SYSFS */
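/*
 * The attributes above appear under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/.  For example
 * (illustrative values):
 *
 *	echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *	cat /sys/kernel/mm/transparent_hugepage/khugepaged/full_scans
 */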
349 int hugepage_madvise(struct vm_area_struct *vma,
350 unsigned long *vm_flags, int advice)
356 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
357 * can't handle this properly after s390_enable_sie, so we simply
358 * ignore the madvise to prevent qemu from causing a SIGSEGV.
360 if (mm_has_pgste(vma->vm_mm))
363 *vm_flags &= ~VM_NOHUGEPAGE;
364 *vm_flags |= VM_HUGEPAGE;
366 * If the vma becomes good for khugepaged to scan,
367 * register it here without waiting for a page fault that
368 * may not happen any time soon.
370 khugepaged_enter_vma(vma, *vm_flags);
372 case MADV_NOHUGEPAGE:
373 *vm_flags &= ~VM_HUGEPAGE;
374 *vm_flags |= VM_NOHUGEPAGE;
376 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
377 * this vma even if the mm remains registered in khugepaged
378 * (it may have been registered before VM_NOHUGEPAGE was set).
386 int __init khugepaged_init(void)
388 mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
392 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
393 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
394 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
395 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
400 void __init khugepaged_destroy(void)
402 kmem_cache_destroy(mm_slot_cache);
405 static inline int hpage_collapse_test_exit(struct mm_struct *mm)
407 return atomic_read(&mm->mm_users) == 0;
410 static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
412 return hpage_collapse_test_exit(mm) ||
413 test_bit(MMF_DISABLE_THP, &mm->flags);
416 static bool hugepage_pmd_enabled(void)
419 * We cover both the anon and the file-backed case here; file-backed
420 * hugepages, when configured in, are determined by the global control.
421 * Anon pmd-sized hugepages are determined by the pmd-size control.
423 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
424 hugepage_global_enabled())
426 if (test_bit(PMD_ORDER, &huge_anon_orders_always))
428 if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
430 if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
431 hugepage_global_enabled())
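/*
 * In sysfs terms: PMD collapse is possible when the PMD-sized anon order is
 * set to "always" or "madvise", or to "inherit" while the global
 * /sys/kernel/mm/transparent_hugepage/enabled control allows it; with
 * CONFIG_READ_ONLY_THP_FOR_FS the global control alone is enough for the
 * file-backed case.
 */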
436 void __khugepaged_enter(struct mm_struct *mm)
438 struct khugepaged_mm_slot *mm_slot;
439 struct mm_slot *slot;
442 /* __khugepaged_exit() must not run from under us */
443 VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
444 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
447 mm_slot = mm_slot_alloc(mm_slot_cache);
451 slot = &mm_slot->slot;
453 spin_lock(&khugepaged_mm_lock);
454 mm_slot_insert(mm_slots_hash, mm, slot);
456 * Insert just behind the scanning cursor, to let the area settle down a little.
459 wakeup = list_empty(&khugepaged_scan.mm_head);
460 list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
461 spin_unlock(&khugepaged_mm_lock);
465 wake_up_interruptible(&khugepaged_wait);
468 void khugepaged_enter_vma(struct vm_area_struct *vma,
469 unsigned long vm_flags)
471 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
472 hugepage_pmd_enabled()) {
473 if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
475 __khugepaged_enter(vma->vm_mm);
479 void __khugepaged_exit(struct mm_struct *mm)
481 struct khugepaged_mm_slot *mm_slot;
482 struct mm_slot *slot;
485 spin_lock(&khugepaged_mm_lock);
486 slot = mm_slot_lookup(mm_slots_hash, mm);
487 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
488 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
489 hash_del(&slot->hash);
490 list_del(&slot->mm_node);
493 spin_unlock(&khugepaged_mm_lock);
496 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
497 mm_slot_free(mm_slot_cache, mm_slot);
499 } else if (mm_slot) {
501 * This is required to serialize against
502 * hpage_collapse_test_exit() (which is guaranteed to run
503 * under mmap_lock read mode). Stop here (after we return,
504 * all pagetables will be destroyed) until khugepaged has
505 * finished working on the pagetables under the mmap_lock.
508 mmap_write_unlock(mm);
512 static void release_pte_folio(struct folio *folio)
514 node_stat_mod_folio(folio,
515 NR_ISOLATED_ANON + folio_is_file_lru(folio),
516 -folio_nr_pages(folio));
518 folio_putback_lru(folio);
521 static void release_pte_pages(pte_t *pte, pte_t *_pte,
522 struct list_head *compound_pagelist)
524 struct folio *folio, *tmp;
526 while (--_pte >= pte) {
527 pte_t pteval = ptep_get(_pte);
530 if (pte_none(pteval))
532 pfn = pte_pfn(pteval);
533 if (is_zero_pfn(pfn))
535 folio = pfn_folio(pfn);
536 if (folio_test_large(folio))
538 release_pte_folio(folio);
541 list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
542 list_del(&folio->lru);
543 release_pte_folio(folio);
547 static bool is_refcount_suitable(struct folio *folio)
549 int expected_refcount;
551 expected_refcount = folio_mapcount(folio);
552 if (folio_test_swapcache(folio))
553 expected_refcount += folio_nr_pages(folio);
555 return folio_ref_count(folio) == expected_refcount;
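/*
 * In other words: a folio mapped by N ptes holds N references from those
 * mappings, plus one per page while it sits in the swap cache.  Any
 * reference on top of that (GUP pins, O_DIRECT, ...) makes the folio
 * unsuitable for collapse, since the pinner may still access the old pages.
 */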
558 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
559 unsigned long address,
561 struct collapse_control *cc,
562 struct list_head *compound_pagelist)
564 struct page *page = NULL;
565 struct folio *folio = NULL;
567 int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
568 bool writable = false;
570 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
571 _pte++, address += PAGE_SIZE) {
572 pte_t pteval = ptep_get(_pte);
573 if (pte_none(pteval) || (pte_present(pteval) &&
574 is_zero_pfn(pte_pfn(pteval)))) {
576 if (!userfaultfd_armed(vma) &&
577 (!cc->is_khugepaged ||
578 none_or_zero <= khugepaged_max_ptes_none)) {
581 result = SCAN_EXCEED_NONE_PTE;
582 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
586 if (!pte_present(pteval)) {
587 result = SCAN_PTE_NON_PRESENT;
590 if (pte_uffd_wp(pteval)) {
591 result = SCAN_PTE_UFFD_WP;
594 page = vm_normal_page(vma, address, pteval);
595 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
596 result = SCAN_PAGE_NULL;
600 folio = page_folio(page);
601 VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
603 /* See hpage_collapse_scan_pmd(). */
604 if (folio_likely_mapped_shared(folio)) {
606 if (cc->is_khugepaged &&
607 shared > khugepaged_max_ptes_shared) {
608 result = SCAN_EXCEED_SHARED_PTE;
609 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
614 if (folio_test_large(folio)) {
618 * Check if we have dealt with the compound page already
621 list_for_each_entry(f, compound_pagelist, lru) {
628 * We can do it before isolate_lru_page because the
629 * page can't be freed from under us. NOTE: PG_lock
630 * is needed to serialize against split_huge_page
631 * when invoked from the VM.
633 if (!folio_trylock(folio)) {
634 result = SCAN_PAGE_LOCK;
639 * Check if the page has any GUP (or other external) pins.
641 * The page table that maps the page has been already unlinked
642 * from the page table tree and this process cannot get
643 * an additional pin on the page.
645 * New pins can come later if the page is shared across fork,
646 * but not from this process. The other process cannot write to
647 * the page, only trigger CoW.
649 if (!is_refcount_suitable(folio)) {
651 result = SCAN_PAGE_COUNT;
656 * Isolate the page to avoid collapsing a hugepage
657 * currently in use by the VM.
659 if (!folio_isolate_lru(folio)) {
661 result = SCAN_DEL_PAGE_LRU;
664 node_stat_mod_folio(folio,
665 NR_ISOLATED_ANON + folio_is_file_lru(folio),
666 folio_nr_pages(folio));
667 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
668 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
670 if (folio_test_large(folio))
671 list_add_tail(&folio->lru, compound_pagelist);
674 * If collapse was initiated by khugepaged, check that there are
675 * enough young ptes to justify collapsing the page
677 if (cc->is_khugepaged &&
678 (pte_young(pteval) || folio_test_young(folio) ||
679 folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
683 if (pte_write(pteval))
687 if (unlikely(!writable)) {
688 result = SCAN_PAGE_RO;
689 } else if (unlikely(cc->is_khugepaged && !referenced)) {
690 result = SCAN_LACK_REFERENCED_PAGE;
692 result = SCAN_SUCCEED;
693 trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
694 referenced, writable, result);
698 release_pte_pages(pte, _pte, compound_pagelist);
699 trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
700 referenced, writable, result);
704 static void __collapse_huge_page_copy_succeeded(pte_t *pte,
705 struct vm_area_struct *vma,
706 unsigned long address,
708 struct list_head *compound_pagelist)
710 struct folio *src, *tmp;
714 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
715 _pte++, address += PAGE_SIZE) {
716 pteval = ptep_get(_pte);
717 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
718 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
719 if (is_zero_pfn(pte_pfn(pteval))) {
721 * ptl mostly unnecessary.
724 ptep_clear(vma->vm_mm, address, _pte);
726 ksm_might_unmap_zero_page(vma->vm_mm, pteval);
729 struct page *src_page = pte_page(pteval);
731 src = page_folio(src_page);
732 if (!folio_test_large(src))
733 release_pte_folio(src);
735 * ptl mostly unnecessary, but preempt has to
736 * be disabled to update the per-cpu stats
737 * inside folio_remove_rmap_pte().
740 ptep_clear(vma->vm_mm, address, _pte);
741 folio_remove_rmap_pte(src, src_page, vma);
743 free_page_and_swap_cache(src_page);
747 list_for_each_entry_safe(src, tmp, compound_pagelist, lru) {
749 node_stat_sub_folio(src, NR_ISOLATED_ANON +
750 folio_is_file_lru(src));
752 free_swap_cache(src);
753 folio_putback_lru(src);
757 static void __collapse_huge_page_copy_failed(pte_t *pte,
760 struct vm_area_struct *vma,
761 struct list_head *compound_pagelist)
766 * Re-establish the PMD to point to the original page table
767 * entry. Restoring PMD needs to be done prior to releasing
768 * pages. Since pages are still isolated and locked here,
769 * acquiring anon_vma_lock_write is unnecessary.
771 pmd_ptl = pmd_lock(vma->vm_mm, pmd);
772 pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
773 spin_unlock(pmd_ptl);
775 * Release both raw and compound pages isolated
776 * in __collapse_huge_page_isolate.
778 release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
782 * __collapse_huge_page_copy - attempts to copy memory contents from raw
783 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
784 * otherwise restores the original page table and releases isolated raw pages.
785 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
787 * @pte: starting of the PTEs to copy from
788 * @folio: the new hugepage to copy contents to
789 * @pmd: pointer to the new hugepage's PMD
790 * @orig_pmd: the original raw pages' PMD
791 * @vma: the original raw pages' virtual memory area
792 * @address: starting address to copy
793 * @ptl: lock on raw pages' PTEs
794 * @compound_pagelist: list that stores compound pages
796 static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
797 pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
798 unsigned long address, spinlock_t *ptl,
799 struct list_head *compound_pagelist)
802 int result = SCAN_SUCCEED;
805 * Copying pages' contents is subject to memory poison at any iteration.
807 for (i = 0; i < HPAGE_PMD_NR; i++) {
808 pte_t pteval = ptep_get(pte + i);
809 struct page *page = folio_page(folio, i);
810 unsigned long src_addr = address + i * PAGE_SIZE;
811 struct page *src_page;
813 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
814 clear_user_highpage(page, src_addr);
817 src_page = pte_page(pteval);
818 if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) {
819 result = SCAN_COPY_MC;
824 if (likely(result == SCAN_SUCCEED))
825 __collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
828 __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
834 static void khugepaged_alloc_sleep(void)
838 add_wait_queue(&khugepaged_wait, &wait);
839 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
840 schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
841 remove_wait_queue(&khugepaged_wait, &wait);
844 struct collapse_control khugepaged_collapse_control = {
845 .is_khugepaged = true,
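/*
 * MADV_COLLAPSE uses its own collapse_control with is_khugepaged == false,
 * so the max_ptes_* limits and the "referenced" heuristics above only apply
 * to collapses initiated by the khugepaged daemon.
 */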
848 static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
853 * If node_reclaim_mode is disabled, then no extra effort is made to
854 * allocate memory locally.
856 if (!node_reclaim_enabled())
859 /* If there is a count for this node already, it must be acceptable */
860 if (cc->node_load[nid])
863 for (i = 0; i < MAX_NUMNODES; i++) {
864 if (!cc->node_load[i])
866 if (node_distance(nid, i) > node_reclaim_distance)
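/*
 * The scan is aborted once the ptes in this pmd span nodes that are further
 * apart than node_reclaim_distance (RECLAIM_DISTANCE by default), rather
 * than allocating a hugepage that would be remote for some of the pages.
 */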
872 #define khugepaged_defrag() \
873 (transparent_hugepage_flags & \
874 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
876 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
877 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
879 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
883 static int hpage_collapse_find_target_node(struct collapse_control *cc)
885 int nid, target_node = 0, max_value = 0;
887 /* find first node with max normal pages hit */
888 for (nid = 0; nid < MAX_NUMNODES; nid++)
889 if (cc->node_load[nid] > max_value) {
890 max_value = cc->node_load[nid];
894 for_each_online_node(nid) {
895 if (max_value == cc->node_load[nid])
896 node_set(nid, cc->alloc_nmask);
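/*
 * Every node that ties for the highest node_load[] count is added to
 * cc->alloc_nmask, so the hugepage allocation is restricted to the nodes
 * where the existing pages actually live; target_node is the first of them.
 */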
902 static int hpage_collapse_find_target_node(struct collapse_control *cc)
909 * If the mmap_lock was temporarily dropped, revalidate the vma
910 * before continuing.
911 * Returns enum scan_result value.
914 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
916 struct vm_area_struct **vmap,
917 struct collapse_control *cc)
919 struct vm_area_struct *vma;
920 unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
922 if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
923 return SCAN_ANY_PROCESS;
925 *vmap = vma = find_vma(mm, address);
927 return SCAN_VMA_NULL;
929 if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
930 return SCAN_ADDRESS_RANGE;
931 if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
932 return SCAN_VMA_CHECK;
934 * Anon VMA expected, the address may be unmapped then
935 * remapped to file after khugepaged re-acquired the mmap_lock.
937 * thp_vma_allowable_order may return true for qualified file vmas.
940 if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
941 return SCAN_PAGE_ANON;
945 static int find_pmd_or_thp_or_none(struct mm_struct *mm,
946 unsigned long address,
951 *pmd = mm_find_pmd(mm, address);
953 return SCAN_PMD_NULL;
955 pmde = pmdp_get_lockless(*pmd);
957 return SCAN_PMD_NONE;
958 if (!pmd_present(pmde))
959 return SCAN_PMD_NULL;
960 if (pmd_trans_huge(pmde))
961 return SCAN_PMD_MAPPED;
962 if (pmd_devmap(pmde))
963 return SCAN_PMD_NULL;
965 return SCAN_PMD_NULL;
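/*
 * Summary of the return values: SCAN_PMD_MAPPED means the range is already
 * backed by a huge pmd, SCAN_PMD_NONE that the pmd is clear, SCAN_PMD_NULL
 * that no usable page table is there, and SCAN_SUCCEED that a normal pte
 * table is mapped and collapse can be attempted.
 */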
969 static int check_pmd_still_valid(struct mm_struct *mm,
970 unsigned long address,
974 int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
976 if (result != SCAN_SUCCEED)
984 * Bring missing pages in from swap, to complete THP collapse.
985 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
987 * Called and returns without pte mapped or spinlocks held.
988 * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
990 static int __collapse_huge_page_swapin(struct mm_struct *mm,
991 struct vm_area_struct *vma,
992 unsigned long haddr, pmd_t *pmd,
997 unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
1002 for (address = haddr; address < end; address += PAGE_SIZE) {
1003 struct vm_fault vmf = {
1006 .pgoff = linear_page_index(vma, address),
1007 .flags = FAULT_FLAG_ALLOW_RETRY,
1012 pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
1014 mmap_read_unlock(mm);
1015 result = SCAN_PMD_NULL;
1020 vmf.orig_pte = ptep_get_lockless(pte);
1021 if (!is_swap_pte(vmf.orig_pte))
1026 ret = do_swap_page(&vmf);
1027 /* Which unmaps pte (after perhaps re-checking the entry) */
1031 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
1032 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
1033 * we do not retry here, and the swap entry will remain in the
1034 * page table, resulting in later failure.
1036 if (ret & VM_FAULT_RETRY) {
1037 /* Likely, but not guaranteed, that page lock failed */
1038 result = SCAN_PAGE_LOCK;
1041 if (ret & VM_FAULT_ERROR) {
1042 mmap_read_unlock(mm);
1052 /* Drain LRU cache to remove extra pin on the swapped in pages */
1056 result = SCAN_SUCCEED;
1058 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
1062 static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
1063 struct collapse_control *cc)
1065 gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
1067 int node = hpage_collapse_find_target_node(cc);
1068 struct folio *folio;
1070 folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
1073 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1074 return SCAN_ALLOC_HUGE_PAGE_FAIL;
1077 count_vm_event(THP_COLLAPSE_ALLOC);
1078 if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
1081 return SCAN_CGROUP_CHARGE_FAIL;
1084 count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
1087 return SCAN_SUCCEED;
1090 static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
1091 int referenced, int unmapped,
1092 struct collapse_control *cc)
1094 LIST_HEAD(compound_pagelist);
1098 struct folio *folio;
1099 spinlock_t *pmd_ptl, *pte_ptl;
1100 int result = SCAN_FAIL;
1101 struct vm_area_struct *vma;
1102 struct mmu_notifier_range range;
1104 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1107 * Before allocating the hugepage, release the mmap_lock read lock.
1108 * The allocation can take potentially a long time if it involves
1109 * sync compaction, and we do not need to hold the mmap_lock during
1110 * that. We will recheck the vma after taking it again in write mode.
1112 mmap_read_unlock(mm);
1114 result = alloc_charge_folio(&folio, mm, cc);
1115 if (result != SCAN_SUCCEED)
1119 result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1120 if (result != SCAN_SUCCEED) {
1121 mmap_read_unlock(mm);
1125 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1126 if (result != SCAN_SUCCEED) {
1127 mmap_read_unlock(mm);
1133 * __collapse_huge_page_swapin will return with mmap_lock
1134 * released when it fails. So we jump out_nolock directly in
1135 * that case. Continuing to collapse causes inconsistency.
1137 result = __collapse_huge_page_swapin(mm, vma, address, pmd,
1139 if (result != SCAN_SUCCEED)
1143 mmap_read_unlock(mm);
1145 * Prevent all access to pagetables with the exception of
1146 * gup_fast later handled by the ptep_clear_flush and the VM
1147 * handled by the anon_vma lock + PG_lock.
1149 * UFFDIO_MOVE is prevented from racing as well, thanks to the mmap_lock.
1152 mmap_write_lock(mm);
1153 result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1154 if (result != SCAN_SUCCEED)
1156 /* check if the pmd is still valid */
1157 result = check_pmd_still_valid(mm, address, pmd);
1158 if (result != SCAN_SUCCEED)
1161 vma_start_write(vma);
1162 anon_vma_lock_write(vma->anon_vma);
1164 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
1165 address + HPAGE_PMD_SIZE);
1166 mmu_notifier_invalidate_range_start(&range);
1168 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1170 * This removes any huge TLB entry from the CPU so we won't allow
1171 * huge and small TLB entries for the same virtual address to
1172 * avoid the risk of CPU bugs in that area.
1174 * Parallel GUP-fast is fine since GUP-fast will back off when
1175 * it detects PMD is changed.
1177 _pmd = pmdp_collapse_flush(vma, address, pmd);
1178 spin_unlock(pmd_ptl);
1179 mmu_notifier_invalidate_range_end(&range);
1180 tlb_remove_table_sync_one();
1182 pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
1184 result = __collapse_huge_page_isolate(vma, address, pte, cc,
1185 &compound_pagelist);
1186 spin_unlock(pte_ptl);
1188 result = SCAN_PMD_NULL;
1191 if (unlikely(result != SCAN_SUCCEED)) {
1195 BUG_ON(!pmd_none(*pmd));
1197 * We can only use set_pmd_at when establishing
1198 * hugepmds and never for establishing regular pmds that
1199 * point to regular pagetables. Use pmd_populate for that.
1201 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1202 spin_unlock(pmd_ptl);
1203 anon_vma_unlock_write(vma->anon_vma);
1208 * All pages are isolated and locked so anon_vma rmap
1209 * can't run anymore.
1211 anon_vma_unlock_write(vma->anon_vma);
1213 result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
1214 vma, address, pte_ptl,
1215 &compound_pagelist);
1217 if (unlikely(result != SCAN_SUCCEED))
1221 * The smp_wmb() inside __folio_mark_uptodate() ensures the
1222 * copy_huge_page writes become visible before the set_pmd_at() write.
1225 __folio_mark_uptodate(folio);
1226 pgtable = pmd_pgtable(_pmd);
1228 _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
1229 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1232 BUG_ON(!pmd_none(*pmd));
1233 folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
1234 folio_add_lru_vma(folio, vma);
1235 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1236 set_pmd_at(mm, address, pmd, _pmd);
1237 update_mmu_cache_pmd(vma, address, pmd);
1238 spin_unlock(pmd_ptl);
1242 result = SCAN_SUCCEED;
1244 mmap_write_unlock(mm);
1248 trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
1252 static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1253 struct vm_area_struct *vma,
1254 unsigned long address, bool *mmap_locked,
1255 struct collapse_control *cc)
1259 int result = SCAN_FAIL, referenced = 0;
1260 int none_or_zero = 0, shared = 0;
1261 struct page *page = NULL;
1262 struct folio *folio = NULL;
1263 unsigned long _address;
1265 int node = NUMA_NO_NODE, unmapped = 0;
1266 bool writable = false;
1268 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1270 result = find_pmd_or_thp_or_none(mm, address, &pmd);
1271 if (result != SCAN_SUCCEED)
1274 memset(cc->node_load, 0, sizeof(cc->node_load));
1275 nodes_clear(cc->alloc_nmask);
1276 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1278 result = SCAN_PMD_NULL;
1282 for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1283 _pte++, _address += PAGE_SIZE) {
1284 pte_t pteval = ptep_get(_pte);
1285 if (is_swap_pte(pteval)) {
1287 if (!cc->is_khugepaged ||
1288 unmapped <= khugepaged_max_ptes_swap) {
1290 * Always be strict with uffd-wp
1291 * enabled swap entries. Please see
1292 * comment below for pte_uffd_wp().
1294 if (pte_swp_uffd_wp_any(pteval)) {
1295 result = SCAN_PTE_UFFD_WP;
1300 result = SCAN_EXCEED_SWAP_PTE;
1301 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1305 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1307 if (!userfaultfd_armed(vma) &&
1308 (!cc->is_khugepaged ||
1309 none_or_zero <= khugepaged_max_ptes_none)) {
1312 result = SCAN_EXCEED_NONE_PTE;
1313 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1317 if (pte_uffd_wp(pteval)) {
1319 * Don't collapse the page if any of the small
1320 * PTEs are armed with uffd write protection.
1321 * Here we can also mark the new huge pmd as
1322 * write protected if any of the small ones is
1323 * marked but that could bring unknown
1324 * userfault messages that fall outside of
1325 * the registered range. So, just be simple.
1327 result = SCAN_PTE_UFFD_WP;
1330 if (pte_write(pteval))
1333 page = vm_normal_page(vma, _address, pteval);
1334 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1335 result = SCAN_PAGE_NULL;
1338 folio = page_folio(page);
1340 if (!folio_test_anon(folio)) {
1341 result = SCAN_PAGE_ANON;
1346 * We treat a single page as shared if any part of the THP
1347 * is shared. "False negatives" from
1348 * folio_likely_mapped_shared() are not expected to matter much in practice.
1351 if (folio_likely_mapped_shared(folio)) {
1353 if (cc->is_khugepaged &&
1354 shared > khugepaged_max_ptes_shared) {
1355 result = SCAN_EXCEED_SHARED_PTE;
1356 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1362 * Record which node the original page is from and save this
1363 * information to cc->node_load[].
1364 * Khugepaged will allocate the hugepage from the node that has the max hit record.
1367 node = folio_nid(folio);
1368 if (hpage_collapse_scan_abort(node, cc)) {
1369 result = SCAN_SCAN_ABORT;
1372 cc->node_load[node]++;
1373 if (!folio_test_lru(folio)) {
1374 result = SCAN_PAGE_LRU;
1377 if (folio_test_locked(folio)) {
1378 result = SCAN_PAGE_LOCK;
1383 * Check if the page has any GUP (or other external) pins.
1385 * Here the check may be racy:
1386 * it may see folio_mapcount() > folio_ref_count().
1387 * But such a case is ephemeral; we could always retry collapse
1388 * later. However it may report a false positive if the page
1389 * has excessive GUP pins (i.e. 512). Anyway the same check
1390 * will be done again later, so the risk seems low.
1392 if (!is_refcount_suitable(folio)) {
1393 result = SCAN_PAGE_COUNT;
1398 * If collapse was initiated by khugepaged, check that there are
1399 * enough young ptes to justify collapsing the page
1401 if (cc->is_khugepaged &&
1402 (pte_young(pteval) || folio_test_young(folio) ||
1403 folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
1408 result = SCAN_PAGE_RO;
1409 } else if (cc->is_khugepaged &&
1411 (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1412 result = SCAN_LACK_REFERENCED_PAGE;
1414 result = SCAN_SUCCEED;
1417 pte_unmap_unlock(pte, ptl);
1418 if (result == SCAN_SUCCEED) {
1419 result = collapse_huge_page(mm, address, referenced,
1421 /* collapse_huge_page will return with the mmap_lock released */
1422 *mmap_locked = false;
1425 trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
1426 none_or_zero, result, unmapped);
1430 static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1432 struct mm_slot *slot = &mm_slot->slot;
1433 struct mm_struct *mm = slot->mm;
1435 lockdep_assert_held(&khugepaged_mm_lock);
1437 if (hpage_collapse_test_exit(mm)) {
1439 hash_del(&slot->hash);
1440 list_del(&slot->mm_node);
1443 * Not strictly needed because the mm exited already.
1445 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1448 /* khugepaged_mm_lock actually not necessary for the below */
1449 mm_slot_free(mm_slot_cache, mm_slot);
1455 /* hpage must be locked, and mmap_lock must be held */
1456 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1457 pmd_t *pmdp, struct page *hpage)
1459 struct vm_fault vmf = {
1466 VM_BUG_ON(!PageTransHuge(hpage));
1467 mmap_assert_locked(vma->vm_mm);
1469 if (do_set_pmd(&vmf, hpage))
1473 return SCAN_SUCCEED;
1477 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1480 * @mm: process address space where collapse happens
1481 * @addr: THP collapse address
1482 * @install_pmd: If a huge PMD should be installed
1484 * This function checks whether all the PTEs in the PMD are pointing to the
1485 * right THP. If so, retract the page table so the THP can refault in
1486 * as pmd-mapped. Possibly install a huge PMD mapping the THP.
1488 int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1491 struct mmu_notifier_range range;
1492 bool notified = false;
1493 unsigned long haddr = addr & HPAGE_PMD_MASK;
1494 struct vm_area_struct *vma = vma_lookup(mm, haddr);
1495 struct folio *folio;
1496 pte_t *start_pte, *pte;
1497 pmd_t *pmd, pgt_pmd;
1498 spinlock_t *pml = NULL, *ptl;
1499 int nr_ptes = 0, result = SCAN_FAIL;
1502 mmap_assert_locked(mm);
1504 /* First check VMA found, in case page tables are being torn down */
1505 if (!vma || !vma->vm_file ||
1506 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1507 return SCAN_VMA_CHECK;
1509 /* Fast check before locking page if already PMD-mapped */
1510 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1511 if (result == SCAN_PMD_MAPPED)
1515 * If we are here, we've succeeded in replacing all the native pages
1516 * in the page cache with a single hugepage. If a mm were to fault-in
1517 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1518 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1519 * analogously elide sysfs THP settings here.
1521 if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
1522 return SCAN_VMA_CHECK;
1524 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1525 if (userfaultfd_wp(vma))
1526 return SCAN_PTE_UFFD_WP;
1528 folio = filemap_lock_folio(vma->vm_file->f_mapping,
1529 linear_page_index(vma, haddr));
1531 return SCAN_PAGE_NULL;
1533 if (folio_order(folio) != HPAGE_PMD_ORDER) {
1534 result = SCAN_PAGE_COMPOUND;
1538 result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1544 * All pte entries have been removed and pmd cleared.
1545 * Skip all the pte checks and just update the pmd mapping.
1547 goto maybe_install_pmd;
1553 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1554 if (!start_pte) /* mmap_lock + page lock should prevent this */
1557 /* step 1: check all mapped PTEs are to the right huge page */
1558 for (i = 0, addr = haddr, pte = start_pte;
1559 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1561 pte_t ptent = ptep_get(pte);
1563 /* empty pte, skip */
1564 if (pte_none(ptent))
1567 /* page swapped out, abort */
1568 if (!pte_present(ptent)) {
1569 result = SCAN_PTE_NON_PRESENT;
1573 page = vm_normal_page(vma, addr, ptent);
1574 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1577 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1578 * page table, but the new page will not be a subpage of hpage.
1580 if (folio_page(folio, i) != page)
1584 pte_unmap_unlock(start_pte, ptl);
1585 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1586 haddr, haddr + HPAGE_PMD_SIZE);
1587 mmu_notifier_invalidate_range_start(&range);
1591 * pmd_lock covers a wider range than ptl, and (if split from mm's
1592 * page_table_lock) ptl nests inside pml. The less time we hold pml,
1593 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
1594 * inserts a valid as-if-COWed PTE without even looking up page cache.
1595 * So the folio's page lock does not protect from it; we must not drop
1596 * ptl before pgt_pmd is removed, so the uffd-private case needs pml taken now.
1598 if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
1599 pml = pmd_lock(mm, pmd);
1601 start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
1602 if (!start_pte) /* mmap_lock + page lock should prevent this */
1606 else if (ptl != pml)
1607 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1609 /* step 2: clear page table and adjust rmap */
1610 for (i = 0, addr = haddr, pte = start_pte;
1611 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1613 pte_t ptent = ptep_get(pte);
1615 if (pte_none(ptent))
1618 * We dropped ptl after the first scan, to do the mmu_notifier:
1619 * page lock stops more PTEs of the folio being faulted in, but
1620 * does not stop write faults COWing anon copies from existing
1621 * PTEs; and does not stop those being swapped out or migrated.
1623 if (!pte_present(ptent)) {
1624 result = SCAN_PTE_NON_PRESENT;
1627 page = vm_normal_page(vma, addr, ptent);
1628 if (folio_page(folio, i) != page)
1632 * Must clear entry, or a racing truncate may re-remove it.
1633 * TLB flush can be left until pmdp_collapse_flush() does it.
1634 * PTE dirty? Shmem page is already dirty; file is read-only.
1636 ptep_clear(mm, addr, pte);
1637 folio_remove_rmap_pte(folio, page, vma);
1641 pte_unmap(start_pte);
1645 /* step 3: set proper refcount and mm_counters. */
1647 folio_ref_sub(folio, nr_ptes);
1648 add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
1651 /* step 4: remove empty page table */
1653 pml = pmd_lock(mm, pmd);
1655 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1657 pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1658 pmdp_get_lockless_sync();
1663 mmu_notifier_invalidate_range_end(&range);
1666 page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
1667 pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1670 /* step 5: install pmd entry */
1671 result = install_pmd
1672 ? set_huge_pmd(vma, haddr, pmd, &folio->page)
1678 folio_ref_sub(folio, nr_ptes);
1679 add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
1682 pte_unmap_unlock(start_pte, ptl);
1683 if (pml && pml != ptl)
1686 mmu_notifier_invalidate_range_end(&range);
1688 folio_unlock(folio);
1693 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1695 struct vm_area_struct *vma;
1697 i_mmap_lock_read(mapping);
1698 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1699 struct mmu_notifier_range range;
1700 struct mm_struct *mm;
1702 pmd_t *pmd, pgt_pmd;
1705 bool skipped_uffd = false;
1708 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1709 * got written to. These VMAs are likely not worth removing
1710 * page tables from, as PMD-mapping is likely to be split later.
1712 if (READ_ONCE(vma->anon_vma))
1715 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1716 if (addr & ~HPAGE_PMD_MASK ||
1717 vma->vm_end < addr + HPAGE_PMD_SIZE)
1721 if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
1724 if (hpage_collapse_test_exit(mm))
1727 * When a vma is registered with uffd-wp, we cannot recycle
1728 * the page table because there may be pte markers installed.
1729 * Other vmas can still have the same file mapped hugely, but
1730 * skip this one: it will always be mapped in small page size
1731 * for uffd-wp registered ranges.
1733 if (userfaultfd_wp(vma))
1736 /* PTEs were notified when unmapped; but now for the PMD? */
1737 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1738 addr, addr + HPAGE_PMD_SIZE);
1739 mmu_notifier_invalidate_range_start(&range);
1741 pml = pmd_lock(mm, pmd);
1742 ptl = pte_lockptr(mm, pmd);
1744 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1747 * Huge page lock is still held, so normally the page table
1748 * must remain empty; and we have already skipped anon_vma
1749 * and userfaultfd_wp() vmas. But since the mmap_lock is not
1750 * held, it is still possible for a racing userfaultfd_ioctl()
1751 * to have inserted ptes or markers. Now that we hold ptlock,
1752 * repeating the anon_vma check protects from one category,
1753 * and repeating the userfaultfd_wp() check from another.
1755 if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
1756 skipped_uffd = true;
1758 pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
1759 pmdp_get_lockless_sync();
1766 mmu_notifier_invalidate_range_end(&range);
1768 if (!skipped_uffd) {
1770 page_table_check_pte_clear_range(mm, addr, pgt_pmd);
1771 pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1774 i_mmap_unlock_read(mapping);
1778 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1780 * @mm: process address space where collapse happens
1781 * @addr: virtual collapse start address
1782 * @file: file that the collapse happens on
1783 * @start: collapse start address
1784 * @cc: collapse context and scratchpad
1786 * Basic scheme is simple, details are more complex:
1787 * - allocate and lock a new huge page;
1788 * - scan page cache, locking old pages
1789 * + swap/gup in pages if necessary;
1790 * - copy data to new page
1791 * - handle shmem holes
1792 * + re-validate that holes weren't filled by someone else
1793 * + check for userfaultfd
1794 * - finalize updates to the page cache;
1795 * - if replacing succeeds:
1796 * + unlock huge page;
1798 * - if replacing fails:
1799 * + unlock old pages
1800 * + unlock and free huge page;
1802 static int collapse_file(struct mm_struct *mm, unsigned long addr,
1803 struct file *file, pgoff_t start,
1804 struct collapse_control *cc)
1806 struct address_space *mapping = file->f_mapping;
1808 struct folio *folio, *tmp, *new_folio;
1809 pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1810 LIST_HEAD(pagelist);
1811 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1812 int nr_none = 0, result = SCAN_SUCCEED;
1813 bool is_shmem = shmem_file(file);
1815 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1816 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1818 result = alloc_charge_folio(&new_folio, mm, cc);
1819 if (result != SCAN_SUCCEED)
1822 __folio_set_locked(new_folio);
1824 __folio_set_swapbacked(new_folio);
1825 new_folio->index = start;
1826 new_folio->mapping = mapping;
1829 * Ensure we have slots for all the pages in the range. This is
1830 * almost certainly a no-op because most of the pages must be present
1834 xas_create_range(&xas);
1835 if (!xas_error(&xas))
1837 xas_unlock_irq(&xas);
1838 if (!xas_nomem(&xas, GFP_KERNEL)) {
1844 for (index = start; index < end; index++) {
1845 xas_set(&xas, index);
1846 folio = xas_load(&xas);
1848 VM_BUG_ON(index != xas.xa_index);
1852 * Stop if extent has been truncated or
1853 * hole-punched, and is now completely empty.
1856 if (index == start) {
1857 if (!xas_next_entry(&xas, end - 1)) {
1858 result = SCAN_TRUNCATED;
1866 if (xa_is_value(folio) || !folio_test_uptodate(folio)) {
1867 xas_unlock_irq(&xas);
1868 /* swap in or instantiate fallocated page */
1869 if (shmem_get_folio(mapping->host, index,
1870 &folio, SGP_NOALLOC)) {
1874 /* drain lru cache to help isolate_lru_page() */
1876 } else if (folio_trylock(folio)) {
1878 xas_unlock_irq(&xas);
1880 result = SCAN_PAGE_LOCK;
1883 } else { /* !is_shmem */
1884 if (!folio || xa_is_value(folio)) {
1885 xas_unlock_irq(&xas);
1886 page_cache_sync_readahead(mapping, &file->f_ra,
1889 /* drain lru cache to help isolate_lru_page() */
1891 folio = filemap_lock_folio(mapping, index);
1892 if (IS_ERR(folio)) {
1896 } else if (folio_test_dirty(folio)) {
1898 * khugepaged only works on read-only fd,
1899 * so this page is dirty because it hasn't
1900 * been flushed since first write. There
1901 * won't be new dirty pages.
1903 * Trigger async flush here and hope the
1904 * writeback is done when khugepaged
1905 * revisits this page.
1907 * This is a one-off situation. We are not
1908 * forcing writeback in loop.
1910 xas_unlock_irq(&xas);
1911 filemap_flush(mapping);
1914 } else if (folio_test_writeback(folio)) {
1915 xas_unlock_irq(&xas);
1918 } else if (folio_trylock(folio)) {
1920 xas_unlock_irq(&xas);
1922 result = SCAN_PAGE_LOCK;
1928 * The folio must be locked, so we can drop the i_pages lock
1929 * without racing with truncate.
1931 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1933 /* make sure the folio is up to date */
1934 if (unlikely(!folio_test_uptodate(folio))) {
1940 * If file was truncated then extended, or hole-punched, before
1941 * we locked the first folio, then a THP might be there already.
1942 * This will be discovered on the first iteration.
1944 if (folio_test_large(folio)) {
1945 result = folio_order(folio) == HPAGE_PMD_ORDER &&
1946 folio->index == start
1947 /* Maybe PMD-mapped */
1948 ? SCAN_PTE_MAPPED_HUGEPAGE
1949 : SCAN_PAGE_COMPOUND;
1953 if (folio_mapping(folio) != mapping) {
1954 result = SCAN_TRUNCATED;
1958 if (!is_shmem && (folio_test_dirty(folio) ||
1959 folio_test_writeback(folio))) {
1961 * khugepaged only works on read-only fd, so this
1962 * folio is dirty because it hasn't been flushed
1963 * since first write.
1969 if (!folio_isolate_lru(folio)) {
1970 result = SCAN_DEL_PAGE_LRU;
1974 if (!filemap_release_folio(folio, GFP_KERNEL)) {
1975 result = SCAN_PAGE_HAS_PRIVATE;
1976 folio_putback_lru(folio);
1980 if (folio_mapped(folio))
1982 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1986 VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio);
1989 * We control three references to the folio:
1990 * - we hold a pin on it;
1991 * - one reference from page cache;
1992 * - one from folio_isolate_lru();
1993 * If those are the only references, then any new usage
1994 * of the folio will have to fetch it from the page
1995 * cache. That requires locking the folio to handle
1996 * truncate, so any new usage will be blocked until we
1997 * unlock folio after collapse/during rollback.
1999 if (folio_ref_count(folio) != 3) {
2000 result = SCAN_PAGE_COUNT;
2001 xas_unlock_irq(&xas);
2002 folio_putback_lru(folio);
2007 * Accumulate the folios that are being collapsed.
2009 list_add_tail(&folio->lru, &pagelist);
2012 folio_unlock(folio);
2018 filemap_nr_thps_inc(mapping);
2020 * Paired with the fence in do_dentry_open() -> get_write_access()
2021 * to ensure i_writecount is up to date and the update to nr_thps
2022 * is visible. Ensures the page cache will be truncated if the
2023 * file is opened writable.
2026 if (inode_is_open_for_write(mapping->host)) {
2028 filemap_nr_thps_dec(mapping);
2033 xas_unlock_irq(&xas);
2037 * If collapse is successful, flush must be done now before copying.
2038 * If collapse is unsuccessful, does flush actually need to be done?
2039 * Do it anyway, to clear the state.
2041 try_to_unmap_flush();
2043 if (result == SCAN_SUCCEED && nr_none &&
2044 !shmem_charge(mapping->host, nr_none))
2046 if (result != SCAN_SUCCEED) {
2052 * The old folios are locked, so they won't change anymore.
2055 dst = folio_page(new_folio, 0);
2056 list_for_each_entry(folio, &pagelist, lru) {
2057 while (index < folio->index) {
2058 clear_highpage(dst);
2062 if (copy_mc_highpage(dst, folio_page(folio, 0)) > 0) {
2063 result = SCAN_COPY_MC;
2069 while (index < end) {
2070 clear_highpage(dst);
2076 struct vm_area_struct *vma;
2077 int nr_none_check = 0;
2079 i_mmap_lock_read(mapping);
2082 xas_set(&xas, start);
2083 for (index = start; index < end; index++) {
2084 if (!xas_next(&xas)) {
2085 xas_store(&xas, XA_RETRY_ENTRY);
2086 if (xas_error(&xas)) {
2087 result = SCAN_STORE_FAILED;
2094 if (nr_none != nr_none_check) {
2095 result = SCAN_PAGE_FILLED;
2100 * If userspace observed a missing page in a VMA with
2101 * a MODE_MISSING userfaultfd, then it might expect a
2102 * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
2103 * roll back to avoid suppressing such an event. Since
2104 * wp/minor userfaultfds don't give userspace any
2105 * guarantees that the kernel doesn't fill a missing
2106 * page with a zero page, they don't matter here.
2108 * Any userfaultfds registered after this point will
2109 * not be able to observe any missing pages due to the
2110 * previously inserted retry entries.
2112 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2113 if (userfaultfd_missing(vma)) {
2114 result = SCAN_EXCEED_NONE_PTE;
2120 i_mmap_unlock_read(mapping);
2121 if (result != SCAN_SUCCEED) {
2122 xas_set(&xas, start);
2123 for (index = start; index < end; index++) {
2124 if (xas_next(&xas) == XA_RETRY_ENTRY)
2125 xas_store(&xas, NULL);
2128 xas_unlock_irq(&xas);
2136 __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
2138 __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
2141 __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
2142 /* nr_none is always 0 for non-shmem. */
2143 __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
2147 * Mark new_folio as uptodate before inserting it into the
2148 * page cache so that it isn't mistaken for a fallocated but not-yet-uptodate page.
2151 folio_mark_uptodate(new_folio);
2152 folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
2155 folio_mark_dirty(new_folio);
2156 folio_add_lru(new_folio);
2158 /* Join all the small entries into a single multi-index entry. */
2159 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2160 xas_store(&xas, new_folio);
2161 WARN_ON_ONCE(xas_error(&xas));
2162 xas_unlock_irq(&xas);
2165 * Remove pte page tables, so we can re-fault the page as huge.
2166 * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
2168 retract_page_tables(mapping, start);
2169 if (cc && !cc->is_khugepaged)
2170 result = SCAN_PTE_MAPPED_HUGEPAGE;
2171 folio_unlock(new_folio);
2174 * The collapse has succeeded, so free the old folios.
2176 list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
2177 list_del(&folio->lru);
2178 folio->mapping = NULL;
2179 folio_clear_active(folio);
2180 folio_clear_unevictable(folio);
2181 folio_unlock(folio);
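/*
 * Drop the three references enumerated earlier: our pin, the old
 * page cache reference (now taken over by new_folio) and the LRU
 * isolation reference.
 */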
2182 folio_put_refs(folio, 3);
2188 /* Something went wrong: roll back page cache changes */
2191 mapping->nrpages -= nr_none;
2192 xas_unlock_irq(&xas);
2193 shmem_uncharge(mapping->host, nr_none);
2196 list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
2197 list_del(&folio->lru);
2198 folio_unlock(folio);
2199 folio_putback_lru(folio);
2203 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
2204 * file only. This undo is not needed unless failure is
2205 * due to SCAN_COPY_MC.
2207 if (!is_shmem && result == SCAN_COPY_MC) {
2208 filemap_nr_thps_dec(mapping);
2210 * Paired with the fence in do_dentry_open() -> get_write_access()
2211 * to ensure the update to nr_thps is visible.
2216 new_folio->mapping = NULL;
2218 folio_unlock(new_folio);
2219 folio_put(new_folio);
2221 VM_BUG_ON(!list_empty(&pagelist));
2222 trace_mm_khugepaged_collapse_file(mm, new_folio, index, is_shmem, addr, file, HPAGE_PMD_NR, result);
2226 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2227 struct file *file, pgoff_t start,
2228 struct collapse_control *cc)
2230 struct folio *folio = NULL;
2231 struct address_space *mapping = file->f_mapping;
2232 XA_STATE(xas, &mapping->i_pages, start);
2234 int node = NUMA_NO_NODE;
2235 int result = SCAN_SUCCEED;
2239 memset(cc->node_load, 0, sizeof(cc->node_load));
2240 nodes_clear(cc->alloc_nmask);
2242 xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) {
2243 if (xas_retry(&xas, folio))
2246 if (xa_is_value(folio)) {
2248 if (cc->is_khugepaged &&
2249 swap > khugepaged_max_ptes_swap) {
2250 result = SCAN_EXCEED_SWAP_PTE;
2251 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2258 * TODO: khugepaged should compact smaller compound pages
2259 * into a PMD sized page
2261 if (folio_test_large(folio)) {
2262 result = folio_order(folio) == HPAGE_PMD_ORDER &&
2263 folio->index == start
2264 /* Maybe PMD-mapped */
2265 ? SCAN_PTE_MAPPED_HUGEPAGE
2266 : SCAN_PAGE_COMPOUND;
2268 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2269 * by the caller won't touch the page cache, and so
2270 * it's safe to skip LRU and refcount checks before returning.
2276 node = folio_nid(folio);
2277 if (hpage_collapse_scan_abort(node, cc)) {
2278 result = SCAN_SCAN_ABORT;
2281 cc->node_load[node]++;
2283 if (!folio_test_lru(folio)) {
2284 result = SCAN_PAGE_LRU;
2288 if (folio_ref_count(folio) !=
2289 1 + folio_mapcount(folio) + folio_test_private(folio)) {
2290 result = SCAN_PAGE_COUNT;
2295 * We probably should check if the folio is referenced
2296 * here, but nobody would transfer pte_young() to
2297 * folio_test_referenced() for us. And rmap walk here
2298 * is just too costly...
2303 if (need_resched()) {
2310 if (result == SCAN_SUCCEED) {
2311 if (cc->is_khugepaged &&
2312 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2313 result = SCAN_EXCEED_NONE_PTE;
2314 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2316 result = collapse_file(mm, addr, file, start, cc);
2320 trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
2324 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2325 struct file *file, pgoff_t start,
2326 struct collapse_control *cc)
2332 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2333 struct collapse_control *cc)
2334 __releases(&khugepaged_mm_lock)
2335 __acquires(&khugepaged_mm_lock)
2337 struct vma_iterator vmi;
2338 struct khugepaged_mm_slot *mm_slot;
2339 struct mm_slot *slot;
2340 struct mm_struct *mm;
2341 struct vm_area_struct *vma;
2345 lockdep_assert_held(&khugepaged_mm_lock);
2346 *result = SCAN_FAIL;
2348 if (khugepaged_scan.mm_slot) {
2349 mm_slot = khugepaged_scan.mm_slot;
2350 slot = &mm_slot->slot;
2352 slot = list_entry(khugepaged_scan.mm_head.next,
2353 struct mm_slot, mm_node);
2354 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2355 khugepaged_scan.address = 0;
2356 khugepaged_scan.mm_slot = mm_slot;
2358 spin_unlock(&khugepaged_mm_lock);
2362 * Don't wait for semaphore (to avoid long wait times). Just move to
2363 * the next mm on the list.
2366 if (unlikely(!mmap_read_trylock(mm)))
2367 goto breakouterloop_mmap_lock;
2370 if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2371 goto breakouterloop;
2373 vma_iter_init(&vmi, mm, khugepaged_scan.address);
2374 for_each_vma(vmi, vma) {
2375 unsigned long hstart, hend;
2378 if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
2382 if (!thp_vma_allowable_order(vma, vma->vm_flags,
2383 TVA_ENFORCE_SYSFS, PMD_ORDER)) {
2388 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2389 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2390 if (khugepaged_scan.address > hend)
2392 if (khugepaged_scan.address < hstart)
2393 khugepaged_scan.address = hstart;
2394 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2396 while (khugepaged_scan.address < hend) {
2397 bool mmap_locked = true;
2400 if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2401 goto breakouterloop;
2403 VM_BUG_ON(khugepaged_scan.address < hstart ||
2404 khugepaged_scan.address + HPAGE_PMD_SIZE >
2406 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2407 struct file *file = get_file(vma->vm_file);
2408 pgoff_t pgoff = linear_page_index(vma,
2409 khugepaged_scan.address);
2411 mmap_read_unlock(mm);
2412 mmap_locked = false;
2413 *result = hpage_collapse_scan_file(mm,
2414 khugepaged_scan.address, file, pgoff, cc);
2416 if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
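/*
 * The file scan found a hugepage that is already installed in the
 * page cache but only PTE-mapped in this mm.  Retake the mmap lock
 * and let collapse_pte_mapped_thp() replace the PTE table with a
 * PMD mapping; if it reports the range is already PMD-mapped,
 * treat that as success.
 */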
2418 if (hpage_collapse_test_exit_or_disable(mm))
2419 goto breakouterloop;
2420 *result = collapse_pte_mapped_thp(mm,
2421 khugepaged_scan.address, false);
2422 if (*result == SCAN_PMD_MAPPED)
2423 *result = SCAN_SUCCEED;
2424 mmap_read_unlock(mm);
2427 *result = hpage_collapse_scan_pmd(mm, vma,
2428 khugepaged_scan.address, &mmap_locked, cc);
2431 if (*result == SCAN_SUCCEED)
2432 ++khugepaged_pages_collapsed;
2434 /* move to next address */
2435 khugepaged_scan.address += HPAGE_PMD_SIZE;
2436 progress += HPAGE_PMD_NR;
2439 * We released mmap_lock, so break out of the loop.  Note
2440 * that we drop mmap_lock before all hugepage
2441 * allocations, so if allocation fails, we are
2442 * guaranteed to break here and report the
2443 * correct result back to the caller.
2445 goto breakouterloop_mmap_lock;
2446 if (progress >= pages)
2447 goto breakouterloop;
2451 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2452 breakouterloop_mmap_lock:
2454 spin_lock(&khugepaged_mm_lock);
2455 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2457 * Release the current mm_slot if this mm is about to die, or
2458 * if we scanned all vmas of this mm.
2460 if (hpage_collapse_test_exit(mm) || !vma) {
2462 * Make sure that if mm_users is reaching zero while
2463 * khugepaged runs here, khugepaged_exit will find
2464 * mm_slot not pointing to the exiting mm.
2466 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2467 slot = list_entry(slot->mm_node.next,
2468 struct mm_slot, mm_node);
2469 khugepaged_scan.mm_slot =
2470 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2471 khugepaged_scan.address = 0;
2473 khugepaged_scan.mm_slot = NULL;
2474 khugepaged_full_scans++;
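/*
 * The cursor either advances to the next mm on the list or, once the
 * end of the list is reached, resets to NULL so the next call starts
 * over from the head; khugepaged_full_scans counts these complete
 * passes over all registered mms.
 */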
2477 collect_mm_slot(mm_slot);
2483 static int khugepaged_has_work(void)
2485 return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
2488 static int khugepaged_wait_event(void)
2490 return !list_empty(&khugepaged_scan.mm_head) ||
2491 kthread_should_stop();
2494 static void khugepaged_do_scan(struct collapse_control *cc)
2496 unsigned int progress = 0, pass_through_head = 0;
2497 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2499 int result = SCAN_SUCCEED;
2501 lru_add_drain_all();
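/*
 * Folios still sitting on the per-CPU LRU-add batches are not yet on
 * an LRU list and would fail the folio_test_lru() checks during the
 * scan, so flush those batches on all CPUs first.
 */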
2506 if (unlikely(kthread_should_stop()))
2509 spin_lock(&khugepaged_mm_lock);
2510 if (!khugepaged_scan.mm_slot)
2511 pass_through_head++;
2512 if (khugepaged_has_work() &&
2513 pass_through_head < 2)
2514 progress += khugepaged_scan_mm_slot(pages - progress,
2518 spin_unlock(&khugepaged_mm_lock);
2520 if (progress >= pages)
2523 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2525 * If the allocation fails the first time, sleep for
2526 * a while; if it fails again, cancel the scan.
2531 khugepaged_alloc_sleep();
2536 static bool khugepaged_should_wakeup(void)
2538 return kthread_should_stop() ||
2539 time_after_eq(jiffies, khugepaged_sleep_expire);
2542 static void khugepaged_wait_work(void)
2544 if (khugepaged_has_work()) {
2545 const unsigned long scan_sleep_jiffies =
2546 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2548 if (!scan_sleep_jiffies)
2551 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2552 wait_event_freezable_timeout(khugepaged_wait,
2553 khugepaged_should_wakeup(),
2554 scan_sleep_jiffies);
2558 if (hugepage_pmd_enabled())
2559 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2562 static int khugepaged(void *none)
2564 struct khugepaged_mm_slot *mm_slot;
2567 set_user_nice(current, MAX_NICE);
2569 while (!kthread_should_stop()) {
2570 khugepaged_do_scan(&khugepaged_collapse_control);
2571 khugepaged_wait_work();
2574 spin_lock(&khugepaged_mm_lock);
2575 mm_slot = khugepaged_scan.mm_slot;
2576 khugepaged_scan.mm_slot = NULL;
2578 collect_mm_slot(mm_slot);
2579 spin_unlock(&khugepaged_mm_lock);
2583 static void set_recommended_min_free_kbytes(void)
2587 unsigned long recommended_min;
2589 if (!hugepage_pmd_enabled()) {
2590 calculate_min_free_kbytes();
2594 for_each_populated_zone(zone) {
2596 * We don't need to worry about fragmentation of
2597 * ZONE_MOVABLE since it only has movable pages.
2599 if (zone_idx(zone) > gfp_zone(GFP_USER))
2605 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2606 recommended_min = pageblock_nr_pages * nr_zones * 2;
2609 * Make sure that on average at least two pageblocks are almost free
2610 * of another type, one for a migratetype to fall back to and a
2611 * second to avoid subsequent fallbacks of other types. There are 3
2612 * MIGRATE_TYPES we care about.
2614 recommended_min += pageblock_nr_pages * nr_zones *
2615 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2617 /* never allow reserving more than 5% of lowmem */
2618 recommended_min = min(recommended_min,
2619 (unsigned long) nr_free_buffer_pages() / 20);
2620 recommended_min <<= (PAGE_SHIFT-10);
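/*
 * Rough numbers for x86-64 with 4K pages (pageblock_nr_pages = 512,
 * MIGRATE_PCPTYPES = 3): 512 * 2 + 512 * 3 * 3 = 5632 pages per
 * qualifying zone, i.e. about 22MB of min_free_kbytes per zone,
 * before the 5%-of-lowmem cap is applied.
 */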
2622 if (recommended_min > min_free_kbytes) {
2623 if (user_min_free_kbytes >= 0)
2624 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2625 min_free_kbytes, recommended_min);
2627 min_free_kbytes = recommended_min;
2631 setup_per_zone_wmarks();
2634 int start_stop_khugepaged(void)
2638 mutex_lock(&khugepaged_mutex);
2639 if (hugepage_pmd_enabled()) {
2640 if (!khugepaged_thread)
2641 khugepaged_thread = kthread_run(khugepaged, NULL,
2643 if (IS_ERR(khugepaged_thread)) {
2644 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2645 err = PTR_ERR(khugepaged_thread);
2646 khugepaged_thread = NULL;
2650 if (!list_empty(&khugepaged_scan.mm_head))
2651 wake_up_interruptible(&khugepaged_wait);
2652 } else if (khugepaged_thread) {
2653 kthread_stop(khugepaged_thread);
2654 khugepaged_thread = NULL;
2656 set_recommended_min_free_kbytes();
2658 mutex_unlock(&khugepaged_mutex);
2662 void khugepaged_min_free_kbytes_update(void)
2664 mutex_lock(&khugepaged_mutex);
2665 if (hugepage_pmd_enabled() && khugepaged_thread)
2666 set_recommended_min_free_kbytes();
2667 mutex_unlock(&khugepaged_mutex);
2670 bool current_is_khugepaged(void)
2672 return kthread_func(current) == khugepaged;
2675 static int madvise_collapse_errno(enum scan_result r)
2678 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2679 * actionable feedback to the caller, so it may take an appropriate
2680 * fallback measure depending on the nature of the failure.
2683 case SCAN_ALLOC_HUGE_PAGE_FAIL:
2685 case SCAN_CGROUP_CHARGE_FAIL:
2686 case SCAN_EXCEED_NONE_PTE:
2688 /* Resource temporarily unavailable - trying again might succeed */
2689 case SCAN_PAGE_COUNT:
2690 case SCAN_PAGE_LOCK:
2692 case SCAN_DEL_PAGE_LRU:
2693 case SCAN_PAGE_FILLED:
2696 * Other: Trying again is unlikely to succeed, or the error is intrinsic
2697 * to the specified memory range. khugepaged likely won't be able to collapse
2705 int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2706 unsigned long start, unsigned long end)
2708 struct collapse_control *cc;
2709 struct mm_struct *mm = vma->vm_mm;
2710 unsigned long hstart, hend, addr;
2711 int thps = 0, last_fail = SCAN_FAIL;
2712 bool mmap_locked = true;
2714 BUG_ON(vma->vm_start > start);
2715 BUG_ON(vma->vm_end < end);
2719 if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
2722 cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2725 cc->is_khugepaged = false;
2728 lru_add_drain_all();
2730 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2731 hend = end & HPAGE_PMD_MASK;
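/*
 * As in khugepaged_scan_mm_slot(): round the requested range inward to
 * PMD boundaries so that only fully covered, PMD-aligned windows are
 * attempted.
 */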
2733 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2734 int result = SCAN_FAIL;
2740 result = hugepage_vma_revalidate(mm, addr, false, &vma,
2742 if (result != SCAN_SUCCEED) {
2747 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2749 mmap_assert_locked(mm);
2750 memset(cc->node_load, 0, sizeof(cc->node_load));
2751 nodes_clear(cc->alloc_nmask);
2752 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2753 struct file *file = get_file(vma->vm_file);
2754 pgoff_t pgoff = linear_page_index(vma, addr);
2756 mmap_read_unlock(mm);
2757 mmap_locked = false;
2758 result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2762 result = hpage_collapse_scan_pmd(mm, vma, addr,
2766 *prev = NULL; /* Tell caller we dropped mmap_lock */
2771 case SCAN_PMD_MAPPED:
2774 case SCAN_PTE_MAPPED_HUGEPAGE:
2775 BUG_ON(mmap_locked);
2778 result = collapse_pte_mapped_thp(mm, addr, true);
2779 mmap_read_unlock(mm);
2781 /* Whitelisted set of results where continuing is OK */
2783 case SCAN_PTE_NON_PRESENT:
2784 case SCAN_PTE_UFFD_WP:
2786 case SCAN_LACK_REFERENCED_PAGE:
2787 case SCAN_PAGE_NULL:
2788 case SCAN_PAGE_COUNT:
2789 case SCAN_PAGE_LOCK:
2790 case SCAN_PAGE_COMPOUND:
2792 case SCAN_DEL_PAGE_LRU:
2797 /* Other error, exit */
2803 /* Caller expects us to hold mmap_lock on return */
2807 mmap_assert_locked(mm);
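/*
 * Report success only if every PMD-sized window in the aligned range
 * ended up backed by a huge mapping; otherwise translate the most
 * recent failure into an errno for the caller.
 */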
2811 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2812 : madvise_collapse_errno(last_fail);
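/*
 * Illustrative userspace sketch (not part of this file): how a process
 * might request a synchronous collapse of an anonymous region via
 * madvise(MADV_COLLAPSE).  Assumes a kernel that supports MADV_COLLAPSE
 * (value 25 in the UAPI headers); the buffer size and mmap flags below
 * are arbitrary example values.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* from the <linux/mman.h> UAPI, if libc lacks it */
#endif

int main(void)
{
	size_t len = 4UL << 20;	/* 4MB: two PMD-sized windows on x86-64 */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0, len);	/* fault the pages in first */

	/* Returns 0 only if the whole range is now PMD-mapped; errno hints why not. */
	if (madvise(buf, len, MADV_COLLAPSE))
		perror("madvise(MADV_COLLAPSE)");
	return 0;
}
#endif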