1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
23 #include <asm/pgalloc.h>
33 SCAN_LACK_REFERENCED_PAGE,
47 SCAN_ALLOC_HUGE_PAGE_FAIL,
48 SCAN_CGROUP_CHARGE_FAIL,
51 SCAN_PAGE_HAS_PRIVATE,
54 #define CREATE_TRACE_POINTS
55 #include <trace/events/huge_memory.h>
57 /* default scan 8*512 ptes (or vmas) every 10 seconds */
58 static unsigned int khugepaged_pages_to_scan __read_mostly;
59 static unsigned int khugepaged_pages_collapsed;
60 static unsigned int khugepaged_full_scans;
61 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
62 /* during fragmentation, poll the hugepage allocator once every minute */
63 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
64 static unsigned long khugepaged_sleep_expire;
65 static DEFINE_SPINLOCK(khugepaged_mm_lock);
66 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
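/*
 * The khugepaged kthread sleeps on khugepaged_wait; it is woken when a new
 * mm is registered for scanning or when the sleep tunables below are updated.
 */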
68  * by default, collapse hugepages if there is at least one pte mapped as
69  * it would have been had the vma been large enough during page fault
72 static unsigned int khugepaged_max_ptes_none __read_mostly;
73 static unsigned int khugepaged_max_ptes_swap __read_mostly;
75 #define MM_SLOTS_HASH_BITS 10
76 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
78 static struct kmem_cache *mm_slot_cache __read_mostly;
81 * struct mm_slot - hash lookup from mm to mm_slot
82 * @hash: hash collision list
83 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
84 * @mm: the mm that this information is valid for
87 struct hlist_node hash;
88 struct list_head mm_node;
93 * struct khugepaged_scan - cursor for scanning
94 * @mm_head: the head of the mm list to scan
95 * @mm_slot: the current mm_slot we are scanning
96 * @address: the next address inside that to be scanned
98 * There is only the one khugepaged_scan instance of this cursor structure.
100 struct khugepaged_scan {
101 struct list_head mm_head;
102 struct mm_slot *mm_slot;
103 unsigned long address;
106 static struct khugepaged_scan khugepaged_scan = {
107 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
111 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
112 struct kobj_attribute *attr,
115 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
118 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
119 struct kobj_attribute *attr,
120 const char *buf, size_t count)
125 err = kstrtoul(buf, 10, &msecs);
126 if (err || msecs > UINT_MAX)
129 khugepaged_scan_sleep_millisecs = msecs;
130 khugepaged_sleep_expire = 0;
131 wake_up_interruptible(&khugepaged_wait);
135 static struct kobj_attribute scan_sleep_millisecs_attr =
136 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
137 scan_sleep_millisecs_store);
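/*
 * These attributes are exported via khugepaged_attr_group below and, assuming
 * sysfs is mounted at /sys, show up under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.:
 *
 *   echo 5000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 */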
139 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
140 struct kobj_attribute *attr,
143 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
146 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
147 struct kobj_attribute *attr,
148 const char *buf, size_t count)
153 err = kstrtoul(buf, 10, &msecs);
154 if (err || msecs > UINT_MAX)
157 khugepaged_alloc_sleep_millisecs = msecs;
158 khugepaged_sleep_expire = 0;
159 wake_up_interruptible(&khugepaged_wait);
163 static struct kobj_attribute alloc_sleep_millisecs_attr =
164 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
165 alloc_sleep_millisecs_store);
167 static ssize_t pages_to_scan_show(struct kobject *kobj,
168 struct kobj_attribute *attr,
171 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
173 static ssize_t pages_to_scan_store(struct kobject *kobj,
174 struct kobj_attribute *attr,
175 const char *buf, size_t count)
180 err = kstrtoul(buf, 10, &pages);
181 if (err || !pages || pages > UINT_MAX)
184 khugepaged_pages_to_scan = pages;
188 static struct kobj_attribute pages_to_scan_attr =
189 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
190 pages_to_scan_store);
192 static ssize_t pages_collapsed_show(struct kobject *kobj,
193 struct kobj_attribute *attr,
196 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
198 static struct kobj_attribute pages_collapsed_attr =
199 __ATTR_RO(pages_collapsed);
201 static ssize_t full_scans_show(struct kobject *kobj,
202 struct kobj_attribute *attr,
205 return sprintf(buf, "%u\n", khugepaged_full_scans);
207 static struct kobj_attribute full_scans_attr =
208 __ATTR_RO(full_scans);
210 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
211 struct kobj_attribute *attr, char *buf)
213 return single_hugepage_flag_show(kobj, attr, buf,
214 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
216 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
217 struct kobj_attribute *attr,
218 const char *buf, size_t count)
220 return single_hugepage_flag_store(kobj, attr, buf, count,
221 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
223 static struct kobj_attribute khugepaged_defrag_attr =
224 __ATTR(defrag, 0644, khugepaged_defrag_show,
225 khugepaged_defrag_store);
228  * max_ptes_none controls whether khugepaged should collapse hugepages over
229  * any unmapped ptes, in turn potentially increasing the memory
230  * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
231 * reduce the available free memory in the system as it
232 * runs. Increasing max_ptes_none will instead potentially reduce the
233 * free memory in the system during the khugepaged scan.
235 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
236 struct kobj_attribute *attr,
239 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
241 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
242 struct kobj_attribute *attr,
243 const char *buf, size_t count)
246 unsigned long max_ptes_none;
248 err = kstrtoul(buf, 10, &max_ptes_none);
249 if (err || max_ptes_none > HPAGE_PMD_NR-1)
252 khugepaged_max_ptes_none = max_ptes_none;
256 static struct kobj_attribute khugepaged_max_ptes_none_attr =
257 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
258 khugepaged_max_ptes_none_store);
260 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
261 struct kobj_attribute *attr,
264 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
267 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
268 struct kobj_attribute *attr,
269 const char *buf, size_t count)
272 unsigned long max_ptes_swap;
274 err = kstrtoul(buf, 10, &max_ptes_swap);
275 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
278 khugepaged_max_ptes_swap = max_ptes_swap;
283 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
284 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
285 khugepaged_max_ptes_swap_store);
287 static struct attribute *khugepaged_attr[] = {
288 &khugepaged_defrag_attr.attr,
289 &khugepaged_max_ptes_none_attr.attr,
290 &pages_to_scan_attr.attr,
291 &pages_collapsed_attr.attr,
292 &full_scans_attr.attr,
293 &scan_sleep_millisecs_attr.attr,
294 &alloc_sleep_millisecs_attr.attr,
295 &khugepaged_max_ptes_swap_attr.attr,
299 struct attribute_group khugepaged_attr_group = {
300 .attrs = khugepaged_attr,
301 .name = "khugepaged",
303 #endif /* CONFIG_SYSFS */
305 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
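/*
 * Any vma with one of the bits above set (special mappings or hugetlbfs) is
 * never considered for collapse by khugepaged.
 */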
307 int hugepage_madvise(struct vm_area_struct *vma,
308 unsigned long *vm_flags, int advice)
314 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
315 * can't handle this properly after s390_enable_sie, so we simply
316 * ignore the madvise to prevent qemu from causing a SIGSEGV.
318 if (mm_has_pgste(vma->vm_mm))
321 *vm_flags &= ~VM_NOHUGEPAGE;
322 *vm_flags |= VM_HUGEPAGE;
324  * If the vma becomes good for khugepaged to scan,
325  * register it here without waiting for a page fault that
326 * may not happen any time soon.
328 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
329 khugepaged_enter_vma_merge(vma, *vm_flags))
332 case MADV_NOHUGEPAGE:
333 *vm_flags &= ~VM_HUGEPAGE;
334 *vm_flags |= VM_NOHUGEPAGE;
336 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
337 * this vma even if we leave the mm registered in khugepaged if
338 * it got registered before VM_NOHUGEPAGE was set.
346 int __init khugepaged_init(void)
348 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
349 sizeof(struct mm_slot),
350 __alignof__(struct mm_slot), 0, NULL);
354 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
355 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
356 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
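/*
 * With HPAGE_PMD_NR == 512 (x86-64 with 2MiB huge pages, for example) these
 * defaults mean: scan up to 4096 ptes per pass, and tolerate up to 511 empty
 * ptes and up to 64 swapped-out ptes per prospective hugepage.
 */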
361 void __init khugepaged_destroy(void)
363 kmem_cache_destroy(mm_slot_cache);
366 static inline struct mm_slot *alloc_mm_slot(void)
368 if (!mm_slot_cache) /* initialization failed */
370 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
373 static inline void free_mm_slot(struct mm_slot *mm_slot)
375 kmem_cache_free(mm_slot_cache, mm_slot);
378 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
380 struct mm_slot *mm_slot;
382 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
383 if (mm == mm_slot->mm)
389 static void insert_to_mm_slots_hash(struct mm_struct *mm,
390 struct mm_slot *mm_slot)
393 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
396 static inline int khugepaged_test_exit(struct mm_struct *mm)
398 return atomic_read(&mm->mm_users) == 0;
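/*
 * mm_users == 0 means the last user of this mm is gone and exit_mmap() may
 * already be tearing down its page tables, so khugepaged must back off.
 */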
401 static bool hugepage_vma_check(struct vm_area_struct *vma,
402 unsigned long vm_flags)
404 if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
405 (vm_flags & VM_NOHUGEPAGE) ||
406 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
409 if (shmem_file(vma->vm_file) ||
410 (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
412 (vm_flags & VM_DENYWRITE))) {
413 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
415 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
418 if (!vma->anon_vma || vma->vm_ops)
420 if (is_vma_temporary_stack(vma))
422 return !(vm_flags & VM_NO_KHUGEPAGED);
425 int __khugepaged_enter(struct mm_struct *mm)
427 struct mm_slot *mm_slot;
430 mm_slot = alloc_mm_slot();
434 /* __khugepaged_exit() must not run from under us */
435 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
436 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
437 free_mm_slot(mm_slot);
441 spin_lock(&khugepaged_mm_lock);
442 insert_to_mm_slots_hash(mm, mm_slot);
444 * Insert just behind the scanning cursor, to let the area settle
447 wakeup = list_empty(&khugepaged_scan.mm_head);
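/*
 * khugepaged may be sleeping on an empty scan list; remember whether a
 * wakeup is needed once the new mm has been queued.
 */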
448 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
449 spin_unlock(&khugepaged_mm_lock);
453 wake_up_interruptible(&khugepaged_wait);
458 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
459 unsigned long vm_flags)
461 unsigned long hstart, hend;
464 * khugepaged only supports read-only files for non-shmem files.
465 * khugepaged does not yet work on special mappings. And
466 * file-private shmem THP is not supported.
468 if (!hugepage_vma_check(vma, vm_flags))
471 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
472 hend = vma->vm_end & HPAGE_PMD_MASK;
474 return khugepaged_enter(vma, vm_flags);
478 void __khugepaged_exit(struct mm_struct *mm)
480 struct mm_slot *mm_slot;
483 spin_lock(&khugepaged_mm_lock);
484 mm_slot = get_mm_slot(mm);
485 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
486 hash_del(&mm_slot->hash);
487 list_del(&mm_slot->mm_node);
490 spin_unlock(&khugepaged_mm_lock);
493 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
494 free_mm_slot(mm_slot);
496 } else if (mm_slot) {
498 * This is required to serialize against
499 * khugepaged_test_exit() (which is guaranteed to run
500  * under mmap_sem read mode). Stop here (after we
501  * return, all pagetables will be destroyed) until
502 * khugepaged has finished working on the pagetables
503 * under the mmap_sem.
505 down_write(&mm->mmap_sem);
506 up_write(&mm->mmap_sem);
510 static void release_pte_page(struct page *page)
512 dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
514 putback_lru_page(page);
517 static void release_pte_pages(pte_t *pte, pte_t *_pte)
519 while (--_pte >= pte) {
520 pte_t pteval = *_pte;
521 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
522 release_pte_page(pte_page(pteval));
526 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
527 unsigned long address,
530 struct page *page = NULL;
532 int none_or_zero = 0, result = 0, referenced = 0;
533 bool writable = false;
535 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
536 _pte++, address += PAGE_SIZE) {
537 pte_t pteval = *_pte;
538 if (pte_none(pteval) || (pte_present(pteval) &&
539 is_zero_pfn(pte_pfn(pteval)))) {
540 if (!userfaultfd_armed(vma) &&
541 ++none_or_zero <= khugepaged_max_ptes_none) {
544 result = SCAN_EXCEED_NONE_PTE;
548 if (!pte_present(pteval)) {
549 result = SCAN_PTE_NON_PRESENT;
552 page = vm_normal_page(vma, address, pteval);
553 if (unlikely(!page)) {
554 result = SCAN_PAGE_NULL;
558 /* TODO: teach khugepaged to collapse THP mapped with pte */
559 if (PageCompound(page)) {
560 result = SCAN_PAGE_COMPOUND;
564 VM_BUG_ON_PAGE(!PageAnon(page), page);
567 * We can do it before isolate_lru_page because the
568 * page can't be freed from under us. NOTE: PG_lock
569 * is needed to serialize against split_huge_page
570 * when invoked from the VM.
572 if (!trylock_page(page)) {
573 result = SCAN_PAGE_LOCK;
578 * cannot use mapcount: can't collapse if there's a gup pin.
579 * The page must only be referenced by the scanned process
580 * and page swap cache.
582 if (page_count(page) != 1 + PageSwapCache(page)) {
584 result = SCAN_PAGE_COUNT;
587 if (pte_write(pteval)) {
590 if (PageSwapCache(page) &&
591 !reuse_swap_page(page, NULL)) {
593 result = SCAN_SWAP_CACHE_PAGE;
597 * Page is not in the swap cache. It can be collapsed
603  * Isolate the page to avoid collapsing a hugepage
604 * currently in use by the VM.
606 if (isolate_lru_page(page)) {
608 result = SCAN_DEL_PAGE_LRU;
611 inc_node_page_state(page,
612 NR_ISOLATED_ANON + page_is_file_cache(page));
613 VM_BUG_ON_PAGE(!PageLocked(page), page);
614 VM_BUG_ON_PAGE(PageLRU(page), page);
616  /* There should be enough young ptes to collapse the page */
617 if (pte_young(pteval) ||
618 page_is_young(page) || PageReferenced(page) ||
619 mmu_notifier_test_young(vma->vm_mm, address))
622 if (likely(writable)) {
623 if (likely(referenced)) {
624 result = SCAN_SUCCEED;
625 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
626 referenced, writable, result);
630 result = SCAN_PAGE_RO;
634 release_pte_pages(pte, _pte);
635 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
636 referenced, writable, result);
640 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
641 struct vm_area_struct *vma,
642 unsigned long address,
646 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
647 _pte++, page++, address += PAGE_SIZE) {
648 pte_t pteval = *_pte;
649 struct page *src_page;
651 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
652 clear_user_highpage(page, address);
653 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
654 if (is_zero_pfn(pte_pfn(pteval))) {
656 * ptl mostly unnecessary.
660  * paravirt calls inside pte_clear here are superfluous.
663 pte_clear(vma->vm_mm, address, _pte);
667 src_page = pte_page(pteval);
668 copy_user_highpage(page, src_page, address, vma);
669 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
670 release_pte_page(src_page);
672 * ptl mostly unnecessary, but preempt has to
673 * be disabled to update the per-cpu stats
674 * inside page_remove_rmap().
678  * paravirt calls inside pte_clear here are superfluous.
681 pte_clear(vma->vm_mm, address, _pte);
682 page_remove_rmap(src_page, false);
684 free_page_and_swap_cache(src_page);
689 static void khugepaged_alloc_sleep(void)
693 add_wait_queue(&khugepaged_wait, &wait);
694 freezable_schedule_timeout_interruptible(
695 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
696 remove_wait_queue(&khugepaged_wait, &wait);
699 static int khugepaged_node_load[MAX_NUMNODES];
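/*
 * Per-node hit counters, reset before each pmd/file scan and bumped for every
 * present page found; khugepaged_find_target_node() later allocates the
 * hugepage from the node with the highest count.
 */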
701 static bool khugepaged_scan_abort(int nid)
706 * If node_reclaim_mode is disabled, then no extra effort is made to
707 * allocate memory locally.
709 if (!node_reclaim_mode)
712 /* If there is a count for this node already, it must be acceptable */
713 if (khugepaged_node_load[nid])
716 for (i = 0; i < MAX_NUMNODES; i++) {
717 if (!khugepaged_node_load[i])
719 if (node_distance(nid, i) > node_reclaim_distance)
725 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
726 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
728 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
732 static int khugepaged_find_target_node(void)
734 static int last_khugepaged_target_node = NUMA_NO_NODE;
735 int nid, target_node = 0, max_value = 0;
737 /* find first node with max normal pages hit */
738 for (nid = 0; nid < MAX_NUMNODES; nid++)
739 if (khugepaged_node_load[nid] > max_value) {
740 max_value = khugepaged_node_load[nid];
744  /* do some balancing if several nodes have the same hit record */
745 if (target_node <= last_khugepaged_target_node)
746 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
748 if (max_value == khugepaged_node_load[nid]) {
753 last_khugepaged_target_node = target_node;
757 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
759 if (IS_ERR(*hpage)) {
765 khugepaged_alloc_sleep();
775 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
777 VM_BUG_ON_PAGE(*hpage, *hpage);
779 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
780 if (unlikely(!*hpage)) {
781 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
782 *hpage = ERR_PTR(-ENOMEM);
786 prep_transhuge_page(*hpage);
787 count_vm_event(THP_COLLAPSE_ALLOC);
791 static int khugepaged_find_target_node(void)
796 static inline struct page *alloc_khugepaged_hugepage(void)
800 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
803 prep_transhuge_page(page);
807 static struct page *khugepaged_alloc_hugepage(bool *wait)
812 hpage = alloc_khugepaged_hugepage();
814 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
819 khugepaged_alloc_sleep();
821 count_vm_event(THP_COLLAPSE_ALLOC);
822 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
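/*
 * Keep retrying (sleeping alloc_sleep_millisecs between attempts) until a
 * hugepage is obtained or khugepaged is disabled.
 */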
827 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
830 *hpage = khugepaged_alloc_hugepage(wait);
832 if (unlikely(!*hpage))
839 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
848  * If mmap_sem was temporarily dropped, revalidate the vma
849  * before taking mmap_sem again.
850  * Return 0 on success, otherwise return a non-zero scan code (SCAN_*).
854 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
855 struct vm_area_struct **vmap)
857 struct vm_area_struct *vma;
858 unsigned long hstart, hend;
860 if (unlikely(khugepaged_test_exit(mm)))
861 return SCAN_ANY_PROCESS;
863 *vmap = vma = find_vma(mm, address);
865 return SCAN_VMA_NULL;
867 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
868 hend = vma->vm_end & HPAGE_PMD_MASK;
869 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
870 return SCAN_ADDRESS_RANGE;
871 if (!hugepage_vma_check(vma, vma->vm_flags))
872 return SCAN_VMA_CHECK;
877 * Bring missing pages in from swap, to complete THP collapse.
878 * Only done if khugepaged_scan_pmd believes it is worthwhile.
880 * Called and returns without pte mapped or spinlocks held,
881 * but with mmap_sem held to protect against vma changes.
884 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
885 struct vm_area_struct *vma,
886 unsigned long address, pmd_t *pmd,
891 struct vm_fault vmf = {
894 .flags = FAULT_FLAG_ALLOW_RETRY,
896 .pgoff = linear_page_index(vma, address),
899  /* we only decide to swap in if there are enough young ptes */
900 if (referenced < HPAGE_PMD_NR/2) {
901 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
904 vmf.pte = pte_offset_map(pmd, address);
905 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
906 vmf.pte++, vmf.address += PAGE_SIZE) {
907 vmf.orig_pte = *vmf.pte;
908 if (!is_swap_pte(vmf.orig_pte))
911 ret = do_swap_page(&vmf);
913 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
914 if (ret & VM_FAULT_RETRY) {
915 down_read(&mm->mmap_sem);
916 if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
917 /* vma is no longer available, don't continue to swapin */
918 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
921 /* check if the pmd is still valid */
922 if (mm_find_pmd(mm, address) != pmd) {
923 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
927 if (ret & VM_FAULT_ERROR) {
928 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
931 /* pte is unmapped now, we need to map it */
932 vmf.pte = pte_offset_map(pmd, vmf.address);
936 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
940 static void collapse_huge_page(struct mm_struct *mm,
941 unsigned long address,
943 int node, int referenced)
948 struct page *new_page;
949 spinlock_t *pmd_ptl, *pte_ptl;
950 int isolated = 0, result = 0;
951 struct mem_cgroup *memcg;
952 struct vm_area_struct *vma;
953 struct mmu_notifier_range range;
956 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
958 /* Only allocate from the target node */
959 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
962 * Before allocating the hugepage, release the mmap_sem read lock.
963  * The allocation can potentially take a long time if it involves
964 * sync compaction, and we do not need to hold the mmap_sem during
965 * that. We will recheck the vma after taking it again in write mode.
967 up_read(&mm->mmap_sem);
968 new_page = khugepaged_alloc_page(hpage, gfp, node);
970 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
974 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
975 result = SCAN_CGROUP_CHARGE_FAIL;
979 down_read(&mm->mmap_sem);
980 result = hugepage_vma_revalidate(mm, address, &vma);
982 mem_cgroup_cancel_charge(new_page, memcg, true);
983 up_read(&mm->mmap_sem);
987 pmd = mm_find_pmd(mm, address);
989 result = SCAN_PMD_NULL;
990 mem_cgroup_cancel_charge(new_page, memcg, true);
991 up_read(&mm->mmap_sem);
996 * __collapse_huge_page_swapin always returns with mmap_sem locked.
997 * If it fails, we release mmap_sem and jump out_nolock.
998 * Continuing to collapse causes inconsistency.
1000 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
1001 mem_cgroup_cancel_charge(new_page, memcg, true);
1002 up_read(&mm->mmap_sem);
1006 up_read(&mm->mmap_sem);
1008  * Prevent all access to the pagetables, with the exception of
1009  * gup_fast (handled later by the ptep_clear_flush) and the VM
1010  * (handled by the anon_vma lock + PG_lock).
1012 down_write(&mm->mmap_sem);
1013 result = SCAN_ANY_PROCESS;
1014 if (!mmget_still_valid(mm))
1016 result = hugepage_vma_revalidate(mm, address, &vma);
1019 /* check if the pmd is still valid */
1020 if (mm_find_pmd(mm, address) != pmd)
1023 anon_vma_lock_write(vma->anon_vma);
1025 pte = pte_offset_map(pmd, address);
1026 pte_ptl = pte_lockptr(mm, pmd);
1028 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1029 address, address + HPAGE_PMD_SIZE);
1030 mmu_notifier_invalidate_range_start(&range);
1031 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1033 * After this gup_fast can't run anymore. This also removes
1034 * any huge TLB entry from the CPU so we won't allow
1035 * huge and small TLB entries for the same virtual address
1036 * to avoid the risk of CPU bugs in that area.
1038 _pmd = pmdp_collapse_flush(vma, address, pmd);
1039 spin_unlock(pmd_ptl);
1040 mmu_notifier_invalidate_range_end(&range);
1043 isolated = __collapse_huge_page_isolate(vma, address, pte);
1044 spin_unlock(pte_ptl);
1046 if (unlikely(!isolated)) {
1049 BUG_ON(!pmd_none(*pmd));
1051 * We can only use set_pmd_at when establishing
1052 * hugepmds and never for establishing regular pmds that
1053  * point to regular pagetables. Use pmd_populate for that
1055 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1056 spin_unlock(pmd_ptl);
1057 anon_vma_unlock_write(vma->anon_vma);
1063 * All pages are isolated and locked so anon_vma rmap
1064 * can't run anymore.
1066 anon_vma_unlock_write(vma->anon_vma);
1068 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1070 __SetPageUptodate(new_page);
1071 pgtable = pmd_pgtable(_pmd);
1073 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1074 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1077 * spin_lock() below is not the equivalent of smp_wmb(), so
1078  * this is needed to avoid the copy_huge_page writes becoming
1079 * visible after the set_pmd_at() write.
1084 BUG_ON(!pmd_none(*pmd));
1085 page_add_new_anon_rmap(new_page, vma, address, true);
1086 mem_cgroup_commit_charge(new_page, memcg, false, true);
1087 count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
1088 lru_cache_add_active_or_unevictable(new_page, vma);
1089 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1090 set_pmd_at(mm, address, pmd, _pmd);
1091 update_mmu_cache_pmd(vma, address, pmd);
1092 spin_unlock(pmd_ptl);
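/*
 * The collapse is committed at this point: the huge pmd maps new_page and
 * all further faults in this range are served by the THP.
 */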
1096 khugepaged_pages_collapsed++;
1097 result = SCAN_SUCCEED;
1099 up_write(&mm->mmap_sem);
1101 trace_mm_collapse_huge_page(mm, isolated, result);
1104 mem_cgroup_cancel_charge(new_page, memcg, true);
1108 static int khugepaged_scan_pmd(struct mm_struct *mm,
1109 struct vm_area_struct *vma,
1110 unsigned long address,
1111 struct page **hpage)
1115 int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1116 struct page *page = NULL;
1117 unsigned long _address;
1119 int node = NUMA_NO_NODE, unmapped = 0;
1120 bool writable = false;
1122 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1124 pmd = mm_find_pmd(mm, address);
1126 result = SCAN_PMD_NULL;
1130 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1131 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1132 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1133 _pte++, _address += PAGE_SIZE) {
1134 pte_t pteval = *_pte;
1135 if (is_swap_pte(pteval)) {
1136 if (++unmapped <= khugepaged_max_ptes_swap) {
1139 result = SCAN_EXCEED_SWAP_PTE;
1143 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1144 if (!userfaultfd_armed(vma) &&
1145 ++none_or_zero <= khugepaged_max_ptes_none) {
1148 result = SCAN_EXCEED_NONE_PTE;
1152 if (!pte_present(pteval)) {
1153 result = SCAN_PTE_NON_PRESENT;
1156 if (pte_write(pteval))
1159 page = vm_normal_page(vma, _address, pteval);
1160 if (unlikely(!page)) {
1161 result = SCAN_PAGE_NULL;
1165 /* TODO: teach khugepaged to collapse THP mapped with pte */
1166 if (PageCompound(page)) {
1167 result = SCAN_PAGE_COMPOUND;
1172 * Record which node the original page is from and save this
1173 * information to khugepaged_node_load[].
1174  * Khugepaged will allocate the hugepage from the node with the max hit record.
1177 node = page_to_nid(page);
1178 if (khugepaged_scan_abort(node)) {
1179 result = SCAN_SCAN_ABORT;
1182 khugepaged_node_load[node]++;
1183 if (!PageLRU(page)) {
1184 result = SCAN_PAGE_LRU;
1187 if (PageLocked(page)) {
1188 result = SCAN_PAGE_LOCK;
1191 if (!PageAnon(page)) {
1192 result = SCAN_PAGE_ANON;
1197 * cannot use mapcount: can't collapse if there's a gup pin.
1198 * The page must only be referenced by the scanned process
1199 * and page swap cache.
1201 if (page_count(page) != 1 + PageSwapCache(page)) {
1202 result = SCAN_PAGE_COUNT;
1205 if (pte_young(pteval) ||
1206 page_is_young(page) || PageReferenced(page) ||
1207 mmu_notifier_test_young(vma->vm_mm, address))
1212 result = SCAN_SUCCEED;
1215 result = SCAN_LACK_REFERENCED_PAGE;
1218 result = SCAN_PAGE_RO;
1221 pte_unmap_unlock(pte, ptl);
1223 node = khugepaged_find_target_node();
1224 /* collapse_huge_page will return with the mmap_sem released */
1225 collapse_huge_page(mm, address, hpage, node, referenced);
1228 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1229 none_or_zero, result, unmapped);
1233 static void collect_mm_slot(struct mm_slot *mm_slot)
1235 struct mm_struct *mm = mm_slot->mm;
1237 lockdep_assert_held(&khugepaged_mm_lock);
1239 if (khugepaged_test_exit(mm)) {
1241 hash_del(&mm_slot->hash);
1242 list_del(&mm_slot->mm_node);
1245 * Not strictly needed because the mm exited already.
1247 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1250 /* khugepaged_mm_lock actually not necessary for the below */
1251 free_mm_slot(mm_slot);
1256 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1257 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1259 struct vm_area_struct *vma;
1263 i_mmap_lock_write(mapping);
1264 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1265 /* probably overkill */
1268 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1269 if (addr & ~HPAGE_PMD_MASK)
1271 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1273 pmd = mm_find_pmd(vma->vm_mm, addr);
1277  * We need exclusive mmap_sem to retract the page table.
1278  * If trylock fails we would end up with a pte-mapped THP after
1279 * re-fault. Not ideal, but it's more important to not disturb
1280 * the system too much.
1282 if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1283 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1284 /* assume page table is clear */
1285 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1287 up_write(&vma->vm_mm->mmap_sem);
1288 mm_dec_nr_ptes(vma->vm_mm);
1289 pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1292 i_mmap_unlock_write(mapping);
1296  * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
1298 * Basic scheme is simple, details are more complex:
1299 * - allocate and lock a new huge page;
1300 * - scan page cache replacing old pages with the new one
1301 * + swap/gup in pages if necessary;
1303 * + keep old pages around in case rollback is required;
1304 * - if replacing succeeds:
1307 * + unlock huge page;
1308  * - if replacing failed:
1309 * + put all pages back and unfreeze them;
1310 * + restore gaps in the page cache;
1311 * + unlock and free huge page;
1313 static void collapse_file(struct mm_struct *mm,
1314 struct file *file, pgoff_t start,
1315 struct page **hpage, int node)
1317 struct address_space *mapping = file->f_mapping;
1319 struct page *new_page;
1320 struct mem_cgroup *memcg;
1321 pgoff_t index, end = start + HPAGE_PMD_NR;
1322 LIST_HEAD(pagelist);
1323 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
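/*
 * XA_STATE_ORDER sets up the xarray cursor at @start for an
 * order-HPAGE_PMD_ORDER entry, i.e. the HPAGE_PMD_NR slots being collapsed.
 */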
1324 int nr_none = 0, result = SCAN_SUCCEED;
1325 bool is_shmem = shmem_file(file);
1327 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1328 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1330 /* Only allocate from the target node */
1331 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1333 new_page = khugepaged_alloc_page(hpage, gfp, node);
1335 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1339 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1340 result = SCAN_CGROUP_CHARGE_FAIL;
1344 /* This will be less messy when we use multi-index entries */
1347 xas_create_range(&xas);
1348 if (!xas_error(&xas))
1350 xas_unlock_irq(&xas);
1351 if (!xas_nomem(&xas, GFP_KERNEL)) {
1352 mem_cgroup_cancel_charge(new_page, memcg, true);
1358 __SetPageLocked(new_page);
1360 __SetPageSwapBacked(new_page);
1361 new_page->index = start;
1362 new_page->mapping = mapping;
1365 * At this point the new_page is locked and not up-to-date.
1366 * It's safe to insert it into the page cache, because nobody would
1367 * be able to map it or use it in another way until we unlock it.
1370 xas_set(&xas, start);
1371 for (index = start; index < end; index++) {
1372 struct page *page = xas_next(&xas);
1374 VM_BUG_ON(index != xas.xa_index);
1378 * Stop if extent has been truncated or
1379  * hole-punched, and is now completely empty.
1382 if (index == start) {
1383 if (!xas_next_entry(&xas, end - 1)) {
1384 result = SCAN_TRUNCATED;
1387 xas_set(&xas, index);
1389 if (!shmem_charge(mapping->host, 1)) {
1393 xas_store(&xas, new_page);
1398 if (xa_is_value(page) || !PageUptodate(page)) {
1399 xas_unlock_irq(&xas);
1400 /* swap in or instantiate fallocated page */
1401 if (shmem_getpage(mapping->host, index, &page,
1406 } else if (trylock_page(page)) {
1408 xas_unlock_irq(&xas);
1410 result = SCAN_PAGE_LOCK;
1413 } else { /* !is_shmem */
1414 if (!page || xa_is_value(page)) {
1415 xas_unlock_irq(&xas);
1416 page_cache_sync_readahead(mapping, &file->f_ra,
1419 /* drain pagevecs to help isolate_lru_page() */
1421 page = find_lock_page(mapping, index);
1422 if (unlikely(page == NULL)) {
1426 } else if (!PageUptodate(page)) {
1427 xas_unlock_irq(&xas);
1428 wait_on_page_locked(page);
1429 if (!trylock_page(page)) {
1430 result = SCAN_PAGE_LOCK;
1434 } else if (PageDirty(page)) {
1437 } else if (trylock_page(page)) {
1439 xas_unlock_irq(&xas);
1441 result = SCAN_PAGE_LOCK;
1447 * The page must be locked, so we can drop the i_pages lock
1448 * without racing with truncate.
1450 VM_BUG_ON_PAGE(!PageLocked(page), page);
1451 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1454 * If file was truncated then extended, or hole-punched, before
1455 * we locked the first page, then a THP might be there already.
1457 if (PageTransCompound(page)) {
1458 result = SCAN_PAGE_COMPOUND;
1462 if (page_mapping(page) != mapping) {
1463 result = SCAN_TRUNCATED;
1467 if (isolate_lru_page(page)) {
1468 result = SCAN_DEL_PAGE_LRU;
1472 if (page_has_private(page) &&
1473 !try_to_release_page(page, GFP_KERNEL)) {
1474 result = SCAN_PAGE_HAS_PRIVATE;
1478 if (page_mapped(page))
1479 unmap_mapping_pages(mapping, index, 1, false);
1482 xas_set(&xas, index);
1484 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1485 VM_BUG_ON_PAGE(page_mapped(page), page);
1488 * The page is expected to have page_count() == 3:
1489 * - we hold a pin on it;
1490 * - one reference from page cache;
1491 * - one from isolate_lru_page;
1493 if (!page_ref_freeze(page, 3)) {
1494 result = SCAN_PAGE_COUNT;
1495 xas_unlock_irq(&xas);
1496 putback_lru_page(page);
1501 * Add the page to the list to be able to undo the collapse if
1502  * something goes wrong.
1504 list_add_tail(&page->lru, &pagelist);
1506 /* Finally, replace with the new page. */
1507 xas_store(&xas, new_page);
1516 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1518 __inc_node_page_state(new_page, NR_FILE_THPS);
1521 struct zone *zone = page_zone(new_page);
1523 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1525 __mod_node_page_state(zone->zone_pgdat,
1530 xas_unlock_irq(&xas);
1533 if (result == SCAN_SUCCEED) {
1534 struct page *page, *tmp;
1537  * Replacing old pages with the new one has succeeded; now we
1538 * need to copy the content and free the old pages.
1541 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1542 while (index < page->index) {
1543 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1546 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1548 list_del(&page->lru);
1549 page->mapping = NULL;
1550 page_ref_unfreeze(page, 1);
1551 ClearPageActive(page);
1552 ClearPageUnevictable(page);
1557 while (index < end) {
1558 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1562 SetPageUptodate(new_page);
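/*
 * new_page was stored at every small-page index above (multi-index entries
 * are not used here yet), so take the extra references; together with the
 * allocation reference this matches the HPAGE_PMD_NR page cache slots.
 */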
1563 page_ref_add(new_page, HPAGE_PMD_NR - 1);
1564 mem_cgroup_commit_charge(new_page, memcg, false, true);
1567 set_page_dirty(new_page);
1568 lru_cache_add_anon(new_page);
1570 lru_cache_add_file(new_page);
1572 count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
1575 * Remove pte page tables, so we can re-fault the page as huge.
1577 retract_page_tables(mapping, start);
1580 khugepaged_pages_collapsed++;
1584 /* Something went wrong: roll back page cache changes */
1586 mapping->nrpages -= nr_none;
1589 shmem_uncharge(mapping->host, nr_none);
1591 xas_set(&xas, start);
1592 xas_for_each(&xas, page, end - 1) {
1593 page = list_first_entry_or_null(&pagelist,
1595 if (!page || xas.xa_index < page->index) {
1599 /* Put holes back where they were */
1600 xas_store(&xas, NULL);
1604 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1606 /* Unfreeze the page. */
1607 list_del(&page->lru);
1608 page_ref_unfreeze(page, 2);
1609 xas_store(&xas, page);
1611 xas_unlock_irq(&xas);
1613 putback_lru_page(page);
1617 xas_unlock_irq(&xas);
1619 mem_cgroup_cancel_charge(new_page, memcg, true);
1620 new_page->mapping = NULL;
1623 unlock_page(new_page);
1625 VM_BUG_ON(!list_empty(&pagelist));
1626 /* TODO: tracepoints */
1629 static void khugepaged_scan_file(struct mm_struct *mm,
1630 struct file *file, pgoff_t start, struct page **hpage)
1632 struct page *page = NULL;
1633 struct address_space *mapping = file->f_mapping;
1634 XA_STATE(xas, &mapping->i_pages, start);
1636 int node = NUMA_NO_NODE;
1637 int result = SCAN_SUCCEED;
1641 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1643 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
1644 if (xas_retry(&xas, page))
1647 if (xa_is_value(page)) {
1648 if (++swap > khugepaged_max_ptes_swap) {
1649 result = SCAN_EXCEED_SWAP_PTE;
1655 if (PageTransCompound(page)) {
1656 result = SCAN_PAGE_COMPOUND;
1660 node = page_to_nid(page);
1661 if (khugepaged_scan_abort(node)) {
1662 result = SCAN_SCAN_ABORT;
1665 khugepaged_node_load[node]++;
1667 if (!PageLRU(page)) {
1668 result = SCAN_PAGE_LRU;
1672 if (page_count(page) !=
1673 1 + page_mapcount(page) + page_has_private(page)) {
1674 result = SCAN_PAGE_COUNT;
1679 * We probably should check if the page is referenced here, but
1680 * nobody would transfer pte_young() to PageReferenced() for us.
1681 * And rmap walk here is just too costly...
1686 if (need_resched()) {
1693 if (result == SCAN_SUCCEED) {
1694 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1695 result = SCAN_EXCEED_NONE_PTE;
1697 node = khugepaged_find_target_node();
1698 collapse_file(mm, file, start, hpage, node);
1702 /* TODO: tracepoints */
1705 static void khugepaged_scan_file(struct mm_struct *mm,
1706 struct file *file, pgoff_t start, struct page **hpage)
1712 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1713 struct page **hpage)
1714 __releases(&khugepaged_mm_lock)
1715 __acquires(&khugepaged_mm_lock)
1717 struct mm_slot *mm_slot;
1718 struct mm_struct *mm;
1719 struct vm_area_struct *vma;
1723 lockdep_assert_held(&khugepaged_mm_lock);
1725 if (khugepaged_scan.mm_slot)
1726 mm_slot = khugepaged_scan.mm_slot;
1728 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1729 struct mm_slot, mm_node);
1730 khugepaged_scan.address = 0;
1731 khugepaged_scan.mm_slot = mm_slot;
1733 spin_unlock(&khugepaged_mm_lock);
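/*
 * The scan cursor (mm_slot + address) persists across calls, so scanning
 * resumes where the previous pass ran out of its page quota.
 */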
1737 * Don't wait for semaphore (to avoid long wait times). Just move to
1738 * the next mm on the list.
1741 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1742 goto breakouterloop_mmap_sem;
1743 if (likely(!khugepaged_test_exit(mm)))
1744 vma = find_vma(mm, khugepaged_scan.address);
1747 for (; vma; vma = vma->vm_next) {
1748 unsigned long hstart, hend;
1751 if (unlikely(khugepaged_test_exit(mm))) {
1755 if (!hugepage_vma_check(vma, vma->vm_flags)) {
1760 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1761 hend = vma->vm_end & HPAGE_PMD_MASK;
1764 if (khugepaged_scan.address > hend)
1766 if (khugepaged_scan.address < hstart)
1767 khugepaged_scan.address = hstart;
1768 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1770 while (khugepaged_scan.address < hend) {
1773 if (unlikely(khugepaged_test_exit(mm)))
1774 goto breakouterloop;
1776 VM_BUG_ON(khugepaged_scan.address < hstart ||
1777 khugepaged_scan.address + HPAGE_PMD_SIZE >
1779 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
1781 pgoff_t pgoff = linear_page_index(vma,
1782 khugepaged_scan.address);
1784 if (shmem_file(vma->vm_file)
1785 && !shmem_huge_enabled(vma))
1787 file = get_file(vma->vm_file);
1788 up_read(&mm->mmap_sem);
1790 khugepaged_scan_file(mm, file, pgoff, hpage);
1793 ret = khugepaged_scan_pmd(mm, vma,
1794 khugepaged_scan.address,
1797 /* move to next address */
1798 khugepaged_scan.address += HPAGE_PMD_SIZE;
1799 progress += HPAGE_PMD_NR;
1801 /* we released mmap_sem so break loop */
1802 goto breakouterloop_mmap_sem;
1803 if (progress >= pages)
1804 goto breakouterloop;
1808 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1809 breakouterloop_mmap_sem:
1811 spin_lock(&khugepaged_mm_lock);
1812 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1814 * Release the current mm_slot if this mm is about to die, or
1815 * if we scanned all vmas of this mm.
1817 if (khugepaged_test_exit(mm) || !vma) {
1819 * Make sure that if mm_users is reaching zero while
1820 * khugepaged runs here, khugepaged_exit will find
1821 * mm_slot not pointing to the exiting mm.
1823 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1824 khugepaged_scan.mm_slot = list_entry(
1825 mm_slot->mm_node.next,
1826 struct mm_slot, mm_node);
1827 khugepaged_scan.address = 0;
1829 khugepaged_scan.mm_slot = NULL;
1830 khugepaged_full_scans++;
1833 collect_mm_slot(mm_slot);
1839 static int khugepaged_has_work(void)
1841 return !list_empty(&khugepaged_scan.mm_head) &&
1842 khugepaged_enabled();
1845 static int khugepaged_wait_event(void)
1847 return !list_empty(&khugepaged_scan.mm_head) ||
1848 kthread_should_stop();
1851 static void khugepaged_do_scan(void)
1853 struct page *hpage = NULL;
1854 unsigned int progress = 0, pass_through_head = 0;
1855 unsigned int pages = khugepaged_pages_to_scan;
1858 barrier(); /* write khugepaged_pages_to_scan to local stack */
1860 while (progress < pages) {
1861 if (!khugepaged_prealloc_page(&hpage, &wait))
1866 if (unlikely(kthread_should_stop() || try_to_freeze()))
1869 spin_lock(&khugepaged_mm_lock);
1870 if (!khugepaged_scan.mm_slot)
1871 pass_through_head++;
1872 if (khugepaged_has_work() &&
1873 pass_through_head < 2)
1874 progress += khugepaged_scan_mm_slot(pages - progress,
1878 spin_unlock(&khugepaged_mm_lock);
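/*
 * A single khugepaged_do_scan() pass stops once the head of the mm list has
 * been reached twice, even if the page quota is not yet consumed.
 */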
1881 if (!IS_ERR_OR_NULL(hpage))
1885 static bool khugepaged_should_wakeup(void)
1887 return kthread_should_stop() ||
1888 time_after_eq(jiffies, khugepaged_sleep_expire);
1891 static void khugepaged_wait_work(void)
1893 if (khugepaged_has_work()) {
1894 const unsigned long scan_sleep_jiffies =
1895 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1897 if (!scan_sleep_jiffies)
1900 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1901 wait_event_freezable_timeout(khugepaged_wait,
1902 khugepaged_should_wakeup(),
1903 scan_sleep_jiffies);
1907 if (khugepaged_enabled())
1908 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1911 static int khugepaged(void *none)
1913 struct mm_slot *mm_slot;
1916 set_user_nice(current, MAX_NICE);
1918 while (!kthread_should_stop()) {
1919 khugepaged_do_scan();
1920 khugepaged_wait_work();
1923 spin_lock(&khugepaged_mm_lock);
1924 mm_slot = khugepaged_scan.mm_slot;
1925 khugepaged_scan.mm_slot = NULL;
1927 collect_mm_slot(mm_slot);
1928 spin_unlock(&khugepaged_mm_lock);
1932 static void set_recommended_min_free_kbytes(void)
1936 unsigned long recommended_min;
1938 for_each_populated_zone(zone) {
1940 * We don't need to worry about fragmentation of
1941 * ZONE_MOVABLE since it only has movable pages.
1943 if (zone_idx(zone) > gfp_zone(GFP_USER))
1949 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1950 recommended_min = pageblock_nr_pages * nr_zones * 2;
1953 * Make sure that on average at least two pageblocks are almost free
1954 * of another type, one for a migratetype to fall back to and a
1955  * second to avoid subsequent fallbacks of other types. There are 3
1956 * MIGRATE_TYPES we care about.
1958 recommended_min += pageblock_nr_pages * nr_zones *
1959 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
1961  /* don't ever allow reserving more than 5% of the lowmem */
1962 recommended_min = min(recommended_min,
1963 (unsigned long) nr_free_buffer_pages() / 20);
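/* min_free_kbytes is in kilobytes, so convert the page count to KiB */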
1964 recommended_min <<= (PAGE_SHIFT-10);
1966 if (recommended_min > min_free_kbytes) {
1967 if (user_min_free_kbytes >= 0)
1968 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1969 min_free_kbytes, recommended_min);
1971 min_free_kbytes = recommended_min;
1973 setup_per_zone_wmarks();
1976 int start_stop_khugepaged(void)
1978 static struct task_struct *khugepaged_thread __read_mostly;
1979 static DEFINE_MUTEX(khugepaged_mutex);
1982 mutex_lock(&khugepaged_mutex);
1983 if (khugepaged_enabled()) {
1984 if (!khugepaged_thread)
1985 khugepaged_thread = kthread_run(khugepaged, NULL,
1987 if (IS_ERR(khugepaged_thread)) {
1988 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1989 err = PTR_ERR(khugepaged_thread);
1990 khugepaged_thread = NULL;
1994 if (!list_empty(&khugepaged_scan.mm_head))
1995 wake_up_interruptible(&khugepaged_wait);
1997 set_recommended_min_free_kbytes();
1998 } else if (khugepaged_thread) {
1999 kthread_stop(khugepaged_thread);
2000 khugepaged_thread = NULL;
2003 mutex_unlock(&khugepaged_mutex);