mm/khugepaged: minor cleanup for collapse_file
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>
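/*
 * The scan_result codes above are reported through the huge_memory trace
 * events included here, so tracing khugepaged shows why each collapse
 * attempt succeeded or was abandoned.
 */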
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 pte (or vmas) every 30 second */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte mapped THP
 * @pte_mapped_thp: address array corresponding pte mapped THP
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
e1465d12 124#ifdef CONFIG_SYSFS
b46e756f
KS
125static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
126 struct kobj_attribute *attr,
127 char *buf)
128{
ae7a927d 129 return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
b46e756f
KS
130}
131
132static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
133 struct kobj_attribute *attr,
134 const char *buf, size_t count)
135{
dfefd226 136 unsigned int msecs;
b46e756f
KS
137 int err;
138
dfefd226
AD
139 err = kstrtouint(buf, 10, &msecs);
140 if (err)
b46e756f
KS
141 return -EINVAL;
142
143 khugepaged_scan_sleep_millisecs = msecs;
144 khugepaged_sleep_expire = 0;
145 wake_up_interruptible(&khugepaged_wait);
146
147 return count;
148}
149static struct kobj_attribute scan_sleep_millisecs_attr =
150 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
151 scan_sleep_millisecs_store);
152
153static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
154 struct kobj_attribute *attr,
155 char *buf)
156{
ae7a927d 157 return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
b46e756f
KS
158}
159
160static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
161 struct kobj_attribute *attr,
162 const char *buf, size_t count)
163{
dfefd226 164 unsigned int msecs;
b46e756f
KS
165 int err;
166
dfefd226
AD
167 err = kstrtouint(buf, 10, &msecs);
168 if (err)
b46e756f
KS
169 return -EINVAL;
170
171 khugepaged_alloc_sleep_millisecs = msecs;
172 khugepaged_sleep_expire = 0;
173 wake_up_interruptible(&khugepaged_wait);
174
175 return count;
176}
177static struct kobj_attribute alloc_sleep_millisecs_attr =
178 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
179 alloc_sleep_millisecs_store);
180
181static ssize_t pages_to_scan_show(struct kobject *kobj,
182 struct kobj_attribute *attr,
183 char *buf)
184{
ae7a927d 185 return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
b46e756f
KS
186}
187static ssize_t pages_to_scan_store(struct kobject *kobj,
188 struct kobj_attribute *attr,
189 const char *buf, size_t count)
190{
dfefd226 191 unsigned int pages;
b46e756f 192 int err;
b46e756f 193
dfefd226
AD
194 err = kstrtouint(buf, 10, &pages);
195 if (err || !pages)
b46e756f
KS
196 return -EINVAL;
197
198 khugepaged_pages_to_scan = pages;
199
200 return count;
201}
202static struct kobj_attribute pages_to_scan_attr =
203 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
204 pages_to_scan_store);
205
206static ssize_t pages_collapsed_show(struct kobject *kobj,
207 struct kobj_attribute *attr,
208 char *buf)
209{
ae7a927d 210 return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
b46e756f
KS
211}
212static struct kobj_attribute pages_collapsed_attr =
213 __ATTR_RO(pages_collapsed);
214
215static ssize_t full_scans_show(struct kobject *kobj,
216 struct kobj_attribute *attr,
217 char *buf)
218{
ae7a927d 219 return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
b46e756f
KS
220}
221static struct kobj_attribute full_scans_attr =
222 __ATTR_RO(full_scans);
223
224static ssize_t khugepaged_defrag_show(struct kobject *kobj,
225 struct kobj_attribute *attr, char *buf)
226{
227 return single_hugepage_flag_show(kobj, attr, buf,
ae7a927d 228 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
b46e756f
KS
229}
230static ssize_t khugepaged_defrag_store(struct kobject *kobj,
231 struct kobj_attribute *attr,
232 const char *buf, size_t count)
233{
234 return single_hugepage_flag_store(kobj, attr, buf, count,
235 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
236}
237static struct kobj_attribute khugepaged_defrag_attr =
238 __ATTR(defrag, 0644, khugepaged_defrag_show,
239 khugepaged_defrag_store);
240
241/*
242 * max_ptes_none controls if khugepaged should collapse hugepages over
243 * any unmapped ptes in turn potentially increasing the memory
244 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
245 * reduce the available free memory in the system as it
246 * runs. Increasing max_ptes_none will instead potentially reduce the
247 * free memory in the system during the khugepaged scan.
248 */
249static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
250 struct kobj_attribute *attr,
251 char *buf)
252{
ae7a927d 253 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
b46e756f
KS
254}
255static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
256 struct kobj_attribute *attr,
257 const char *buf, size_t count)
258{
259 int err;
260 unsigned long max_ptes_none;
261
262 err = kstrtoul(buf, 10, &max_ptes_none);
36ee2c78 263 if (err || max_ptes_none > HPAGE_PMD_NR - 1)
b46e756f
KS
264 return -EINVAL;
265
266 khugepaged_max_ptes_none = max_ptes_none;
267
268 return count;
269}
270static struct kobj_attribute khugepaged_max_ptes_none_attr =
271 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
272 khugepaged_max_ptes_none_store);
273
274static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
275 struct kobj_attribute *attr,
276 char *buf)
277{
ae7a927d 278 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
b46e756f
KS
279}
280
281static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
282 struct kobj_attribute *attr,
283 const char *buf, size_t count)
284{
285 int err;
286 unsigned long max_ptes_swap;
287
288 err = kstrtoul(buf, 10, &max_ptes_swap);
36ee2c78 289 if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
b46e756f
KS
290 return -EINVAL;
291
292 khugepaged_max_ptes_swap = max_ptes_swap;
293
294 return count;
295}
296
297static struct kobj_attribute khugepaged_max_ptes_swap_attr =
298 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
299 khugepaged_max_ptes_swap_store);
300
71a2c112 301static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
ae7a927d
JP
302 struct kobj_attribute *attr,
303 char *buf)
71a2c112 304{
ae7a927d 305 return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
71a2c112
KS
306}
307
308static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
309 struct kobj_attribute *attr,
310 const char *buf, size_t count)
311{
312 int err;
313 unsigned long max_ptes_shared;
314
315 err = kstrtoul(buf, 10, &max_ptes_shared);
36ee2c78 316 if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
71a2c112
KS
317 return -EINVAL;
318
319 khugepaged_max_ptes_shared = max_ptes_shared;
320
321 return count;
322}
323
324static struct kobj_attribute khugepaged_max_ptes_shared_attr =
325 __ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
326 khugepaged_max_ptes_shared_store);
327
b46e756f
KS
328static struct attribute *khugepaged_attr[] = {
329 &khugepaged_defrag_attr.attr,
330 &khugepaged_max_ptes_none_attr.attr,
71a2c112
KS
331 &khugepaged_max_ptes_swap_attr.attr,
332 &khugepaged_max_ptes_shared_attr.attr,
b46e756f
KS
333 &pages_to_scan_attr.attr,
334 &pages_collapsed_attr.attr,
335 &full_scans_attr.attr,
336 &scan_sleep_millisecs_attr.attr,
337 &alloc_sleep_millisecs_attr.attr,
b46e756f
KS
338 NULL,
339};
340
341struct attribute_group khugepaged_attr_group = {
342 .attrs = khugepaged_attr,
343 .name = "khugepaged",
344};
e1465d12 345#endif /* CONFIG_SYSFS */
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}
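/*
 * With a 2MB PMD (HPAGE_PMD_NR == 512, e.g. x86-64 with 4K pages), the
 * defaults chosen in khugepaged_init() allow a collapse candidate to contain
 * up to 511 empty PTEs, 64 swapped-out PTEs and 256 shared PTEs; all three
 * limits are tunable via sysfs.
 */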
405static inline struct mm_slot *alloc_mm_slot(void)
406{
407 if (!mm_slot_cache) /* initialization failed */
408 return NULL;
409 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
410}
411
412static inline void free_mm_slot(struct mm_slot *mm_slot)
413{
414 kmem_cache_free(mm_slot_cache, mm_slot);
415}
416
417static struct mm_slot *get_mm_slot(struct mm_struct *mm)
418{
419 struct mm_slot *mm_slot;
420
421 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
422 if (mm == mm_slot->mm)
423 return mm_slot;
424
425 return NULL;
426}
427
428static void insert_to_mm_slots_hash(struct mm_struct *mm,
429 struct mm_slot *mm_slot)
430{
431 mm_slot->mm = mm;
432 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
433}
434
435static inline int khugepaged_test_exit(struct mm_struct *mm)
436{
4d45e75a 437 return atomic_read(&mm->mm_users) == 0;
b46e756f
KS
438}
439
bool hugepage_vma_check(struct vm_area_struct *vma,
			unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/* Don't run khugepaged against DAX vma */
	if (vma_is_dax(vma))
		return false;

	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
				vma->vm_pgoff, HPAGE_PMD_NR))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular file is valid */
	if (file_thp_enabled(vma))
		return true;

	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return false;
	if (vma_is_temporary_stack(vma))
		return false;

	return true;
}
d2081b2b 477void __khugepaged_enter(struct mm_struct *mm)
b46e756f
KS
478{
479 struct mm_slot *mm_slot;
480 int wakeup;
481
482 mm_slot = alloc_mm_slot();
483 if (!mm_slot)
d2081b2b 484 return;
b46e756f
KS
485
486 /* __khugepaged_exit() must not run from under us */
28ff0a3c 487 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
b46e756f
KS
488 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
489 free_mm_slot(mm_slot);
d2081b2b 490 return;
b46e756f
KS
491 }
492
493 spin_lock(&khugepaged_mm_lock);
494 insert_to_mm_slots_hash(mm, mm_slot);
495 /*
496 * Insert just behind the scanning cursor, to let the area settle
497 * down a little.
498 */
499 wakeup = list_empty(&khugepaged_scan.mm_head);
500 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
501 spin_unlock(&khugepaged_mm_lock);
502
f1f10076 503 mmgrab(mm);
b46e756f
KS
504 if (wakeup)
505 wake_up_interruptible(&khugepaged_wait);
b46e756f
KS
506}
507
c791576c
YS
508void khugepaged_enter_vma(struct vm_area_struct *vma,
509 unsigned long vm_flags)
b46e756f 510{
2647d11b
YS
511 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
512 khugepaged_enabled() &&
513 (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
514 (vma->vm_end & HPAGE_PMD_MASK))) {
515 if (hugepage_vma_check(vma, vm_flags))
516 __khugepaged_enter(vma->vm_mm);
517 }
b46e756f
KS
518}
519
520void __khugepaged_exit(struct mm_struct *mm)
521{
522 struct mm_slot *mm_slot;
523 int free = 0;
524
525 spin_lock(&khugepaged_mm_lock);
526 mm_slot = get_mm_slot(mm);
527 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
528 hash_del(&mm_slot->hash);
529 list_del(&mm_slot->mm_node);
530 free = 1;
531 }
532 spin_unlock(&khugepaged_mm_lock);
533
534 if (free) {
535 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
536 free_mm_slot(mm_slot);
537 mmdrop(mm);
538 } else if (mm_slot) {
539 /*
540 * This is required to serialize against
541 * khugepaged_test_exit() (which is guaranteed to run
542 * under mmap sem read mode). Stop here (after we
543 * return all pagetables will be destroyed) until
544 * khugepaged has finished working on the pagetables
c1e8d7c6 545 * under the mmap_lock.
b46e756f 546 */
d8ed45c5
ML
547 mmap_write_lock(mm);
548 mmap_write_unlock(mm);
b46e756f
KS
549 }
550}
static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}
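/*
 * is_refcount_suitable() treats a page as suitable for collapse only when
 * every reference on it is accounted for by its mappings (plus the swap
 * cache, if any). Any extra reference -- typically a GUP pin -- makes both
 * __collapse_huge_page_isolate() and khugepaged_scan_pmd() back off with
 * SCAN_PAGE_COUNT.
 */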
b46e756f
KS
592static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
593 unsigned long address,
5503fbf2
KS
594 pte_t *pte,
595 struct list_head *compound_pagelist)
b46e756f
KS
596{
597 struct page *page = NULL;
598 pte_t *_pte;
71a2c112 599 int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
0db501f7 600 bool writable = false;
b46e756f 601
36ee2c78 602 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
b46e756f
KS
603 _pte++, address += PAGE_SIZE) {
604 pte_t pteval = *_pte;
605 if (pte_none(pteval) || (pte_present(pteval) &&
606 is_zero_pfn(pte_pfn(pteval)))) {
607 if (!userfaultfd_armed(vma) &&
608 ++none_or_zero <= khugepaged_max_ptes_none) {
609 continue;
610 } else {
611 result = SCAN_EXCEED_NONE_PTE;
e9ea874a 612 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
b46e756f
KS
613 goto out;
614 }
615 }
616 if (!pte_present(pteval)) {
617 result = SCAN_PTE_NON_PRESENT;
618 goto out;
619 }
620 page = vm_normal_page(vma, address, pteval);
621 if (unlikely(!page)) {
622 result = SCAN_PAGE_NULL;
623 goto out;
624 }
625
5503fbf2
KS
626 VM_BUG_ON_PAGE(!PageAnon(page), page);
627
71a2c112
KS
628 if (page_mapcount(page) > 1 &&
629 ++shared > khugepaged_max_ptes_shared) {
630 result = SCAN_EXCEED_SHARED_PTE;
e9ea874a 631 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
71a2c112
KS
632 goto out;
633 }
634
fece2029 635 if (PageCompound(page)) {
5503fbf2
KS
636 struct page *p;
637 page = compound_head(page);
fece2029 638
5503fbf2
KS
639 /*
640 * Check if we have dealt with the compound page
641 * already
642 */
643 list_for_each_entry(p, compound_pagelist, lru) {
644 if (page == p)
645 goto next;
646 }
647 }
b46e756f
KS
648
649 /*
650 * We can do it before isolate_lru_page because the
651 * page can't be freed from under us. NOTE: PG_lock
652 * is needed to serialize against split_huge_page
653 * when invoked from the VM.
654 */
655 if (!trylock_page(page)) {
656 result = SCAN_PAGE_LOCK;
657 goto out;
658 }
659
660 /*
9445689f
KS
661 * Check if the page has any GUP (or other external) pins.
662 *
663 * The page table that maps the page has been already unlinked
664 * from the page table tree and this process cannot get
f0953a1b 665 * an additional pin on the page.
9445689f
KS
666 *
667 * New pins can come later if the page is shared across fork,
668 * but not from this process. The other process cannot write to
669 * the page, only trigger CoW.
b46e756f 670 */
9445689f 671 if (!is_refcount_suitable(page)) {
b46e756f
KS
672 unlock_page(page);
673 result = SCAN_PAGE_COUNT;
674 goto out;
675 }
b46e756f
KS
676
677 /*
678 * Isolate the page to avoid collapsing an hugepage
679 * currently in use by the VM.
680 */
681 if (isolate_lru_page(page)) {
682 unlock_page(page);
683 result = SCAN_DEL_PAGE_LRU;
684 goto out;
685 }
5503fbf2
KS
686 mod_node_page_state(page_pgdat(page),
687 NR_ISOLATED_ANON + page_is_file_lru(page),
688 compound_nr(page));
b46e756f
KS
689 VM_BUG_ON_PAGE(!PageLocked(page), page);
690 VM_BUG_ON_PAGE(PageLRU(page), page);
691
5503fbf2
KS
692 if (PageCompound(page))
693 list_add_tail(&page->lru, compound_pagelist);
694next:
0db501f7 695 /* There should be enough young pte to collapse the page */
b46e756f
KS
696 if (pte_young(pteval) ||
697 page_is_young(page) || PageReferenced(page) ||
698 mmu_notifier_test_young(vma->vm_mm, address))
0db501f7 699 referenced++;
5503fbf2
KS
700
701 if (pte_write(pteval))
702 writable = true;
b46e756f 703 }
74e579bf
ML
704
705 if (unlikely(!writable)) {
b46e756f 706 result = SCAN_PAGE_RO;
74e579bf
ML
707 } else if (unlikely(!referenced)) {
708 result = SCAN_LACK_REFERENCED_PAGE;
709 } else {
710 result = SCAN_SUCCEED;
711 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
712 referenced, writable, result);
713 return 1;
b46e756f 714 }
b46e756f 715out:
5503fbf2 716 release_pte_pages(pte, _pte, compound_pagelist);
b46e756f
KS
717 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
718 referenced, writable, result);
719 return 0;
720}
721
722static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
723 struct vm_area_struct *vma,
724 unsigned long address,
5503fbf2
KS
725 spinlock_t *ptl,
726 struct list_head *compound_pagelist)
b46e756f 727{
5503fbf2 728 struct page *src_page, *tmp;
b46e756f 729 pte_t *_pte;
338a16ba
DR
730 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
731 _pte++, page++, address += PAGE_SIZE) {
b46e756f 732 pte_t pteval = *_pte;
b46e756f
KS
733
734 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
735 clear_user_highpage(page, address);
736 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
737 if (is_zero_pfn(pte_pfn(pteval))) {
738 /*
739 * ptl mostly unnecessary.
740 */
741 spin_lock(ptl);
08d5b29e 742 ptep_clear(vma->vm_mm, address, _pte);
b46e756f
KS
743 spin_unlock(ptl);
744 }
745 } else {
746 src_page = pte_page(pteval);
747 copy_user_highpage(page, src_page, address, vma);
5503fbf2
KS
748 if (!PageCompound(src_page))
749 release_pte_page(src_page);
b46e756f
KS
750 /*
751 * ptl mostly unnecessary, but preempt has to
752 * be disabled to update the per-cpu stats
753 * inside page_remove_rmap().
754 */
755 spin_lock(ptl);
08d5b29e 756 ptep_clear(vma->vm_mm, address, _pte);
cea86fe2 757 page_remove_rmap(src_page, vma, false);
b46e756f
KS
758 spin_unlock(ptl);
759 free_page_and_swap_cache(src_page);
760 }
b46e756f 761 }
5503fbf2
KS
762
763 list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
764 list_del(&src_page->lru);
765 release_pte_page(src_page);
766 }
b46e756f
KS
767}
768
769static void khugepaged_alloc_sleep(void)
770{
771 DEFINE_WAIT(wait);
772
773 add_wait_queue(&khugepaged_wait, &wait);
774 freezable_schedule_timeout_interruptible(
775 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
776 remove_wait_queue(&khugepaged_wait, &wait);
777}
778
779static int khugepaged_node_load[MAX_NUMNODES];
780
781static bool khugepaged_scan_abort(int nid)
782{
783 int i;
784
785 /*
a5f5f91d 786 * If node_reclaim_mode is disabled, then no extra effort is made to
b46e756f
KS
787 * allocate memory locally.
788 */
202e35db 789 if (!node_reclaim_enabled())
b46e756f
KS
790 return false;
791
792 /* If there is a count for this node already, it must be acceptable */
793 if (khugepaged_node_load[nid])
794 return false;
795
796 for (i = 0; i < MAX_NUMNODES; i++) {
797 if (!khugepaged_node_load[i])
798 continue;
a55c7454 799 if (node_distance(nid, i) > node_reclaim_distance)
b46e756f
KS
800 return true;
801 }
802 return false;
803}
804
805/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
806static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
807{
25160354 808 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
b46e756f
KS
809}
810
811#ifdef CONFIG_NUMA
812static int khugepaged_find_target_node(void)
813{
814 static int last_khugepaged_target_node = NUMA_NO_NODE;
815 int nid, target_node = 0, max_value = 0;
816
817 /* find first node with max normal pages hit */
818 for (nid = 0; nid < MAX_NUMNODES; nid++)
819 if (khugepaged_node_load[nid] > max_value) {
820 max_value = khugepaged_node_load[nid];
821 target_node = nid;
822 }
823
824 /* do some balance if several nodes have the same hit record */
825 if (target_node <= last_khugepaged_target_node)
826 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
827 nid++)
828 if (max_value == khugepaged_node_load[nid]) {
829 target_node = nid;
830 break;
831 }
832
833 last_khugepaged_target_node = target_node;
834 return target_node;
835}
836
837static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
838{
839 if (IS_ERR(*hpage)) {
840 if (!*wait)
841 return false;
842
843 *wait = false;
844 *hpage = NULL;
845 khugepaged_alloc_sleep();
846 } else if (*hpage) {
847 put_page(*hpage);
848 *hpage = NULL;
849 }
850
851 return true;
852}
853
854static struct page *
988ddb71 855khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
b46e756f
KS
856{
857 VM_BUG_ON_PAGE(*hpage, *hpage);
858
b46e756f
KS
859 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
860 if (unlikely(!*hpage)) {
861 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
862 *hpage = ERR_PTR(-ENOMEM);
863 return NULL;
864 }
865
866 prep_transhuge_page(*hpage);
867 count_vm_event(THP_COLLAPSE_ALLOC);
868 return *hpage;
869}
870#else
871static int khugepaged_find_target_node(void)
872{
873 return 0;
874}
875
876static inline struct page *alloc_khugepaged_hugepage(void)
877{
878 struct page *page;
879
880 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
881 HPAGE_PMD_ORDER);
882 if (page)
883 prep_transhuge_page(page);
884 return page;
885}
886
887static struct page *khugepaged_alloc_hugepage(bool *wait)
888{
889 struct page *hpage;
890
891 do {
892 hpage = alloc_khugepaged_hugepage();
893 if (!hpage) {
894 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
895 if (!*wait)
896 return NULL;
897
898 *wait = false;
899 khugepaged_alloc_sleep();
900 } else
901 count_vm_event(THP_COLLAPSE_ALLOC);
902 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
903
904 return hpage;
905}
906
907static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
908{
033b5d77
HD
909 /*
910 * If the hpage allocated earlier was briefly exposed in page cache
911 * before collapse_file() failed, it is possible that racing lookups
912 * have not yet completed, and would then be unpleasantly surprised by
913 * finding the hpage reused for the same mapping at a different offset.
914 * Just release the previous allocation if there is any danger of that.
915 */
916 if (*hpage && page_count(*hpage) > 1) {
917 put_page(*hpage);
918 *hpage = NULL;
919 }
920
b46e756f
KS
921 if (!*hpage)
922 *hpage = khugepaged_alloc_hugepage(wait);
923
924 if (unlikely(!*hpage))
925 return false;
926
927 return true;
928}
929
930static struct page *
988ddb71 931khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
b46e756f 932{
b46e756f
KS
933 VM_BUG_ON(!*hpage);
934
935 return *hpage;
936}
937#endif
938
/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * before taking the mmap_lock.
 * Returns 0 if it succeeds, otherwise a non-zero value (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return SCAN_VMA_CHECK;
	return 0;
}
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Note that if false is returned, mmap_lock will be released.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		ret = do_swap_page(&vmf);

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and the swap entry will remain in the
		 * pagetable, resulting in later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		swapped_in++;
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}
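/*
 * collapse_huge_page() performs the actual collapse of one anonymous,
 * PMD-aligned range: allocate the huge page with the mmap_lock dropped,
 * revalidate the vma, swap in missing pages, then re-take the mmap_lock for
 * write together with the anon_vma lock, clear the old PMD with
 * pmdp_collapse_flush() so GUP-fast cannot race, isolate and copy the small
 * pages, and finally install the huge PMD. If isolation fails after the PMD
 * was cleared, the old page table is re-populated and the collapse aborts.
 */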
1031static void collapse_huge_page(struct mm_struct *mm,
1032 unsigned long address,
1033 struct page **hpage,
ffe945e6 1034 int node, int referenced, int unmapped)
b46e756f 1035{
5503fbf2 1036 LIST_HEAD(compound_pagelist);
b46e756f
KS
1037 pmd_t *pmd, _pmd;
1038 pte_t *pte;
1039 pgtable_t pgtable;
1040 struct page *new_page;
1041 spinlock_t *pmd_ptl, *pte_ptl;
1042 int isolated = 0, result = 0;
c131f751 1043 struct vm_area_struct *vma;
ac46d4f3 1044 struct mmu_notifier_range range;
b46e756f
KS
1045 gfp_t gfp;
1046
1047 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1048
1049 /* Only allocate from the target node */
41b6167e 1050 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
b46e756f 1051
988ddb71 1052 /*
c1e8d7c6 1053 * Before allocating the hugepage, release the mmap_lock read lock.
988ddb71 1054 * The allocation can take potentially a long time if it involves
c1e8d7c6 1055 * sync compaction, and we do not need to hold the mmap_lock during
988ddb71
KS
1056 * that. We will recheck the vma after taking it again in write mode.
1057 */
d8ed45c5 1058 mmap_read_unlock(mm);
988ddb71 1059 new_page = khugepaged_alloc_page(hpage, gfp, node);
b46e756f
KS
1060 if (!new_page) {
1061 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1062 goto out_nolock;
1063 }
1064
8f425e4e 1065 if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
b46e756f
KS
1066 result = SCAN_CGROUP_CHARGE_FAIL;
1067 goto out_nolock;
1068 }
9d82c694 1069 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
b46e756f 1070
d8ed45c5 1071 mmap_read_lock(mm);
c131f751 1072 result = hugepage_vma_revalidate(mm, address, &vma);
b46e756f 1073 if (result) {
d8ed45c5 1074 mmap_read_unlock(mm);
b46e756f
KS
1075 goto out_nolock;
1076 }
1077
1078 pmd = mm_find_pmd(mm, address);
1079 if (!pmd) {
1080 result = SCAN_PMD_NULL;
d8ed45c5 1081 mmap_read_unlock(mm);
b46e756f
KS
1082 goto out_nolock;
1083 }
1084
1085 /*
4d928e20
ML
1086 * __collapse_huge_page_swapin will return with mmap_lock released
1087 * when it fails. So we jump out_nolock directly in that case.
b46e756f
KS
1088 * Continuing to collapse causes inconsistency.
1089 */
ffe945e6
KS
1090 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1091 pmd, referenced)) {
b46e756f
KS
1092 goto out_nolock;
1093 }
1094
d8ed45c5 1095 mmap_read_unlock(mm);
b46e756f
KS
1096 /*
1097 * Prevent all access to pagetables with the exception of
1098 * gup_fast later handled by the ptep_clear_flush and the VM
1099 * handled by the anon_vma lock + PG_lock.
1100 */
d8ed45c5 1101 mmap_write_lock(mm);
c131f751 1102 result = hugepage_vma_revalidate(mm, address, &vma);
b46e756f 1103 if (result)
18d24a7c 1104 goto out_up_write;
b46e756f
KS
1105 /* check if the pmd is still valid */
1106 if (mm_find_pmd(mm, address) != pmd)
18d24a7c 1107 goto out_up_write;
b46e756f
KS
1108
1109 anon_vma_lock_write(vma->anon_vma);
1110
7269f999 1111 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
6f4f13e8 1112 address, address + HPAGE_PMD_SIZE);
ac46d4f3 1113 mmu_notifier_invalidate_range_start(&range);
ec649c9d
VS
1114
1115 pte = pte_offset_map(pmd, address);
1116 pte_ptl = pte_lockptr(mm, pmd);
1117
b46e756f
KS
1118 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1119 /*
1120 * After this gup_fast can't run anymore. This also removes
1121 * any huge TLB entry from the CPU so we won't allow
1122 * huge and small TLB entries for the same virtual address
1123 * to avoid the risk of CPU bugs in that area.
1124 */
1125 _pmd = pmdp_collapse_flush(vma, address, pmd);
1126 spin_unlock(pmd_ptl);
ac46d4f3 1127 mmu_notifier_invalidate_range_end(&range);
b46e756f
KS
1128
1129 spin_lock(pte_ptl);
5503fbf2
KS
1130 isolated = __collapse_huge_page_isolate(vma, address, pte,
1131 &compound_pagelist);
b46e756f
KS
1132 spin_unlock(pte_ptl);
1133
1134 if (unlikely(!isolated)) {
1135 pte_unmap(pte);
1136 spin_lock(pmd_ptl);
1137 BUG_ON(!pmd_none(*pmd));
1138 /*
1139 * We can only use set_pmd_at when establishing
1140 * hugepmds and never for establishing regular pmds that
1141 * points to regular pagetables. Use pmd_populate for that
1142 */
1143 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1144 spin_unlock(pmd_ptl);
1145 anon_vma_unlock_write(vma->anon_vma);
1146 result = SCAN_FAIL;
18d24a7c 1147 goto out_up_write;
b46e756f
KS
1148 }
1149
1150 /*
1151 * All pages are isolated and locked so anon_vma rmap
1152 * can't run anymore.
1153 */
1154 anon_vma_unlock_write(vma->anon_vma);
1155
5503fbf2
KS
1156 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1157 &compound_pagelist);
b46e756f 1158 pte_unmap(pte);
588d01f9
ML
1159 /*
1160 * spin_lock() below is not the equivalent of smp_wmb(), but
1161 * the smp_wmb() inside __SetPageUptodate() can be reused to
1162 * avoid the copy_huge_page writes to become visible after
1163 * the set_pmd_at() write.
1164 */
b46e756f
KS
1165 __SetPageUptodate(new_page);
1166 pgtable = pmd_pgtable(_pmd);
1167
1168 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
f55e1014 1169 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
b46e756f 1170
b46e756f
KS
1171 spin_lock(pmd_ptl);
1172 BUG_ON(!pmd_none(*pmd));
40f2bbf7 1173 page_add_new_anon_rmap(new_page, vma, address);
b518154e 1174 lru_cache_add_inactive_or_unevictable(new_page, vma);
b46e756f
KS
1175 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1176 set_pmd_at(mm, address, pmd, _pmd);
1177 update_mmu_cache_pmd(vma, address, pmd);
1178 spin_unlock(pmd_ptl);
1179
1180 *hpage = NULL;
1181
1182 khugepaged_pages_collapsed++;
1183 result = SCAN_SUCCEED;
1184out_up_write:
d8ed45c5 1185 mmap_write_unlock(mm);
b46e756f 1186out_nolock:
9d82c694 1187 if (!IS_ERR_OR_NULL(*hpage))
bbc6b703 1188 mem_cgroup_uncharge(page_folio(*hpage));
b46e756f
KS
1189 trace_mm_collapse_huge_page(mm, isolated, result);
1190 return;
b46e756f
KS
1191}
1192
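/*
 * khugepaged_scan_pmd() scans one pmd-sized, pmd-aligned range and decides
 * whether it is worth collapsing: the range must contain at least one
 * writable, referenced anonymous page and stay within the max_ptes_none,
 * max_ptes_swap and max_ptes_shared limits. On success it calls
 * collapse_huge_page(), which returns with the mmap_lock released.
 */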
1193static int khugepaged_scan_pmd(struct mm_struct *mm,
1194 struct vm_area_struct *vma,
1195 unsigned long address,
1196 struct page **hpage)
1197{
1198 pmd_t *pmd;
1199 pte_t *pte, *_pte;
71a2c112
KS
1200 int ret = 0, result = 0, referenced = 0;
1201 int none_or_zero = 0, shared = 0;
b46e756f
KS
1202 struct page *page = NULL;
1203 unsigned long _address;
1204 spinlock_t *ptl;
1205 int node = NUMA_NO_NODE, unmapped = 0;
0db501f7 1206 bool writable = false;
b46e756f
KS
1207
1208 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1209
1210 pmd = mm_find_pmd(mm, address);
1211 if (!pmd) {
1212 result = SCAN_PMD_NULL;
1213 goto out;
1214 }
1215
1216 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1217 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
36ee2c78 1218 for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
b46e756f
KS
1219 _pte++, _address += PAGE_SIZE) {
1220 pte_t pteval = *_pte;
1221 if (is_swap_pte(pteval)) {
1222 if (++unmapped <= khugepaged_max_ptes_swap) {
e1e267c7
PX
1223 /*
1224 * Always be strict with uffd-wp
1225 * enabled swap entries. Please see
1226 * comment below for pte_uffd_wp().
1227 */
1228 if (pte_swp_uffd_wp(pteval)) {
1229 result = SCAN_PTE_UFFD_WP;
1230 goto out_unmap;
1231 }
b46e756f
KS
1232 continue;
1233 } else {
1234 result = SCAN_EXCEED_SWAP_PTE;
e9ea874a 1235 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
b46e756f
KS
1236 goto out_unmap;
1237 }
1238 }
1239 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1240 if (!userfaultfd_armed(vma) &&
1241 ++none_or_zero <= khugepaged_max_ptes_none) {
1242 continue;
1243 } else {
1244 result = SCAN_EXCEED_NONE_PTE;
e9ea874a 1245 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
b46e756f
KS
1246 goto out_unmap;
1247 }
1248 }
e1e267c7
PX
1249 if (pte_uffd_wp(pteval)) {
1250 /*
1251 * Don't collapse the page if any of the small
1252 * PTEs are armed with uffd write protection.
1253 * Here we can also mark the new huge pmd as
1254 * write protected if any of the small ones is
8958b249 1255 * marked but that could bring unknown
e1e267c7
PX
1256 * userfault messages that falls outside of
1257 * the registered range. So, just be simple.
1258 */
1259 result = SCAN_PTE_UFFD_WP;
1260 goto out_unmap;
1261 }
b46e756f
KS
1262 if (pte_write(pteval))
1263 writable = true;
1264
1265 page = vm_normal_page(vma, _address, pteval);
1266 if (unlikely(!page)) {
1267 result = SCAN_PAGE_NULL;
1268 goto out_unmap;
1269 }
1270
71a2c112
KS
1271 if (page_mapcount(page) > 1 &&
1272 ++shared > khugepaged_max_ptes_shared) {
1273 result = SCAN_EXCEED_SHARED_PTE;
e9ea874a 1274 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
71a2c112
KS
1275 goto out_unmap;
1276 }
1277
5503fbf2 1278 page = compound_head(page);
b46e756f
KS
1279
		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[]. Khugepaged will
		 * allocate the hugepage from the node with the max hit record.
		 */
1284 * hit record.
1285 */
1286 node = page_to_nid(page);
1287 if (khugepaged_scan_abort(node)) {
1288 result = SCAN_SCAN_ABORT;
1289 goto out_unmap;
1290 }
1291 khugepaged_node_load[node]++;
1292 if (!PageLRU(page)) {
1293 result = SCAN_PAGE_LRU;
1294 goto out_unmap;
1295 }
1296 if (PageLocked(page)) {
1297 result = SCAN_PAGE_LOCK;
1298 goto out_unmap;
1299 }
1300 if (!PageAnon(page)) {
1301 result = SCAN_PAGE_ANON;
1302 goto out_unmap;
1303 }
1304
		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy: it may see total_mapcount > refcount
		 * in some cases.
		 * For example, take one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying to unmap the whole PMD, but khugepaged
		 * may be scanning the parent between the child clearing the
		 * PageDoubleMap flag and decrementing the mapcount. So
		 * khugepaged may see total_mapcount > refcount.
		 *
		 * But such a case is ephemeral; we can always retry the
		 * collapse later. However, it may report a false positive if
		 * the page has excessive GUP pins (i.e. 512). Anyway, the same
		 * check will be done again later, so the risk seems low.
		 */
9445689f 1322 if (!is_refcount_suitable(page)) {
b46e756f
KS
1323 result = SCAN_PAGE_COUNT;
1324 goto out_unmap;
1325 }
1326 if (pte_young(pteval) ||
1327 page_is_young(page) || PageReferenced(page) ||
1328 mmu_notifier_test_young(vma->vm_mm, address))
0db501f7 1329 referenced++;
b46e756f 1330 }
ffe945e6 1331 if (!writable) {
b46e756f 1332 result = SCAN_PAGE_RO;
ffe945e6
KS
1333 } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1334 result = SCAN_LACK_REFERENCED_PAGE;
1335 } else {
1336 result = SCAN_SUCCEED;
1337 ret = 1;
b46e756f
KS
1338 }
1339out_unmap:
1340 pte_unmap_unlock(pte, ptl);
1341 if (ret) {
1342 node = khugepaged_find_target_node();
c1e8d7c6 1343 /* collapse_huge_page will return with the mmap_lock released */
ffe945e6
KS
1344 collapse_huge_page(mm, address, hpage, node,
1345 referenced, unmapped);
b46e756f
KS
1346 }
1347out:
1348 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1349 none_or_zero, result, unmapped);
1350 return ret;
1351}
1352
1353static void collect_mm_slot(struct mm_slot *mm_slot)
1354{
1355 struct mm_struct *mm = mm_slot->mm;
1356
35f3aa39 1357 lockdep_assert_held(&khugepaged_mm_lock);
b46e756f
KS
1358
1359 if (khugepaged_test_exit(mm)) {
1360 /* free mm_slot */
1361 hash_del(&mm_slot->hash);
1362 list_del(&mm_slot->mm_node);
1363
1364 /*
1365 * Not strictly needed because the mm exited already.
1366 *
1367 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1368 */
1369
1370 /* khugepaged_mm_lock actually not necessary for the below */
1371 free_mm_slot(mm_slot);
1372 mmdrop(mm);
1373 }
1374}
1375
396bcc52 1376#ifdef CONFIG_SHMEM
27e1f827
SL
1377/*
1378 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
1379 * khugepaged should try to collapse the page table.
1380 */
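/*
 * A "pte-mapped THP" is a huge page in the page cache that this mm maps with
 * individual ptes rather than a huge pmd (for example when the page table
 * could not be retracted right away in retract_page_tables()). Recording the
 * address lets khugepaged retry the pmd collapse later via
 * collapse_pte_mapped_thp().
 */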
1381static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1382 unsigned long addr)
1383{
1384 struct mm_slot *mm_slot;
1385
1386 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1387
1388 spin_lock(&khugepaged_mm_lock);
1389 mm_slot = get_mm_slot(mm);
1390 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1391 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1392 spin_unlock(&khugepaged_mm_lock);
1393 return 0;
1394}
1395
e59a47b8
PT
1396static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
1397 unsigned long addr, pmd_t *pmdp)
1398{
1399 spinlock_t *ptl;
1400 pmd_t pmd;
1401
80110bbf 1402 mmap_assert_write_locked(mm);
e59a47b8
PT
1403 ptl = pmd_lock(vma->vm_mm, pmdp);
1404 pmd = pmdp_collapse_flush(vma, addr, pmdp);
1405 spin_unlock(ptl);
1406 mm_dec_nr_ptes(mm);
80110bbf 1407 page_table_check_pte_clear_range(mm, addr, pmd);
e59a47b8
PT
1408 pte_free(mm, pmd_pgtable(pmd));
1409}
1410
27e1f827 1411/**
336e6b53
AS
1412 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1413 * address haddr.
1414 *
1415 * @mm: process address space where collapse happens
1416 * @addr: THP collapse address
27e1f827
SL
1417 *
1418 * This function checks whether all the PTEs in the PMD are pointing to the
1419 * right THP. If so, retract the page table so the THP can refault in with
1420 * as pmd-mapped.
1421 */
1422void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1423{
1424 unsigned long haddr = addr & HPAGE_PMD_MASK;
1425 struct vm_area_struct *vma = find_vma(mm, haddr);
119a5fc1 1426 struct page *hpage;
27e1f827 1427 pte_t *start_pte, *pte;
e59a47b8 1428 pmd_t *pmd;
27e1f827
SL
1429 spinlock_t *ptl;
1430 int count = 0;
1431 int i;
1432
1433 if (!vma || !vma->vm_file ||
fef792a4 1434 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
27e1f827
SL
1435 return;
1436
	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * a valid THP. Add the extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for a missing VM_HUGEPAGE.
	 */
1443 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1444 return;
1445
deb4c93a
PX
1446 /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1447 if (userfaultfd_wp(vma))
1448 return;
1449
119a5fc1
HD
1450 hpage = find_lock_page(vma->vm_file->f_mapping,
1451 linear_page_index(vma, haddr));
1452 if (!hpage)
1453 return;
1454
1455 if (!PageHead(hpage))
1456 goto drop_hpage;
1457
27e1f827
SL
1458 pmd = mm_find_pmd(mm, haddr);
1459 if (!pmd)
119a5fc1 1460 goto drop_hpage;
27e1f827
SL
1461
1462 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1463
1464 /* step 1: check all mapped PTEs are to the right huge page */
1465 for (i = 0, addr = haddr, pte = start_pte;
1466 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1467 struct page *page;
1468
1469 /* empty pte, skip */
1470 if (pte_none(*pte))
1471 continue;
1472
1473 /* page swapped out, abort */
1474 if (!pte_present(*pte))
1475 goto abort;
1476
1477 page = vm_normal_page(vma, addr, *pte);
1478
27e1f827 1479 /*
119a5fc1
HD
1480 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1481 * page table, but the new page will not be a subpage of hpage.
27e1f827 1482 */
119a5fc1 1483 if (hpage + i != page)
27e1f827
SL
1484 goto abort;
1485 count++;
1486 }
1487
1488 /* step 2: adjust rmap */
1489 for (i = 0, addr = haddr, pte = start_pte;
1490 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1491 struct page *page;
1492
1493 if (pte_none(*pte))
1494 continue;
1495 page = vm_normal_page(vma, addr, *pte);
cea86fe2 1496 page_remove_rmap(page, vma, false);
27e1f827
SL
1497 }
1498
1499 pte_unmap_unlock(start_pte, ptl);
1500
1501 /* step 3: set proper refcount and mm_counters. */
119a5fc1 1502 if (count) {
27e1f827
SL
1503 page_ref_sub(hpage, count);
1504 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1505 }
1506
1507 /* step 4: collapse pmd */
e59a47b8 1508 collapse_and_free_pmd(mm, vma, haddr, pmd);
119a5fc1
HD
1509drop_hpage:
1510 unlock_page(hpage);
1511 put_page(hpage);
27e1f827
SL
1512 return;
1513
1514abort:
1515 pte_unmap_unlock(start_pte, ptl);
119a5fc1 1516 goto drop_hpage;
27e1f827
SL
1517}
1518
0edf61e5 1519static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
27e1f827
SL
1520{
1521 struct mm_struct *mm = mm_slot->mm;
1522 int i;
1523
1524 if (likely(mm_slot->nr_pte_mapped_thp == 0))
0edf61e5 1525 return;
27e1f827 1526
d8ed45c5 1527 if (!mmap_write_trylock(mm))
0edf61e5 1528 return;
27e1f827
SL
1529
1530 if (unlikely(khugepaged_test_exit(mm)))
1531 goto out;
1532
1533 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1534 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1535
1536out:
1537 mm_slot->nr_pte_mapped_thp = 0;
d8ed45c5 1538 mmap_write_unlock(mm);
27e1f827
SL
1539}
1540
f3f0e1d2
KS
1541static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1542{
1543 struct vm_area_struct *vma;
18e77600 1544 struct mm_struct *mm;
f3f0e1d2 1545 unsigned long addr;
e59a47b8 1546 pmd_t *pmd;
f3f0e1d2
KS
1547
1548 i_mmap_lock_write(mapping);
1549 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
27e1f827
SL
1550 /*
1551 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1552 * got written to. These VMAs are likely not worth investing
3e4e28c5 1553 * mmap_write_lock(mm) as PMD-mapping is likely to be split
27e1f827
SL
1554 * later.
1555 *
36ee2c78 1556 * Note that vma->anon_vma check is racy: it can be set up after
c1e8d7c6 1557 * the check but before we took mmap_lock by the fault path.
27e1f827
SL
1558 * But page lock would prevent establishing any new ptes of the
1559 * page, so we are safe.
1560 *
1561 * An alternative would be drop the check, but check that page
1562 * table is clear before calling pmdp_collapse_flush() under
1563 * ptl. It has higher chance to recover THP for the VMA, but
1564 * has higher cost too.
1565 */
f3f0e1d2
KS
1566 if (vma->anon_vma)
1567 continue;
1568 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1569 if (addr & ~HPAGE_PMD_MASK)
1570 continue;
1571 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1572 continue;
18e77600
HD
1573 mm = vma->vm_mm;
1574 pmd = mm_find_pmd(mm, addr);
f3f0e1d2
KS
1575 if (!pmd)
1576 continue;
1577 /*
c1e8d7c6 1578 * We need exclusive mmap_lock to retract page table.
27e1f827
SL
1579 *
1580 * We use trylock due to lock inversion: we need to acquire
c1e8d7c6 1581 * mmap_lock while holding page lock. Fault path does it in
27e1f827 1582 * reverse order. Trylock is a way to avoid deadlock.
f3f0e1d2 1583 */
18e77600 1584 if (mmap_write_trylock(mm)) {
deb4c93a
PX
1585 /*
1586 * When a vma is registered with uffd-wp, we can't
1587 * recycle the pmd pgtable because there can be pte
1588 * markers installed. Skip it only, so the rest mm/vma
1589 * can still have the same file mapped hugely, however
1590 * it'll always mapped in small page size for uffd-wp
1591 * registered ranges.
1592 */
1593 if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
e59a47b8 1594 collapse_and_free_pmd(mm, vma, addr, pmd);
18e77600 1595 mmap_write_unlock(mm);
27e1f827
SL
1596 } else {
1597 /* Try again later */
18e77600 1598 khugepaged_add_pte_mapped_thp(mm, addr);
f3f0e1d2
KS
1599 }
1600 }
1601 i_mmap_unlock_write(mapping);
1602}
1603
/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
 *
 * @mm: process address space where collapse happens
 * @file: file that collapse on
 * @start: collapse start address
 * @hpage: new allocated huge page for collapse
 * @node: appointed node the new huge page allocate from
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed;
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static void collapse_file(struct mm_struct *mm,
			  struct file *file, pgoff_t start,
			  struct page **hpage, int node)
{
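	/*
	 * Note: each old page is only replaced once its refcount can be
	 * frozen at 3 (our pin, the page cache reference and the LRU
	 * isolation), so nobody else can be using it; on failure the frozen
	 * pages are unfrozen and put back into the mapping.
	 */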
579c571e 1632 struct address_space *mapping = file->f_mapping;
f3f0e1d2 1633 gfp_t gfp;
77da9389 1634 struct page *new_page;
f3f0e1d2
KS
1635 pgoff_t index, end = start + HPAGE_PMD_NR;
1636 LIST_HEAD(pagelist);
77da9389 1637 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
f3f0e1d2 1638 int nr_none = 0, result = SCAN_SUCCEED;
99cb0dbd 1639 bool is_shmem = shmem_file(file);
bf9ecead 1640 int nr;
f3f0e1d2 1641
99cb0dbd 1642 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
f3f0e1d2
KS
1643 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1644
1645 /* Only allocate from the target node */
41b6167e 1646 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
f3f0e1d2
KS
1647
1648 new_page = khugepaged_alloc_page(hpage, gfp, node);
1649 if (!new_page) {
1650 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1651 goto out;
1652 }
1653
8f425e4e 1654 if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
f3f0e1d2
KS
1655 result = SCAN_CGROUP_CHARGE_FAIL;
1656 goto out;
1657 }
9d82c694 1658 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
f3f0e1d2 1659
6b24ca4a
MWO
1660 /*
1661 * Ensure we have slots for all the pages in the range. This is
1662 * almost certainly a no-op because most of the pages must be present
1663 */
95feeabb
HD
1664 do {
1665 xas_lock_irq(&xas);
1666 xas_create_range(&xas);
1667 if (!xas_error(&xas))
1668 break;
1669 xas_unlock_irq(&xas);
1670 if (!xas_nomem(&xas, GFP_KERNEL)) {
95feeabb
HD
1671 result = SCAN_FAIL;
1672 goto out;
1673 }
1674 } while (1);
1675
042a3082 1676 __SetPageLocked(new_page);
99cb0dbd
SL
1677 if (is_shmem)
1678 __SetPageSwapBacked(new_page);
f3f0e1d2
KS
1679 new_page->index = start;
1680 new_page->mapping = mapping;
f3f0e1d2 1681
f3f0e1d2 1682 /*
87c460a0
HD
1683 * At this point the new_page is locked and not up-to-date.
1684 * It's safe to insert it into the page cache, because nobody would
1685 * be able to map it or use it in another way until we unlock it.
f3f0e1d2
KS
1686 */
1687
77da9389
MW
1688 xas_set(&xas, start);
1689 for (index = start; index < end; index++) {
1690 struct page *page = xas_next(&xas);
1691
1692 VM_BUG_ON(index != xas.xa_index);
99cb0dbd
SL
1693 if (is_shmem) {
1694 if (!page) {
1695 /*
1696 * Stop if extent has been truncated or
1697 * hole-punched, and is now completely
1698 * empty.
1699 */
1700 if (index == start) {
1701 if (!xas_next_entry(&xas, end - 1)) {
1702 result = SCAN_TRUNCATED;
1703 goto xa_locked;
1704 }
1705 xas_set(&xas, index);
1706 }
1707 if (!shmem_charge(mapping->host, 1)) {
1708 result = SCAN_FAIL;
042a3082 1709 goto xa_locked;
701270fa 1710 }
99cb0dbd
SL
1711 xas_store(&xas, new_page);
1712 nr_none++;
1713 continue;
701270fa 1714 }
99cb0dbd
SL
1715
1716 if (xa_is_value(page) || !PageUptodate(page)) {
1717 xas_unlock_irq(&xas);
1718 /* swap in or instantiate fallocated page */
1719 if (shmem_getpage(mapping->host, index, &page,
acdd9f8e 1720 SGP_NOALLOC)) {
99cb0dbd
SL
1721 result = SCAN_FAIL;
1722 goto xa_unlocked;
1723 }
1724 } else if (trylock_page(page)) {
1725 get_page(page);
1726 xas_unlock_irq(&xas);
1727 } else {
1728 result = SCAN_PAGE_LOCK;
042a3082 1729 goto xa_locked;
77da9389 1730 }
99cb0dbd
SL
1731 } else { /* !is_shmem */
1732 if (!page || xa_is_value(page)) {
1733 xas_unlock_irq(&xas);
1734 page_cache_sync_readahead(mapping, &file->f_ra,
1735 file, index,
e5a59d30 1736 end - index);
99cb0dbd
SL
1737 /* drain pagevecs to help isolate_lru_page() */
1738 lru_add_drain();
1739 page = find_lock_page(mapping, index);
1740 if (unlikely(page == NULL)) {
1741 result = SCAN_FAIL;
1742 goto xa_unlocked;
1743 }
75f36069
SL
1744 } else if (PageDirty(page)) {
1745 /*
1746 * khugepaged only works on read-only fd,
1747 * so this page is dirty because it hasn't
1748 * been flushed since first write. There
1749 * won't be new dirty pages.
1750 *
1751 * Trigger async flush here and hope the
1752 * writeback is done when khugepaged
1753 * revisits this page.
1754 *
1755 * This is a one-off situation. We are not
1756 * forcing writeback in loop.
1757 */
1758 xas_unlock_irq(&xas);
1759 filemap_flush(mapping);
1760 result = SCAN_FAIL;
1761 goto xa_unlocked;
74c42e1b
RW
1762 } else if (PageWriteback(page)) {
1763 xas_unlock_irq(&xas);
1764 result = SCAN_FAIL;
1765 goto xa_unlocked;
99cb0dbd
SL
1766 } else if (trylock_page(page)) {
1767 get_page(page);
1768 xas_unlock_irq(&xas);
1769 } else {
1770 result = SCAN_PAGE_LOCK;
1771 goto xa_locked;
f3f0e1d2 1772 }
f3f0e1d2
KS
1773 }
1774
1775 /*
b93b0163 1776 * The page must be locked, so we can drop the i_pages lock
f3f0e1d2
KS
1777 * without racing with truncate.
1778 */
1779 VM_BUG_ON_PAGE(!PageLocked(page), page);
4655e5e5
SL
1780
1781 /* make sure the page is up to date */
1782 if (unlikely(!PageUptodate(page))) {
1783 result = SCAN_FAIL;
1784 goto out_unlock;
1785 }
06a5e126
HD
1786
1787 /*
1788 * If file was truncated then extended, or hole-punched, before
1789 * we locked the first page, then a THP might be there already.
1790 */
1791 if (PageTransCompound(page)) {
1792 result = SCAN_PAGE_COMPOUND;
1793 goto out_unlock;
1794 }
f3f0e1d2
KS
1795
1796 if (page_mapping(page) != mapping) {
1797 result = SCAN_TRUNCATED;
1798 goto out_unlock;
1799 }
f3f0e1d2 1800
74c42e1b
RW
1801 if (!is_shmem && (PageDirty(page) ||
1802 PageWriteback(page))) {
4655e5e5
SL
1803 /*
1804 * khugepaged only works on read-only fd, so this
1805 * page is dirty because it hasn't been flushed
1806 * since first write.
1807 */
1808 result = SCAN_FAIL;
1809 goto out_unlock;
1810 }
1811
f3f0e1d2
KS
1812 if (isolate_lru_page(page)) {
1813 result = SCAN_DEL_PAGE_LRU;
042a3082 1814 goto out_unlock;
f3f0e1d2
KS
1815 }
1816
99cb0dbd
SL
1817 if (page_has_private(page) &&
1818 !try_to_release_page(page, GFP_KERNEL)) {
1819 result = SCAN_PAGE_HAS_PRIVATE;
2f33a706 1820 putback_lru_page(page);
99cb0dbd
SL
1821 goto out_unlock;
1822 }
1823
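		/*
		 * TTU_IGNORE_MLOCK unmaps the page even from mlocked VMAs;
		 * TTU_BATCH_FLUSH defers the TLB flush so that it can be
		 * issued once for the whole range by try_to_unmap_flush()
		 * after the loop.
		 */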
f3f0e1d2 1824 if (page_mapped(page))
869f7ee6
MWO
1825 try_to_unmap(page_folio(page),
1826 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
f3f0e1d2 1827
77da9389
MW
1828 xas_lock_irq(&xas);
1829 xas_set(&xas, index);
f3f0e1d2 1830
77da9389 1831 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
f3f0e1d2
KS
1832
1833 /*
1834 * The page is expected to have page_count() == 3:
1835 * - we hold a pin on it;
77da9389 1836		 *  - one reference from the page cache;
f3f0e1d2
KS
 1837		 *  - one from isolate_lru_page();
1838 */
1839 if (!page_ref_freeze(page, 3)) {
1840 result = SCAN_PAGE_COUNT;
042a3082
HD
1841 xas_unlock_irq(&xas);
1842 putback_lru_page(page);
1843 goto out_unlock;
f3f0e1d2
KS
1844 }
1845
1846 /*
1847 * Add the page to the list to be able to undo the collapse if
 1848		 * something goes wrong.
1849 */
1850 list_add_tail(&page->lru, &pagelist);
1851
1852 /* Finally, replace with the new page. */
4101196b 1853 xas_store(&xas, new_page);
f3f0e1d2 1854 continue;
f3f0e1d2
KS
1855out_unlock:
1856 unlock_page(page);
1857 put_page(page);
042a3082 1858 goto xa_unlocked;
f3f0e1d2 1859 }
bf9ecead 1860 nr = thp_nr_pages(new_page);
f3f0e1d2 1861
99cb0dbd 1862 if (is_shmem)
57b2847d 1863 __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
09d91cda 1864 else {
bf9ecead 1865 __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
09d91cda 1866 filemap_nr_thps_inc(mapping);
eb6ecbed
CF
1867 /*
1868 * Paired with smp_mb() in do_dentry_open() to ensure
1869 * i_writecount is up to date and the update to nr_thps is
1870 * visible. Ensures the page cache will be truncated if the
1871 * file is opened writable.
1872 */
1873 smp_mb();
1874 if (inode_is_open_for_write(mapping->host)) {
1875 result = SCAN_FAIL;
1876 __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
1877 filemap_nr_thps_dec(mapping);
1878 goto xa_locked;
1879 }
09d91cda 1880 }
99cb0dbd 1881
042a3082 1882 if (nr_none) {
9d82c694 1883 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
2f55f070
ML
1884 /* nr_none is always 0 for non-shmem. */
1885 __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
042a3082
HD
1886 }
1887
6b24ca4a
MWO
1888 /* Join all the small entries into a single multi-index entry */
1889 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
1890 xas_store(&xas, new_page);
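	/*
	 * A single order-HPAGE_PMD_ORDER entry now covers the whole range,
	 * so a lookup of any index in it returns new_page, which is still
	 * locked and not yet marked Uptodate.
	 */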
042a3082
HD
1891xa_locked:
1892 xas_unlock_irq(&xas);
77da9389 1893xa_unlocked:
042a3082 1894
6d9df8a5
HD
1895 /*
 1896	 * If the collapse succeeded, the TLB flush must be done now, before
 1897	 * copying.  If it failed the flush may not strictly be needed, but
 1898	 * do it anyway to clear the deferred-flush state.
1899 */
1900 try_to_unmap_flush();
1901
f3f0e1d2 1902 if (result == SCAN_SUCCEED) {
77da9389 1903 struct page *page, *tmp;
f3f0e1d2
KS
1904
1905 /*
77da9389
MW
 1906		 * Replacing the old pages with the new one has succeeded; now we
 1907		 * need to copy the contents and free the old pages.
f3f0e1d2 1908 */
2af8ff29 1909 index = start;
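		/*
		 * Copy each old page into its slot of new_page; indices with
		 * no old page (the nr_none holes) are zero-filled with
		 * clear_highpage() so the huge page carries no stale data.
		 */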
f3f0e1d2 1910 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2af8ff29
HD
1911 while (index < page->index) {
1912 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1913 index++;
1914 }
f3f0e1d2
KS
1915 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1916 page);
1917 list_del(&page->lru);
f3f0e1d2 1918 page->mapping = NULL;
042a3082 1919 page_ref_unfreeze(page, 1);
f3f0e1d2
KS
1920 ClearPageActive(page);
1921 ClearPageUnevictable(page);
042a3082 1922 unlock_page(page);
f3f0e1d2 1923 put_page(page);
2af8ff29
HD
1924 index++;
1925 }
1926 while (index < end) {
1927 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1928 index++;
f3f0e1d2
KS
1929 }
1930
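		/*
		 * Only now mark the huge page Uptodate and add
		 * HPAGE_PMD_NR - 1 references so its refcount covers the
		 * page cache's references; readers that found new_page
		 * earlier saw it locked and not Uptodate.
		 */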
f3f0e1d2 1931 SetPageUptodate(new_page);
87c460a0 1932 page_ref_add(new_page, HPAGE_PMD_NR - 1);
6058eaec 1933 if (is_shmem)
99cb0dbd 1934 set_page_dirty(new_page);
6058eaec 1935 lru_cache_add(new_page);
f3f0e1d2 1936
042a3082
HD
1937 /*
1938 * Remove pte page tables, so we can re-fault the page as huge.
1939 */
1940 retract_page_tables(mapping, start);
f3f0e1d2 1941 *hpage = NULL;
87aa7529
YS
1942
1943 khugepaged_pages_collapsed++;
f3f0e1d2 1944 } else {
77da9389 1945 struct page *page;
aaa52e34 1946
77da9389 1947 /* Something went wrong: roll back page cache changes */
77da9389 1948 xas_lock_irq(&xas);
2f55f070
ML
1949 if (nr_none) {
1950 mapping->nrpages -= nr_none;
99cb0dbd 1951 shmem_uncharge(mapping->host, nr_none);
2f55f070 1952 }
aaa52e34 1953
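		/*
		 * Walk the range again: slots that were filled for holes are
		 * put back to empty, and each isolated old page is restored
		 * to its slot, unfrozen and returned to the LRU.
		 */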
77da9389
MW
1954 xas_set(&xas, start);
1955 xas_for_each(&xas, page, end - 1) {
f3f0e1d2
KS
1956 page = list_first_entry_or_null(&pagelist,
1957 struct page, lru);
77da9389 1958 if (!page || xas.xa_index < page->index) {
f3f0e1d2
KS
1959 if (!nr_none)
1960 break;
f3f0e1d2 1961 nr_none--;
59749e6c 1962 /* Put holes back where they were */
77da9389 1963 xas_store(&xas, NULL);
f3f0e1d2
KS
1964 continue;
1965 }
1966
77da9389 1967 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
f3f0e1d2
KS
1968
1969 /* Unfreeze the page. */
1970 list_del(&page->lru);
1971 page_ref_unfreeze(page, 2);
77da9389
MW
1972 xas_store(&xas, page);
1973 xas_pause(&xas);
1974 xas_unlock_irq(&xas);
f3f0e1d2 1975 unlock_page(page);
042a3082 1976 putback_lru_page(page);
77da9389 1977 xas_lock_irq(&xas);
f3f0e1d2
KS
1978 }
1979 VM_BUG_ON(nr_none);
77da9389 1980 xas_unlock_irq(&xas);
f3f0e1d2 1981
f3f0e1d2
KS
1982 new_page->mapping = NULL;
1983 }
042a3082
HD
1984
1985 unlock_page(new_page);
f3f0e1d2
KS
1986out:
1987 VM_BUG_ON(!list_empty(&pagelist));
9d82c694 1988 if (!IS_ERR_OR_NULL(*hpage))
bbc6b703 1989 mem_cgroup_uncharge(page_folio(*hpage));
f3f0e1d2
KS
1990 /* TODO: tracepoints */
1991}
1992
579c571e
SL
1993static void khugepaged_scan_file(struct mm_struct *mm,
1994 struct file *file, pgoff_t start, struct page **hpage)
f3f0e1d2
KS
1995{
1996 struct page *page = NULL;
579c571e 1997 struct address_space *mapping = file->f_mapping;
85b392db 1998 XA_STATE(xas, &mapping->i_pages, start);
f3f0e1d2
KS
1999 int present, swap;
2000 int node = NUMA_NO_NODE;
2001 int result = SCAN_SUCCEED;
2002
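	/*
	 * Scan the page cache range under RCU without touching page state:
	 * count present pages and swap entries, and bail out on anything
	 * that would make a collapse pointless (an existing compound page,
	 * pages off the LRU, unexpected extra references, or too many swap
	 * entries).  The actual work happens in collapse_file().
	 */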
2003 present = 0;
2004 swap = 0;
2005 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2006 rcu_read_lock();
85b392db
MW
2007 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2008 if (xas_retry(&xas, page))
f3f0e1d2 2009 continue;
f3f0e1d2 2010
85b392db 2011 if (xa_is_value(page)) {
f3f0e1d2
KS
2012 if (++swap > khugepaged_max_ptes_swap) {
2013 result = SCAN_EXCEED_SWAP_PTE;
e9ea874a 2014 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
f3f0e1d2
KS
2015 break;
2016 }
2017 continue;
2018 }
2019
6b24ca4a
MWO
2020 /*
2021 * XXX: khugepaged should compact smaller compound pages
 2022		 * into a PMD-sized page
2023 */
f3f0e1d2
KS
2024 if (PageTransCompound(page)) {
2025 result = SCAN_PAGE_COMPOUND;
2026 break;
2027 }
2028
2029 node = page_to_nid(page);
2030 if (khugepaged_scan_abort(node)) {
2031 result = SCAN_SCAN_ABORT;
2032 break;
2033 }
2034 khugepaged_node_load[node]++;
2035
2036 if (!PageLRU(page)) {
2037 result = SCAN_PAGE_LRU;
2038 break;
2039 }
2040
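		/*
		 * The only references we expect are the page cache's, one
		 * per mapping (page_mapcount), and possibly one held by
		 * private (buffer) data; anything beyond that means someone
		 * else is using the page and collapsing it would be unsafe.
		 */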
99cb0dbd
SL
2041 if (page_count(page) !=
2042 1 + page_mapcount(page) + page_has_private(page)) {
f3f0e1d2
KS
2043 result = SCAN_PAGE_COUNT;
2044 break;
2045 }
2046
2047 /*
2048 * We probably should check if the page is referenced here, but
2049 * nobody would transfer pte_young() to PageReferenced() for us.
 2050		 * And an rmap walk here is just too costly...
2051 */
2052
2053 present++;
2054
2055 if (need_resched()) {
85b392db 2056 xas_pause(&xas);
f3f0e1d2 2057 cond_resched_rcu();
f3f0e1d2
KS
2058 }
2059 }
2060 rcu_read_unlock();
2061
2062 if (result == SCAN_SUCCEED) {
2063 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2064 result = SCAN_EXCEED_NONE_PTE;
e9ea874a 2065 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
f3f0e1d2
KS
2066 } else {
2067 node = khugepaged_find_target_node();
579c571e 2068 collapse_file(mm, file, start, hpage, node);
f3f0e1d2
KS
2069 }
2070 }
2071
2072 /* TODO: tracepoints */
2073}
2074#else
579c571e
SL
2075static void khugepaged_scan_file(struct mm_struct *mm,
2076 struct file *file, pgoff_t start, struct page **hpage)
f3f0e1d2
KS
2077{
2078 BUILD_BUG();
2079}
27e1f827 2080
0edf61e5 2081static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
27e1f827 2082{
27e1f827 2083}
f3f0e1d2
KS
2084#endif
2085
b46e756f
KS
2086static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2087 struct page **hpage)
2088 __releases(&khugepaged_mm_lock)
2089 __acquires(&khugepaged_mm_lock)
2090{
2091 struct mm_slot *mm_slot;
2092 struct mm_struct *mm;
2093 struct vm_area_struct *vma;
2094 int progress = 0;
2095
2096 VM_BUG_ON(!pages);
35f3aa39 2097 lockdep_assert_held(&khugepaged_mm_lock);
b46e756f
KS
2098
2099 if (khugepaged_scan.mm_slot)
2100 mm_slot = khugepaged_scan.mm_slot;
2101 else {
2102 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2103 struct mm_slot, mm_node);
2104 khugepaged_scan.address = 0;
2105 khugepaged_scan.mm_slot = mm_slot;
2106 }
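	/*
	 * khugepaged_scan keeps a cursor (mm_slot + address) across calls,
	 * so each pass resumes where the previous one stopped instead of
	 * rescanning from the start of the list.
	 */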
2107 spin_unlock(&khugepaged_mm_lock);
27e1f827 2108 khugepaged_collapse_pte_mapped_thps(mm_slot);
b46e756f
KS
2109
2110 mm = mm_slot->mm;
3b454ad3
YS
2111 /*
 2112	 * Don't wait for the mmap_lock (to avoid long wait times). Just move to
2113 * the next mm on the list.
2114 */
2115 vma = NULL;
d8ed45c5 2116 if (unlikely(!mmap_read_trylock(mm)))
c1e8d7c6 2117 goto breakouterloop_mmap_lock;
3b454ad3 2118 if (likely(!khugepaged_test_exit(mm)))
b46e756f
KS
2119 vma = find_vma(mm, khugepaged_scan.address);
2120
2121 progress++;
2122 for (; vma; vma = vma->vm_next) {
2123 unsigned long hstart, hend;
2124
2125 cond_resched();
2126 if (unlikely(khugepaged_test_exit(mm))) {
2127 progress++;
2128 break;
2129 }
50f8b92f 2130 if (!hugepage_vma_check(vma, vma->vm_flags)) {
b46e756f
KS
2131skip:
2132 progress++;
2133 continue;
2134 }
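		/*
		 * Clamp the VMA to PMD-aligned boundaries: hstart rounds the
		 * start up, hend rounds the end down.  If nothing PMD-sized
		 * fits in between, there is nothing to collapse here.
		 */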
2135 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2136 hend = vma->vm_end & HPAGE_PMD_MASK;
2137 if (hstart >= hend)
2138 goto skip;
2139 if (khugepaged_scan.address > hend)
2140 goto skip;
2141 if (khugepaged_scan.address < hstart)
2142 khugepaged_scan.address = hstart;
2143 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2144
2145 while (khugepaged_scan.address < hend) {
2146 int ret;
2147 cond_resched();
2148 if (unlikely(khugepaged_test_exit(mm)))
2149 goto breakouterloop;
2150
2151 VM_BUG_ON(khugepaged_scan.address < hstart ||
2152 khugepaged_scan.address + HPAGE_PMD_SIZE >
2153 hend);
99cb0dbd 2154 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
396bcc52 2155 struct file *file = get_file(vma->vm_file);
f3f0e1d2
KS
2156 pgoff_t pgoff = linear_page_index(vma,
2157 khugepaged_scan.address);
99cb0dbd 2158
d8ed45c5 2159 mmap_read_unlock(mm);
f3f0e1d2 2160 ret = 1;
579c571e 2161 khugepaged_scan_file(mm, file, pgoff, hpage);
f3f0e1d2
KS
2162 fput(file);
2163 } else {
2164 ret = khugepaged_scan_pmd(mm, vma,
2165 khugepaged_scan.address,
2166 hpage);
2167 }
b46e756f
KS
2168 /* move to next address */
2169 khugepaged_scan.address += HPAGE_PMD_SIZE;
2170 progress += HPAGE_PMD_NR;
2171 if (ret)
c1e8d7c6
ML
2172 /* we released mmap_lock so break loop */
2173 goto breakouterloop_mmap_lock;
b46e756f
KS
2174 if (progress >= pages)
2175 goto breakouterloop;
2176 }
2177 }
2178breakouterloop:
d8ed45c5 2179 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
c1e8d7c6 2180breakouterloop_mmap_lock:
b46e756f
KS
2181
2182 spin_lock(&khugepaged_mm_lock);
2183 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2184 /*
2185 * Release the current mm_slot if this mm is about to die, or
2186 * if we scanned all vmas of this mm.
2187 */
2188 if (khugepaged_test_exit(mm) || !vma) {
2189 /*
2190 * Make sure that if mm_users is reaching zero while
2191 * khugepaged runs here, khugepaged_exit will find
2192 * mm_slot not pointing to the exiting mm.
2193 */
2194 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2195 khugepaged_scan.mm_slot = list_entry(
2196 mm_slot->mm_node.next,
2197 struct mm_slot, mm_node);
2198 khugepaged_scan.address = 0;
2199 } else {
2200 khugepaged_scan.mm_slot = NULL;
2201 khugepaged_full_scans++;
2202 }
2203
2204 collect_mm_slot(mm_slot);
2205 }
2206
2207 return progress;
2208}
2209
2210static int khugepaged_has_work(void)
2211{
2212 return !list_empty(&khugepaged_scan.mm_head) &&
2213 khugepaged_enabled();
2214}
2215
2216static int khugepaged_wait_event(void)
2217{
2218 return !list_empty(&khugepaged_scan.mm_head) ||
2219 kthread_should_stop();
2220}
2221
2222static void khugepaged_do_scan(void)
2223{
2224 struct page *hpage = NULL;
2225 unsigned int progress = 0, pass_through_head = 0;
89dc6a96 2226 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
b46e756f
KS
2227 bool wait = true;
2228
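	/*
	 * Keep scanning until 'pages' ptes worth of address space have been
	 * covered or the head of the mm list has been passed twice;
	 * khugepaged_prealloc_page() gates each iteration and may end the
	 * pass early (e.g. after a huge page allocation failure).
	 */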
a980df33
KS
2229 lru_add_drain_all();
2230
b46e756f
KS
2231 while (progress < pages) {
2232 if (!khugepaged_prealloc_page(&hpage, &wait))
2233 break;
2234
2235 cond_resched();
2236
2237 if (unlikely(kthread_should_stop() || try_to_freeze()))
2238 break;
2239
2240 spin_lock(&khugepaged_mm_lock);
2241 if (!khugepaged_scan.mm_slot)
2242 pass_through_head++;
2243 if (khugepaged_has_work() &&
2244 pass_through_head < 2)
2245 progress += khugepaged_scan_mm_slot(pages - progress,
2246 &hpage);
2247 else
2248 progress = pages;
2249 spin_unlock(&khugepaged_mm_lock);
2250 }
2251
2252 if (!IS_ERR_OR_NULL(hpage))
2253 put_page(hpage);
2254}
2255
2256static bool khugepaged_should_wakeup(void)
2257{
2258 return kthread_should_stop() ||
2259 time_after_eq(jiffies, khugepaged_sleep_expire);
2260}
2261
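/*
 * With work pending, sleep for scan_sleep_millisecs between scan passes
 * (a zero value means back-to-back passes); with nothing to do, sleep
 * until an mm is registered or the thread is asked to stop.
 */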
2262static void khugepaged_wait_work(void)
2263{
2264 if (khugepaged_has_work()) {
2265 const unsigned long scan_sleep_jiffies =
2266 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2267
2268 if (!scan_sleep_jiffies)
2269 return;
2270
2271 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2272 wait_event_freezable_timeout(khugepaged_wait,
2273 khugepaged_should_wakeup(),
2274 scan_sleep_jiffies);
2275 return;
2276 }
2277
2278 if (khugepaged_enabled())
2279 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2280}
2281
2282static int khugepaged(void *none)
2283{
2284 struct mm_slot *mm_slot;
2285
2286 set_freezable();
2287 set_user_nice(current, MAX_NICE);
2288
2289 while (!kthread_should_stop()) {
2290 khugepaged_do_scan();
2291 khugepaged_wait_work();
2292 }
2293
2294 spin_lock(&khugepaged_mm_lock);
2295 mm_slot = khugepaged_scan.mm_slot;
2296 khugepaged_scan.mm_slot = NULL;
2297 if (mm_slot)
2298 collect_mm_slot(mm_slot);
2299 spin_unlock(&khugepaged_mm_lock);
2300 return 0;
2301}
2302
2303static void set_recommended_min_free_kbytes(void)
2304{
2305 struct zone *zone;
2306 int nr_zones = 0;
2307 unsigned long recommended_min;
2308
bd3400ea
LF
2309 if (!khugepaged_enabled()) {
2310 calculate_min_free_kbytes();
2311 goto update_wmarks;
2312 }
2313
b7d349c7
JK
2314 for_each_populated_zone(zone) {
2315 /*
2316 * We don't need to worry about fragmentation of
2317 * ZONE_MOVABLE since it only has movable pages.
2318 */
2319 if (zone_idx(zone) > gfp_zone(GFP_USER))
2320 continue;
2321
b46e756f 2322 nr_zones++;
b7d349c7 2323 }
b46e756f
KS
2324
2325 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2326 recommended_min = pageblock_nr_pages * nr_zones * 2;
2327
2328 /*
2329 * Make sure that on average at least two pageblocks are almost free
2330 * of another type, one for a migratetype to fall back to and a
 2331	 * second to avoid subsequent fallbacks of other types. There are 3
2332 * MIGRATE_TYPES we care about.
2333 */
2334 recommended_min += pageblock_nr_pages * nr_zones *
2335 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
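	/*
	 * Rough example (not derived from any particular machine): with
	 * 4 KiB pages and 2 MiB pageblocks (pageblock_nr_pages == 512),
	 * two populated zones and MIGRATE_PCPTYPES == 3, this comes to
	 * 512 * 2 * 2 + 512 * 2 * 9 = 11264 pages, i.e. about 44 MiB,
	 * before the 5%-of-lowmem cap below is applied.
	 */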
2336
 2337	/* don't ever allow reserving more than 5% of the lowmem */
2338 recommended_min = min(recommended_min,
2339 (unsigned long) nr_free_buffer_pages() / 20);
2340 recommended_min <<= (PAGE_SHIFT-10);
2341
2342 if (recommended_min > min_free_kbytes) {
2343 if (user_min_free_kbytes >= 0)
2344 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2345 min_free_kbytes, recommended_min);
2346
2347 min_free_kbytes = recommended_min;
2348 }
bd3400ea
LF
2349
2350update_wmarks:
b46e756f
KS
2351 setup_per_zone_wmarks();
2352}
2353
2354int start_stop_khugepaged(void)
2355{
b46e756f
KS
2356 int err = 0;
2357
2358 mutex_lock(&khugepaged_mutex);
2359 if (khugepaged_enabled()) {
2360 if (!khugepaged_thread)
2361 khugepaged_thread = kthread_run(khugepaged, NULL,
2362 "khugepaged");
2363 if (IS_ERR(khugepaged_thread)) {
2364 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2365 err = PTR_ERR(khugepaged_thread);
2366 khugepaged_thread = NULL;
2367 goto fail;
2368 }
2369
2370 if (!list_empty(&khugepaged_scan.mm_head))
2371 wake_up_interruptible(&khugepaged_wait);
b46e756f
KS
2372 } else if (khugepaged_thread) {
2373 kthread_stop(khugepaged_thread);
2374 khugepaged_thread = NULL;
2375 }
bd3400ea 2376 set_recommended_min_free_kbytes();
b46e756f
KS
2377fail:
2378 mutex_unlock(&khugepaged_mutex);
2379 return err;
2380}
4aab2be0
VB
2381
2382void khugepaged_min_free_kbytes_update(void)
2383{
2384 mutex_lock(&khugepaged_mutex);
2385 if (khugepaged_enabled() && khugepaged_thread)
2386 set_recommended_min_free_kbytes();
2387 mutex_unlock(&khugepaged_mutex);
2388}