mm/khugepaged.c

// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 pte (or vmas) every 30 second */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped,
 * as would have happened if the vma had been large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte mapped THP
 * @pte_mapped_thp: address array corresponding pte mapped THP
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it
 * runs. Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
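
/*
 * Usage example (illustrative; assumes the usual sysfs layout, where this
 * attribute group is registered under the transparent_hugepage kobject):
 *
 *   # cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   4096
 *   # echo 30000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *
 * Writing scan_sleep_millisecs or alloc_sleep_millisecs also clears
 * khugepaged_sleep_expire and wakes the daemon, so the new interval takes
 * effect immediately.
 */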

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}
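
/*
 * Worked defaults (illustrative; assumes x86-64 with 2MB THPs, i.e.
 * HPAGE_PMD_NR == 512): khugepaged_init() above yields
 * pages_to_scan = 4096, max_ptes_none = 511, max_ptes_swap = 64 and
 * max_ptes_shared = 256.
 */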

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
				vma->vm_pgoff, HPAGE_PMD_NR))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular files are valid */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
	    (vm_flags & VM_EXEC)) {
		struct inode *inode = vma->vm_file->f_inode;

		return !inode_is_open_for_write(inode) &&
			S_ISREG(inode->i_mode);
	}

	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}
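
/*
 * Reading guide for hugepage_vma_check(): the tests run in order -- THP
 * must be enabled for the vma, file mappings must be HPAGE_PMD_NR-aligned,
 * shmem defers to the shmem mount/sysfs policy, non-madvised vmas need
 * "always" mode, read-only executable regular files are a special case,
 * and everything else must be an anonymous, non-stack vma without
 * VM_NO_KHUGEPAGED.
 */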

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * For non-shmem files, khugepaged only supports read-only files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}
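
/*
 * Worked example (illustrative): a base anonymous page mapped by two
 * processes and present in the swap cache has total_mapcount() == 2 and
 * compound_nr() == 1, so is_refcount_suitable() expects page_count() == 3;
 * any higher count implies an external pin (e.g. GUP) and the page is
 * rejected.
 */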

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}
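
/*
 * Example (illustrative; assumes the default node_reclaim_distance of
 * RECLAIM_DISTANCE == 30): with node_reclaim_mode enabled, once the scan
 * has counted pages on node 0, hitting a page on a node farther than
 * distance 30 from node 0 aborts the scan, since collapsing would force
 * a costly remote allocation.
 */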

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
		     nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}
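
/*
 * Example (illustrative): if nodes 0 and 1 each contributed 256 of the
 * 512 scanned PTEs, the first pass picks node 0; if node 0 was also the
 * previous target, the balancing pass advances to node 1, which has the
 * same hit count, spreading collapse allocations across tied nodes over
 * successive calls.
 */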

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * before taking the mmap_lock again.
 * Returns 0 if it succeeds, otherwise returns a non-zero
 * value (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || !vma_is_anonymous(vma))
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, haddr) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}
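
/*
 * Example (illustrative; assumes x86-64 with 2MB THPs): with the default
 * max_ptes_swap of HPAGE_PMD_NR / 8 == 64, khugepaged_scan_pmd() only
 * nominates a range with at most 64 swap PTEs, so the loop above faults
 * in at most 64 pages from swap before a collapse attempt.
 */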

static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
	 * If it fails, we release mmap_lock and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out_up_write;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out_up_write;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte,
			&compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
			&compound_pagelist);
	pte_unmap(pte);
	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * avoid the copy_huge_page writes to become visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	lru_cache_add_inactive_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(page_folio(*hpage));
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
}

static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, result = 0, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we could also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked, but that could deliver unexpected
			 * userfault messages that fall outside of
			 * the registered range. So, just keep it simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
			goto out_unmap;
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy; it may see total_mapcount > refcount
		 * in some cases.
		 * For example, take one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying to unmap the whole PMD, but khugepaged
		 * may be scanning the parent between the child clearing the
		 * PageDoubleMap flag and decrementing the mapcount. So
		 * khugepaged may see total_mapcount > refcount.
		 *
		 * But such a case is ephemeral; we could always retry collapse
		 * later. However it may report false positives if the page
		 * has excessive GUP pins (i.e. 512). Anyway the same check
		 * will be done again later; the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR / 2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_lock released */
		collapse_huge_page(mm, address, hpage, node,
				   referenced, unmapped);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 */
static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					 unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmdp)
{
	spinlock_t *ptl;
	pmd_t pmd;

	mmap_assert_write_locked(mm);
	ptl = pmd_lock(vma->vm_mm, pmdp);
	pmd = pmdp_collapse_flush(vma, addr, pmdp);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, addr, pmd);
	pte_free(mm, pmd_pgtable(pmd));
}

/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 * address haddr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in
 * as pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return;

	if (!PageHead(hpage))
		goto drop_hpage;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		goto drop_hpage;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, vma, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	collapse_and_free_pmd(mm, vma, haddr, pmd);
drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
	goto drop_hpage;
}
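
/*
 * Example scenario (illustrative): after collapse_file() installs a file
 * THP, a process may still map the THP's subpages through a stale page
 * table. retract_page_tables() below queues such addresses via
 * khugepaged_add_pte_mapped_thp() when it cannot take mmap_lock, and
 * collapse_pte_mapped_thp() later retracts the page table so the range
 * can refault pmd-mapped.
 */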

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return;

	if (!mmap_write_trylock(mm))
		return;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
}

static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * mmap_write_lock(mm) in, as the PMD-mapping is likely to be
		 * split later.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set up
		 * after the check but before we take mmap_lock by the fault
		 * path. But the page lock would prevent establishing any new
		 * ptes of the page, so we are safe.
		 *
		 * An alternative would be to drop the check, but check that
		 * the page table is clear before calling pmdp_collapse_flush()
		 * under ptl. It has a higher chance to recover THP for the
		 * VMA, but has higher cost too.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_lock to retract the page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_lock while holding page lock. Fault path does it in
		 * reverse order. Trylock is a way to avoid deadlock.
		 */
		if (mmap_write_trylock(mm)) {
			if (!khugepaged_test_exit(mm))
				collapse_and_free_pmd(mm, vma, addr, pmd);
			mmap_write_unlock(mm);
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}
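
/*
 * Lock-ordering note: retract_page_tables() runs with the huge page
 * locked, so it can only trylock mmap_lock; blocking on it could deadlock
 * against a fault path that holds mmap_lock and waits for the page lock.
 * A failed trylock is therefore deferred to the scan loop via
 * khugepaged_add_pte_mapped_thp() rather than retried inline.
 */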
1604
1605/**
99cb0dbd 1606 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
f3f0e1d2 1607 *
336e6b53
AS
1608 * @mm: process address space where collapse happens
1609 * @file: file that collapse on
1610 * @start: collapse start address
1611 * @hpage: new allocated huge page for collapse
1612 * @node: appointed node the new huge page allocate from
1613 *
f3f0e1d2 1614 * Basic scheme is simple, details are more complex:
87c460a0 1615 * - allocate and lock a new huge page;
77da9389 1616 * - scan page cache replacing old pages with the new one
99cb0dbd 1617 * + swap/gup in pages if necessary;
f3f0e1d2 1618 * + fill in gaps;
77da9389
MW
1619 * + keep old pages around in case rollback is required;
1620 * - if replacing succeeds:
f3f0e1d2
KS
1621 * + copy data over;
1622 * + free old pages;
87c460a0 1623 * + unlock huge page;
f3f0e1d2
KS
1624 * - if replacing failed;
1625 * + put all pages back and unfreeze them;
77da9389 1626 * + restore gaps in the page cache;
87c460a0 1627 * + unlock and free huge page;
f3f0e1d2 1628 */
579c571e
SL
1629static void collapse_file(struct mm_struct *mm,
1630 struct file *file, pgoff_t start,
f3f0e1d2
KS
1631 struct page **hpage, int node)
1632{
579c571e 1633 struct address_space *mapping = file->f_mapping;
f3f0e1d2 1634 gfp_t gfp;
77da9389 1635 struct page *new_page;
f3f0e1d2
KS
1636 pgoff_t index, end = start + HPAGE_PMD_NR;
1637 LIST_HEAD(pagelist);
77da9389 1638 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
f3f0e1d2 1639 int nr_none = 0, result = SCAN_SUCCEED;
99cb0dbd 1640 bool is_shmem = shmem_file(file);
bf9ecead 1641 int nr;
f3f0e1d2 1642
99cb0dbd 1643 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
f3f0e1d2
KS
1644 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1645
1646 /* Only allocate from the target node */
41b6167e 1647 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
f3f0e1d2
KS
1648
1649 new_page = khugepaged_alloc_page(hpage, gfp, node);
1650 if (!new_page) {
1651 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1652 goto out;
1653 }
1654
8f425e4e 1655 if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
f3f0e1d2
KS
1656 result = SCAN_CGROUP_CHARGE_FAIL;
1657 goto out;
1658 }
9d82c694 1659 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
f3f0e1d2 1660
6b24ca4a
MWO
1661 /*
1662 * Ensure we have slots for all the pages in the range. This is
1663 * almost certainly a no-op because most of the pages must be present
1664 */
95feeabb
HD
1665 do {
1666 xas_lock_irq(&xas);
1667 xas_create_range(&xas);
1668 if (!xas_error(&xas))
1669 break;
1670 xas_unlock_irq(&xas);
1671 if (!xas_nomem(&xas, GFP_KERNEL)) {
95feeabb
HD
1672 result = SCAN_FAIL;
1673 goto out;
1674 }
1675 } while (1);
1676
042a3082 1677 __SetPageLocked(new_page);
99cb0dbd
SL
1678 if (is_shmem)
1679 __SetPageSwapBacked(new_page);
f3f0e1d2
KS
1680 new_page->index = start;
1681 new_page->mapping = mapping;
f3f0e1d2 1682
f3f0e1d2 1683 /*
87c460a0
HD
1684 * At this point the new_page is locked and not up-to-date.
1685 * It's safe to insert it into the page cache, because nobody would
1686 * be able to map it or use it in another way until we unlock it.
f3f0e1d2
KS
1687 */
1688
77da9389
MW
1689 xas_set(&xas, start);
1690 for (index = start; index < end; index++) {
1691 struct page *page = xas_next(&xas);
1692
1693 VM_BUG_ON(index != xas.xa_index);
99cb0dbd
SL
1694 if (is_shmem) {
1695 if (!page) {
1696 /*
1697 * Stop if extent has been truncated or
1698 * hole-punched, and is now completely
1699 * empty.
1700 */
1701 if (index == start) {
1702 if (!xas_next_entry(&xas, end - 1)) {
1703 result = SCAN_TRUNCATED;
1704 goto xa_locked;
1705 }
1706 xas_set(&xas, index);
1707 }
1708 if (!shmem_charge(mapping->host, 1)) {
1709 result = SCAN_FAIL;
042a3082 1710 goto xa_locked;
701270fa 1711 }
99cb0dbd
SL
1712 xas_store(&xas, new_page);
1713 nr_none++;
1714 continue;
701270fa 1715 }
99cb0dbd
SL
1716
1717 if (xa_is_value(page) || !PageUptodate(page)) {
1718 xas_unlock_irq(&xas);
1719 /* swap in or instantiate fallocated page */
1720 if (shmem_getpage(mapping->host, index, &page,
acdd9f8e 1721 SGP_NOALLOC)) {
99cb0dbd
SL
1722 result = SCAN_FAIL;
1723 goto xa_unlocked;
1724 }
1725 } else if (trylock_page(page)) {
1726 get_page(page);
1727 xas_unlock_irq(&xas);
1728 } else {
1729 result = SCAN_PAGE_LOCK;
042a3082 1730 goto xa_locked;
77da9389 1731 }
99cb0dbd
SL
1732 } else { /* !is_shmem */
1733 if (!page || xa_is_value(page)) {
1734 xas_unlock_irq(&xas);
1735 page_cache_sync_readahead(mapping, &file->f_ra,
1736 file, index,
e5a59d30 1737 end - index);
99cb0dbd
SL
1738 /* drain pagevecs to help isolate_lru_page() */
1739 lru_add_drain();
1740 page = find_lock_page(mapping, index);
1741 if (unlikely(page == NULL)) {
1742 result = SCAN_FAIL;
1743 goto xa_unlocked;
1744 }
75f36069
SL
1745 } else if (PageDirty(page)) {
1746 /*
1747 * khugepaged only works on read-only fd,
1748 * so this page is dirty because it hasn't
1749 * been flushed since first write. There
1750 * won't be new dirty pages.
1751 *
1752 * Trigger async flush here and hope the
1753 * writeback is done when khugepaged
1754 * revisits this page.
1755 *
1756 * This is a one-off situation. We are not
1757 * forcing writeback in loop.
1758 */
1759 xas_unlock_irq(&xas);
1760 filemap_flush(mapping);
1761 result = SCAN_FAIL;
1762 goto xa_unlocked;
74c42e1b
RW
1763 } else if (PageWriteback(page)) {
1764 xas_unlock_irq(&xas);
1765 result = SCAN_FAIL;
1766 goto xa_unlocked;
99cb0dbd
SL
1767 } else if (trylock_page(page)) {
1768 get_page(page);
1769 xas_unlock_irq(&xas);
1770 } else {
1771 result = SCAN_PAGE_LOCK;
1772 goto xa_locked;
f3f0e1d2 1773 }
f3f0e1d2
KS
1774 }
1775
1776 /*
b93b0163 1777 * The page must be locked, so we can drop the i_pages lock
f3f0e1d2
KS
1778 * without racing with truncate.
1779 */
1780 VM_BUG_ON_PAGE(!PageLocked(page), page);
4655e5e5
SL
1781
1782 /* make sure the page is up to date */
1783 if (unlikely(!PageUptodate(page))) {
1784 result = SCAN_FAIL;
1785 goto out_unlock;
1786 }

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && (PageDirty(page) ||
				  PageWriteback(page))) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			putback_lru_page(page);
			goto out_unlock;
		}

		if (page_mapped(page))
			try_to_unmap(page_folio(page),
					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}
	nr = thp_nr_pages(new_page);

	if (is_shmem)
		__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
	else {
		__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * i_writecount is up to date and the update to nr_thps is
		 * visible. Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		smp_mb();
		if (inode_is_open_for_write(mapping->host)) {
			result = SCAN_FAIL;
			__mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);
			goto xa_locked;
		}
	}

	if (nr_none) {
		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
		if (is_shmem)
			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
	}

	/* Join all the small entries into a single multi-index entry */
	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
	xas_store(&xas, new_page);
xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	/*
	 * If collapse is successful, flush must be done now before copying.
	 * If collapse is unsuccessful, does flush actually need to be done?
	 * Do it anyway, to clear the state.
	 */
	try_to_unmap_flush();

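	/*
	 * Past this point the xarray is unlocked. On success, copy the old
	 * pages' contents into new_page and release them; on failure, undo
	 * every change made above and put the old pages back.
	 */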
	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with the new one has succeeded; now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
				      page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		if (is_shmem)
			set_page_dirty(new_page);
		lru_cache_add(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(page_folio(*hpage));
	/* TODO: tracepoints */
}

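/*
 * Scan one PMD-sized extent of @file's page cache, starting at @start,
 * and call collapse_file() if it looks collapsible. The walk itself runs
 * under rcu_read_lock() only; collapse_file() revalidates each page under
 * the xarray lock before committing to anything.
 */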
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				break;
			}
			continue;
		}

		/*
		 * XXX: khugepaged should compact smaller compound pages
		 * into a PMD sized page
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
		} else {
			node = khugepaged_find_target_node();
			collapse_file(mm, file, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
}
#endif
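/*
 * Scan up to @pages ptes of the mm pointed at by khugepaged_scan.mm_slot,
 * resuming where the previous pass left off. Returns the amount of
 * progress made; drops and re-takes khugepaged_mm_lock around the walk.
 */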
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times). Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_lock;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
			goto skip;

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				mmap_read_unlock(mm);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_lock so break loop */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

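/*
 * One scan pass: drain the per-cpu LRU caches, then keep scanning mm_slots
 * until "pages" ptes have been covered or the head of the scan list has
 * been passed twice.
 */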
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;

	lru_add_drain_all();

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

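/*
 * Main loop of the khugepaged kernel thread: alternate between scanning
 * and (freezable) sleeping until kthread_stop(), then release whatever
 * mm_slot the scan cursor still points at.
 */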
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

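/*
 * Bump min_free_kbytes so the allocator keeps enough whole pageblocks free
 * for huge page allocation to have a fighting chance; fall back to the
 * default calculation when khugepaged is disabled.
 */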
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!khugepaged_enabled()) {
		calculate_min_free_kbytes();
		goto update_wmarks;
	}

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
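	/*
	 * Worked example (an assumption for illustration, not from this
	 * file): on x86-64 with 4 KiB pages, pageblock_nr_pages == 512 and
	 * MIGRATE_PCPTYPES == 3, so a single populated zone asks for
	 * 512 * 1 * 2 + 512 * 1 * 3 * 3 = 5632 pages, i.e. 22528 kB,
	 * before the 5%-of-lowmem clamp below is applied.
	 */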

	/* don't ever allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}

update_wmarks:
	setup_per_zone_wmarks();
}

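/*
 * Start or stop the khugepaged thread so that it matches
 * khugepaged_enabled(); callers (e.g. the sysfs "enabled" handler) update
 * that state first and must not hold khugepaged_mutex.
 */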
int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
	set_recommended_min_free_kbytes();
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}
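/*
 * Called when min_free_kbytes is recalculated elsewhere (e.g. after memory
 * hot-add) so that the khugepaged bump is re-applied on top of the new
 * value while the thread is running.
 */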
void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}