/* mm/khugepaged.c */
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/ksm.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "mm_slot.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_PMD_NONE,
	SCAN_PMD_MAPPED,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PTE_MAPPED_HUGEPAGE,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
	SCAN_STORE_FAILED,
	SCAN_COPY_MC,
	SCAN_PAGE_FILLED,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation, poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped, just
 * as would have happened had the vma been large enough at page-fault
 * time.
 *
 * Note that these are only respected if collapse was initiated by khugepaged.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __ro_after_init;

struct collapse_control {
	bool is_khugepaged;

	/* Num pages scanned per node */
	u32 node_load[MAX_NUMNODES];

	/* nodemask for allocation fallback */
	nodemask_t alloc_nmask;
};

/**
 * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
 * @slot: hash lookup from mm to mm_slot
 */
struct khugepaged_mm_slot {
	struct mm_slot slot;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct khugepaged_mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR_RW(scan_sleep_millisecs);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR_RW(alloc_sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR_RW(pages_to_scan);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR_RW(defrag);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the amount of free memory in the system as it runs. Increasing
 * max_ptes_none instead lets khugepaged consume more free memory during
 * its scan, in exchange for collapsing more ranges.
 */
static ssize_t max_ptes_none_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t max_ptes_none_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR_RW(max_ptes_none);

static ssize_t max_ptes_swap_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t max_ptes_swap_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR_RW(max_ptes_swap);

static ssize_t max_ptes_shared_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t max_ptes_shared_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR_RW(max_ptes_shared);

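/*
 * Worked example of the three limits above (assuming 4K pages, so
 * HPAGE_PMD_NR == 512): with the defaults set in khugepaged_init() -
 * max_ptes_none == 511, max_ptes_swap == 64, max_ptes_shared == 256 -
 * khugepaged may collapse a range with as little as one pte populated,
 * will swap in at most 64 swapped-out ptes on its behalf, and tolerates
 * up to 256 ptes shared with another process.
 */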
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		khugepaged_enter_vma(vma, *vm_flags);
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct khugepaged_mm_slot),
					  __alignof__(struct khugepaged_mm_slot),
					  0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

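/*
 * Note: khugepaged pins an mm with mmgrab() (mm_count), not mmget()
 * (mm_users), so mm_users reaching zero reliably signals that the owner
 * has exited and the address space is being torn down.
 */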
static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

void __khugepaged_enter(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int wakeup;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
		return;

	mm_slot = mm_slot_alloc(mm_slot_cache);
	if (!mm_slot)
		return;

	slot = &mm_slot->slot;

	spin_lock(&khugepaged_mm_lock);
	mm_slot_insert(mm_slots_hash, mm, slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);
}

void khugepaged_enter_vma(struct vm_area_struct *vma,
			  unsigned long vm_flags)
{
	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
	    hugepage_flags_enabled()) {
		if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
					    PMD_ORDER))
			__khugepaged_enter(vma->vm_mm);
	}
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct khugepaged_mm_slot *mm_slot;
	struct mm_slot *slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	slot = mm_slot_lookup(mm_slots_hash, mm);
	mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&slot->hash);
		list_del(&slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * hpage_collapse_test_exit() (which is guaranteed to run
		 * under mmap_lock read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_folio(struct folio *folio)
{
	node_stat_mod_folio(folio,
			NR_ISOLATED_ANON + folio_is_file_lru(folio),
			-folio_nr_pages(folio));
	folio_unlock(folio);
	folio_putback_lru(folio);
}

static void release_pte_page(struct page *page)
{
	release_pte_folio(page_folio(page));
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct folio *folio, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = ptep_get(_pte);
		unsigned long pfn;

		if (pte_none(pteval))
			continue;
		pfn = pte_pfn(pteval);
		if (is_zero_pfn(pfn))
			continue;
		folio = pfn_folio(pfn);
		if (folio_test_large(folio))
			continue;
		release_pte_folio(folio);
	}

	list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
		list_del(&folio->lru);
		release_pte_folio(folio);
	}
}

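/*
 * A folio's only expected references are its mappings and, if it is in
 * the swap cache, one reference per page from the swap cache itself.
 * E.g. (illustrative): an order-0 anon folio mapped by two processes
 * and present in the swap cache should have a refcount of exactly 3;
 * anything higher implies an extra pin (such as GUP), so the folio is
 * not suitable for collapse.
 */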
static bool is_refcount_suitable(struct folio *folio)
{
	int expected_refcount;

	expected_refcount = folio_mapcount(folio);
	if (folio_test_swapcache(folio))
		expected_refcount += folio_nr_pages(folio);

	return folio_ref_count(folio) == expected_refcount;
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct collapse_control *cc,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	struct folio *folio = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		if (pte_uffd_wp(pteval)) {
			result = SCAN_PTE_UFFD_WP;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		folio = page_folio(page);
		VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out;
			}
		}

		if (folio_test_large(folio)) {
			struct folio *f;

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(f, compound_pagelist, lru) {
				if (folio == f)
					goto next;
			}
		}

		/*
		 * We can do it before folio_isolate_lru because the
		 * folio can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!folio_trylock(folio)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(folio)) {
			folio_unlock(folio);
			result = SCAN_PAGE_COUNT;
			goto out;
		}

		/*
		 * Isolate the folio to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (!folio_isolate_lru(folio)) {
			folio_unlock(folio);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		node_stat_mod_folio(folio,
				NR_ISOLATED_ANON + folio_is_file_lru(folio),
				folio_nr_pages(folio));
		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
		VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

		if (folio_test_large(folio))
			list_add_tail(&folio->lru, compound_pagelist);
next:
		/*
		 * If collapse was initiated by khugepaged, check that there
		 * are enough young ptes to justify collapsing the page
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || folio_test_young(folio) ||
		     folio_test_referenced(folio) ||
		     mmu_notifier_test_young(vma->vm_mm, address)))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(cc->is_khugepaged && !referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
						    referenced, writable, result);
		return result;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
					    referenced, writable, result);
	return result;
}

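/*
 * Clean up after a successful copy: zap the old ptes and, for each
 * source page, drop the rmap, LRU-isolation and lock references taken
 * in __collapse_huge_page_isolate(), freeing the pages back (via the
 * swap cache where applicable). Called with all source pages still
 * isolated and locked.
 */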
static void __collapse_huge_page_copy_succeeded(pte_t *pte,
						struct vm_area_struct *vma,
						unsigned long address,
						spinlock_t *ptl,
						struct list_head *compound_pagelist)
{
	struct page *src_page;
	struct page *tmp;
	pte_t *_pte;
	pte_t pteval;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
				ksm_might_unmap_zero_page(vma->vm_mm, pteval);
			}
		} else {
			src_page = pte_page(pteval);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			ptep_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, vma, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		mod_node_page_state(page_pgdat(src_page),
				    NR_ISOLATED_ANON + page_is_file_lru(src_page),
				    -compound_nr(src_page));
		unlock_page(src_page);
		free_swap_cache(src_page);
		putback_lru_page(src_page);
	}
}

static void __collapse_huge_page_copy_failed(pte_t *pte,
					     pmd_t *pmd,
					     pmd_t orig_pmd,
					     struct vm_area_struct *vma,
					     struct list_head *compound_pagelist)
{
	spinlock_t *pmd_ptl;

	/*
	 * Re-establish the PMD to point to the original page table
	 * entry. Restoring PMD needs to be done prior to releasing
	 * pages. Since pages are still isolated and locked here,
	 * acquiring anon_vma_lock_write is unnecessary.
	 */
	pmd_ptl = pmd_lock(vma->vm_mm, pmd);
	pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
	spin_unlock(pmd_ptl);
	/*
	 * Release both raw and compound pages isolated
	 * in __collapse_huge_page_isolate.
	 */
	release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
}

/*
 * __collapse_huge_page_copy - attempts to copy memory contents from raw
 * pages to a hugepage. Cleans up the raw pages if copying succeeds;
 * otherwise restores the original page table and releases isolated raw pages.
 * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
 *
 * @pte: starting of the PTEs to copy from
 * @page: the new hugepage to copy contents to
 * @pmd: pointer to the new hugepage's PMD
 * @orig_pmd: the original raw pages' PMD
 * @vma: the original raw pages' virtual memory area
 * @address: starting address to copy
 * @ptl: lock on raw pages' PTEs
 * @compound_pagelist: list that stores compound pages
 */
static int __collapse_huge_page_copy(pte_t *pte,
				     struct page *page,
				     pmd_t *pmd,
				     pmd_t orig_pmd,
				     struct vm_area_struct *vma,
				     unsigned long address,
				     spinlock_t *ptl,
				     struct list_head *compound_pagelist)
{
	struct page *src_page;
	pte_t *_pte;
	pte_t pteval;
	unsigned long _address;
	int result = SCAN_SUCCEED;

	/*
	 * Copying pages' contents is subject to memory poison at any iteration.
	 */
	for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, _address += PAGE_SIZE) {
		pteval = ptep_get(_pte);
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, _address);
			continue;
		}
		src_page = pte_page(pteval);
		if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
			result = SCAN_COPY_MC;
			break;
		}
	}

	if (likely(result == SCAN_SUCCEED))
		__collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
						    compound_pagelist);
	else
		__collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
						 compound_pagelist);

	return result;
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

struct collapse_control khugepaged_collapse_control = {
	.is_khugepaged = true,
};

static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (cc->node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!cc->node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}

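/*
 * Example (illustrative topology): with node_reclaim_mode enabled and
 * pages already counted against node 0, hitting a page on node 2 aborts
 * the scan if node_distance(2, 0) exceeds node_reclaim_distance, since
 * the collapsed hugepage would otherwise have to be allocated remotely
 * or via local reclaim.
 */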
#define khugepaged_defrag()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (cc->node_load[nid] > max_value) {
			max_value = cc->node_load[nid];
			target_node = nid;
		}

	for_each_online_node(nid) {
		if (max_value == cc->node_load[nid])
			node_set(nid, cc->alloc_nmask);
	}

	return target_node;
}
#else
static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
	return 0;
}
#endif

static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
				       nodemask_t *nmask)
{
	*folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);

	if (unlikely(!*folio)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		return false;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	return true;
}

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma
 * after retaking mmap_lock.
 * Returns enum scan_result value.
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
				   bool expect_anon,
				   struct vm_area_struct **vmap,
				   struct collapse_control *cc)
{
	struct vm_area_struct *vma;

	if (unlikely(hpage_collapse_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
		return SCAN_ADDRESS_RANGE;
	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
				     cc->is_khugepaged, PMD_ORDER))
		return SCAN_VMA_CHECK;
	/*
	 * Anon VMA expected, the address may be unmapped then
	 * remapped to file after khugepaged reacquired the mmap_lock.
	 *
	 * thp_vma_allowable_order may return true for qualified file
	 * vmas.
	 */
	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
		return SCAN_PAGE_ANON;
	return SCAN_SUCCEED;
}

static int find_pmd_or_thp_or_none(struct mm_struct *mm,
				   unsigned long address,
				   pmd_t **pmd)
{
	pmd_t pmde;

	*pmd = mm_find_pmd(mm, address);
	if (!*pmd)
		return SCAN_PMD_NULL;

	pmde = pmdp_get_lockless(*pmd);
	if (pmd_none(pmde))
		return SCAN_PMD_NONE;
	if (!pmd_present(pmde))
		return SCAN_PMD_NULL;
	if (pmd_trans_huge(pmde))
		return SCAN_PMD_MAPPED;
	if (pmd_devmap(pmde))
		return SCAN_PMD_NULL;
	if (pmd_bad(pmde))
		return SCAN_PMD_NULL;
	return SCAN_SUCCEED;
}

static int check_pmd_still_valid(struct mm_struct *mm,
				 unsigned long address,
				 pmd_t *pmd)
{
	pmd_t *new_pmd;
	int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);

	if (result != SCAN_SUCCEED)
		return result;
	if (new_pmd != pmd)
		return SCAN_FAIL;
	return SCAN_SUCCEED;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held.
 * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
 */
static int __collapse_huge_page_swapin(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       unsigned long haddr, pmd_t *pmd,
				       int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
	int result;
	pte_t *pte = NULL;
	spinlock_t *ptl;

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, address),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		if (!pte++) {
			pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
			if (!pte) {
				mmap_read_unlock(mm);
				result = SCAN_PMD_NULL;
				goto out;
			}
		}

		vmf.orig_pte = ptep_get_lockless(pte);
		if (!is_swap_pte(vmf.orig_pte))
			continue;

		vmf.pte = pte;
		vmf.ptl = ptl;
		ret = do_swap_page(&vmf);
		/* Which unmaps pte (after perhaps re-checking the entry) */
		pte = NULL;

		/*
		 * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
		 * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
		 * we do not retry here and the swap entry will remain in the
		 * pagetable, resulting in a later failure.
		 */
		if (ret & VM_FAULT_RETRY) {
			/* Likely, but not guaranteed, that page lock failed */
			result = SCAN_PAGE_LOCK;
			goto out;
		}
		if (ret & VM_FAULT_ERROR) {
			mmap_read_unlock(mm);
			result = SCAN_FAIL;
			goto out;
		}
		swapped_in++;
	}

	if (pte)
		pte_unmap(pte);

	/* Drain LRU cache to remove extra pin on the swapped-in pages */
	if (swapped_in)
		lru_add_drain();

	result = SCAN_SUCCEED;
out:
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
	return result;
}

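/*
 * Allocate a fresh hugepage on the target node and charge it to the
 * mm's memcg. On success, *hpage points at the charged head page; on
 * failure, *hpage is NULL and the reason is returned as a scan_result.
 */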
static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
			      struct collapse_control *cc)
{
	gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
		     GFP_TRANSHUGE);
	int node = hpage_collapse_find_target_node(cc);
	struct folio *folio;

	if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
		*hpage = NULL;
		return SCAN_ALLOC_HUGE_PAGE_FAIL;
	}

	if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
		folio_put(folio);
		*hpage = NULL;
		return SCAN_CGROUP_CHARGE_FAIL;
	}

	count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);

	*hpage = folio_page(folio, 0);
	return SCAN_SUCCEED;
}

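/*
 * Rough locking overview for the collapse path below (a sketch, not a
 * substitute for the inline comments): the caller enters with mmap_lock
 * held for read; we drop it for the allocation, retake it for read to
 * revalidate and (optionally) swap in pages, then drop and retake it
 * for write for the collapse itself. The pmd is cleared under pmd lock
 * with MMU-notifier invalidation, so faults, rmap walks and fast GUP
 * cannot race with the copy.
 */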
static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
			      int referenced, int unmapped,
			      struct collapse_control *cc)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct folio *folio;
	struct page *hpage;
	spinlock_t *pmd_ptl, *pte_ptl;
	int result = SCAN_FAIL;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);

	result = alloc_charge_hpage(&hpage, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out_nolock;

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	if (unmapped) {
		/*
		 * __collapse_huge_page_swapin will return with mmap_lock
		 * released when it fails. So we jump out_nolock directly in
		 * that case. Continuing to collapse causes inconsistency.
		 */
		result = __collapse_huge_page_swapin(mm, vma, address, pmd,
						     referenced);
		if (result != SCAN_SUCCEED)
			goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 *
	 * UFFDIO_MOVE is prevented from racing as well thanks to the
	 * mmap_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
	if (result != SCAN_SUCCEED)
		goto out_up_write;
	/* check if the pmd is still valid */
	result = check_pmd_still_valid(mm, address, pmd);
	if (result != SCAN_SUCCEED)
		goto out_up_write;

	vma_start_write(vma);
	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
				address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * This removes any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address, to
	 * avoid the risk of CPU bugs in that area.
	 *
	 * Parallel fast GUP is fine since fast GUP will back off when
	 * it detects the PMD is changed.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);
	tlb_remove_table_sync_one();

	pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
	if (pte) {
		result = __collapse_huge_page_isolate(vma, address, pte, cc,
						      &compound_pagelist);
		spin_unlock(pte_ptl);
	} else {
		result = SCAN_PMD_NULL;
	}

	if (unlikely(result != SCAN_SUCCEED)) {
		if (pte)
			pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
					   vma, address, pte_ptl,
					   &compound_pagelist);
	pte_unmap(pte);
	if (unlikely(result != SCAN_SUCCEED))
		goto out_up_write;

	folio = page_folio(hpage);
	/*
	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
	 * copy_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	folio_add_new_anon_rmap(folio, vma, address);
	folio_add_lru_vma(folio, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;

	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (hpage)
		put_page(hpage);
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
	return result;
}

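/*
 * Rough summary: scan one pmd-sized range under the pte lock, gathering
 * the none/zero, swap, shared and referenced counts that the tunables
 * above are checked against. On SCAN_SUCCEED the collapse itself is
 * attempted with the pte lock dropped, and *mmap_locked is set to false
 * because collapse_huge_page() returns with mmap_lock released.
 */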
static int hpage_collapse_scan_pmd(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long address, bool *mmap_locked,
				   struct collapse_control *cc)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int result = SCAN_FAIL, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	struct folio *folio = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	result = find_pmd_or_thp_or_none(mm, address, &pmd);
	if (result != SCAN_SUCCEED)
		goto out;

	memset(cc->node_load, 0, sizeof(cc->node_load));
	nodes_clear(cc->alloc_nmask);
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = ptep_get(_pte);
		if (is_swap_pte(pteval)) {
			++unmapped;
			if (!cc->is_khugepaged ||
			    unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp_any(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			++none_or_zero;
			if (!userfaultfd_armed(vma) &&
			    (!cc->is_khugepaged ||
			     none_or_zero <= khugepaged_max_ptes_none)) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked, but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, keep it simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1) {
			++shared;
			if (cc->is_khugepaged &&
			    shared > khugepaged_max_ptes_shared) {
				result = SCAN_EXCEED_SHARED_PTE;
				count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
				goto out_unmap;
			}
		}

		folio = page_folio(page);
		/*
		 * Record which node the original page is from and save this
		 * information to cc->node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = folio_nid(folio);
		if (hpage_collapse_scan_abort(node, cc)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		cc->node_load[node]++;
		if (!folio_test_lru(folio)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (folio_test_locked(folio)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!folio_test_anon(folio)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check may be racy: it may see total_mapcount >
		 * refcount in some cases. Such cases are ephemeral, so we
		 * can always retry the collapse later. However, it may
		 * report a false positive if the page has excessive GUP
		 * pins (i.e. 512). Anyway, the same check will be done
		 * again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(folio)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}

		/*
		 * If collapse was initiated by khugepaged, check that there
		 * are enough young ptes to justify collapsing the page
		 */
		if (cc->is_khugepaged &&
		    (pte_young(pteval) || folio_test_young(folio) ||
		     folio_test_referenced(folio) ||
		     mmu_notifier_test_young(vma->vm_mm, address)))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (cc->is_khugepaged &&
		   (!referenced ||
		    (unmapped && referenced < HPAGE_PMD_NR / 2))) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (result == SCAN_SUCCEED) {
		result = collapse_huge_page(mm, address, referenced,
					    unmapped, cc);
		/* collapse_huge_page will return with the mmap_lock released */
		*mmap_locked = false;
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
				     none_or_zero, result, unmapped);
	return result;
}

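/*
 * Free a scan slot whose mm has exited. Caller must hold
 * khugepaged_mm_lock; the final mmdrop() pairs with the mmgrab() taken
 * in __khugepaged_enter().
 */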
static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
{
	struct mm_slot *slot = &mm_slot->slot;
	struct mm_struct *mm = slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (hpage_collapse_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&slot->hash);
		list_del(&slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		mm_slot_free(mm_slot_cache, mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
/* hpage must be locked, and mmap_lock must be held */
static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmdp, struct page *hpage)
{
	struct vm_fault vmf = {
		.vma = vma,
		.address = addr,
		.flags = 0,
		.pmd = pmdp,
	};

	VM_BUG_ON(!PageTransHuge(hpage));
	mmap_assert_locked(vma->vm_mm);

	if (do_set_pmd(&vmf, hpage))
		return SCAN_FAIL;

	get_page(hpage);
	return SCAN_SUCCEED;
}

/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 * address haddr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 * @install_pmd: If a huge PMD should be installed
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped. Possibly install a huge PMD mapping the THP.
 */
int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
			    bool install_pmd)
{
	struct mmu_notifier_range range;
	bool notified = false;
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = vma_lookup(mm, haddr);
	struct folio *folio;
	pte_t *start_pte, *pte;
	pmd_t *pmd, pgt_pmd;
	spinlock_t *pml = NULL, *ptl;
	int nr_ptes = 0, result = SCAN_FAIL;
	int i;

	mmap_assert_locked(mm);

	/* First check VMA found, in case page tables are being torn down */
	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return SCAN_VMA_CHECK;

	/* Fast check before locking page if already PMD-mapped */
	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
	if (result == SCAN_PMD_MAPPED)
		return result;

	/*
	 * If we are here, we've succeeded in replacing all the native pages
	 * in the page cache with a single hugepage. If a mm were to fault-in
	 * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
	 * analogously elide sysfs THP settings here.
	 */
	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
				     PMD_ORDER))
		return SCAN_VMA_CHECK;

	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
	if (userfaultfd_wp(vma))
		return SCAN_PTE_UFFD_WP;

	folio = filemap_lock_folio(vma->vm_file->f_mapping,
				   linear_page_index(vma, haddr));
	if (IS_ERR(folio))
		return SCAN_PAGE_NULL;

	if (folio_order(folio) != HPAGE_PMD_ORDER) {
		result = SCAN_PAGE_COMPOUND;
		goto drop_folio;
	}

	result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
	switch (result) {
	case SCAN_SUCCEED:
		break;
	case SCAN_PMD_NONE:
		/*
		 * All pte entries have been removed and pmd cleared.
		 * Skip all the pte checks and just update the pmd mapping.
		 */
		goto maybe_install_pmd;
	default:
		goto drop_folio;
	}

	result = SCAN_FAIL;
	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
	if (!start_pte)		/* mmap_lock + page lock should prevent this */
		goto drop_folio;

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;
		pte_t ptent = ptep_get(pte);

		/* empty pte, skip */
		if (pte_none(ptent))
			continue;

		/* page swapped out, abort */
		if (!pte_present(ptent)) {
			result = SCAN_PTE_NON_PRESENT;
			goto abort;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (WARN_ON_ONCE(page && is_zone_device_page(page)))
			page = NULL;
		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (folio_page(folio, i) != page)
			goto abort;
	}

	pte_unmap_unlock(start_pte, ptl);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				haddr, haddr + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	notified = true;

	/*
	 * pmd_lock covers a wider range than ptl, and (if split from mm's
	 * page_table_lock) ptl nests inside pml. The less time we hold pml,
	 * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
	 * inserts a valid as-if-COWed PTE without even looking up page cache.
	 * The folio's page lock therefore does not protect against it, so we
	 * must not drop ptl before pgt_pmd is removed; hence uffd private
	 * needs pml taken now.
	 */
	if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
		pml = pmd_lock(mm, pmd);

	start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
	if (!start_pte)		/* mmap_lock + page lock should prevent this */
		goto abort;
	if (!pml)
		spin_lock(ptl);
	else if (ptl != pml)
		spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);

	/* step 2: clear page table and adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;
		pte_t ptent = ptep_get(pte);

		if (pte_none(ptent))
			continue;
		/*
		 * We dropped ptl after the first scan, to do the mmu_notifier:
		 * page lock stops more PTEs of the folio being faulted in, but
		 * does not stop write faults COWing anon copies from existing
		 * PTEs; and does not stop those being swapped out or migrated.
		 */
		if (!pte_present(ptent)) {
			result = SCAN_PTE_NON_PRESENT;
			goto abort;
		}
		page = vm_normal_page(vma, addr, ptent);
		if (folio_page(folio, i) != page)
			goto abort;

		/*
		 * Must clear entry, or a racing truncate may re-remove it.
		 * TLB flush can be left until pmdp_collapse_flush() does it.
		 * PTE dirty? Shmem page is already dirty; file is read-only.
		 */
		ptep_clear(mm, addr, pte);
		page_remove_rmap(page, vma, false);
		nr_ptes++;
	}

	pte_unmap(start_pte);
	if (!pml)
		spin_unlock(ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (nr_ptes) {
		folio_ref_sub(folio, nr_ptes);
		add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
	}

	/* step 4: remove empty page table */
	if (!pml) {
		pml = pmd_lock(mm, pmd);
		if (ptl != pml)
			spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
	}
	pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	pmdp_get_lockless_sync();
	if (ptl != pml)
		spin_unlock(ptl);
	spin_unlock(pml);

	mmu_notifier_invalidate_range_end(&range);

	mm_dec_nr_ptes(mm);
	page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
	pte_free_defer(mm, pmd_pgtable(pgt_pmd));

maybe_install_pmd:
	/* step 5: install pmd entry */
	result = install_pmd
		? set_huge_pmd(vma, haddr, pmd, &folio->page)
		: SCAN_SUCCEED;
	goto drop_folio;
abort:
	if (nr_ptes) {
		flush_tlb_mm(mm);
		folio_ref_sub(folio, nr_ptes);
		add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes);
	}
	if (start_pte)
		pte_unmap_unlock(start_pte, ptl);
	if (pml && pml != ptl)
		spin_unlock(pml);
	if (notified)
		mmu_notifier_invalidate_range_end(&range);
drop_folio:
	folio_unlock(folio);
	folio_put(folio);
	return result;
}

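/*
 * Walk every VMA that maps this file offset hugely and, where safe (no
 * anon pages, no uffd-wp markers), flush the now-empty page table out
 * of the pmd so that a later fault can map the huge folio directly.
 */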
1d65b771 1684static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
f3f0e1d2
KS
1685{
1686 struct vm_area_struct *vma;
f3f0e1d2 1687
1d65b771 1688 i_mmap_lock_read(mapping);
f3f0e1d2 1689 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1d65b771
HD
1690 struct mmu_notifier_range range;
1691 struct mm_struct *mm;
1692 unsigned long addr;
1693 pmd_t *pmd, pgt_pmd;
1694 spinlock_t *pml;
1695 spinlock_t *ptl;
1696 bool skipped_uffd = false;
34488399 1697
27e1f827
SL
1698 /*
1699 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1d65b771
HD
1700 * got written to. These VMAs are likely not worth removing
1701 * page tables from, as PMD-mapping is likely to be split later.
27e1f827 1702 */
1703 if (READ_ONCE(vma->anon_vma))
1704 continue;
1705
f3f0e1d2 1706 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
34488399 1707 if (addr & ~HPAGE_PMD_MASK ||
1708 vma->vm_end < addr + HPAGE_PMD_SIZE)
1709 continue;
1710
18e77600 1711 mm = vma->vm_mm;
1712 if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
1713 continue;
1714
1715 if (hpage_collapse_test_exit(mm))
1716 continue;
f3f0e1d2 1717 /*
1718 * When a vma is registered with uffd-wp, we cannot recycle
1719 * the page table because there may be pte markers installed.
1720 * Other vmas can still have the same file mapped hugely, but
1721 * skip this one: it will always be mapped in small page size
1722 * for uffd-wp registered ranges.
f3f0e1d2 1723 */
1724 if (userfaultfd_wp(vma))
1725 continue;
1726
1727 /* PTEs were notified when unmapped; but now for the PMD? */
1728 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1729 addr, addr + HPAGE_PMD_SIZE);
1730 mmu_notifier_invalidate_range_start(&range);
1731
1732 pml = pmd_lock(mm, pmd);
1733 ptl = pte_lockptr(mm, pmd);
1734 if (ptl != pml)
1735 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
55fd6fcc 1736
34488399 1737 /*
1738 * Huge page lock is still held, so normally the page table
1739 * must remain empty; and we have already skipped anon_vma
1740 * and userfaultfd_wp() vmas. But since the mmap_lock is not
1741 * held, it is still possible for a racing userfaultfd_ioctl()
1742 * to have inserted ptes or markers. Now that we hold ptlock,
1743 * repeating the anon_vma check protects from one category,
1744 * and repeating the userfaultfd_wp() check from another.
34488399 1745 */
1746 if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
1747 skipped_uffd = true;
1748 } else {
1749 pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
1750 pmdp_get_lockless_sync();
1751 }
1752
1753 if (ptl != pml)
1754 spin_unlock(ptl);
1755 spin_unlock(pml);
1756
1757 mmu_notifier_invalidate_range_end(&range);
1758
1759 if (!skipped_uffd) {
1760 mm_dec_nr_ptes(mm);
1761 page_table_check_pte_clear_range(mm, addr, pgt_pmd);
1762 pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1763 }
1764 }
1d65b771 1765 i_mmap_unlock_read(mapping);
1766}
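Note the locking discipline in retract_page_tables(): anon_vma and userfaultfd_wp() are first tested without the heavy locks (cheap but racy), then re-tested under pmd_lock()/pte lock before anything irreversible is done. A minimal userspace analogue of this check/lock/re-check pattern, with a pthread mutex standing in for the page-table locks, all names hypothetical, and C11 atomics skipped for brevity:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool retractable = true;         /* may be cleared concurrently */

static bool try_retract(void (*retract)(void))
{
        bool done = false;

        if (!retractable)               /* unlocked check: may be stale */
                return false;
        pthread_mutex_lock(&table_lock);
        if (retractable) {              /* re-check now that we hold the lock */
                retract();
                done = true;
        }
        pthread_mutex_unlock(&table_lock);
        return done;
}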
1767
1768/**
99cb0dbd 1769 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
f3f0e1d2 1770 *
336e6b53 1771 * @mm: process address space where collapse happens
34488399 1772 * @addr: virtual collapse start address
1773 * @file: file that the collapse operates on
1774 * @start: collapse start page offset in @file
9710a78a 1775 * @cc: collapse context and scratchpad
336e6b53 1776 *
f3f0e1d2 1777 * Basic scheme is simple, details are more complex:
87c460a0 1778 * - allocate and lock a new huge page;
a2e17cc2 1779 * - scan page cache, locking old pages
99cb0dbd 1780 * + swap/gup in pages if necessary;
1781 * - copy data to new page
1782 * - handle shmem holes
1783 * + re-validate that holes weren't filled by someone else
1784 * + check for userfaultfd
ac492b9c 1785 * - finalize updates to the page cache;
77da9389 1786 * - if replacing succeeds:
87c460a0 1787 * + unlock huge page;
a2e17cc2 1788 * + free old pages;
f3f0e1d2 1789 * - if replacing failed:
a2e17cc2 1790 * + unlock old pages
87c460a0 1791 * + unlock and free huge page;
f3f0e1d2 1792 */
1793static int collapse_file(struct mm_struct *mm, unsigned long addr,
1794 struct file *file, pgoff_t start,
1795 struct collapse_control *cc)
f3f0e1d2 1796{
579c571e 1797 struct address_space *mapping = file->f_mapping;
50ad2f24 1798 struct page *hpage;
1799 struct page *page;
1800 struct page *tmp;
1801 struct folio *folio;
4c9473e8 1802 pgoff_t index = 0, end = start + HPAGE_PMD_NR;
f3f0e1d2 1803 LIST_HEAD(pagelist);
77da9389 1804 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
f3f0e1d2 1805 int nr_none = 0, result = SCAN_SUCCEED;
99cb0dbd 1806 bool is_shmem = shmem_file(file);
4c9473e8 1807 int nr = 0;
f3f0e1d2 1808
99cb0dbd 1809 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1810 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1811
50ad2f24 1812 result = alloc_charge_hpage(&hpage, mm, cc);
9710a78a 1813 if (result != SCAN_SUCCEED)
f3f0e1d2 1814 goto out;
f3f0e1d2 1815
1816 __SetPageLocked(hpage);
1817 if (is_shmem)
1818 __SetPageSwapBacked(hpage);
1819 hpage->index = start;
1820 hpage->mapping = mapping;
1821
1822 /*
1823 * Ensure we have slots for all the pages in the range. This is
1824 * almost certainly a no-op because most of the pages must be present
1825 */
1826 do {
1827 xas_lock_irq(&xas);
1828 xas_create_range(&xas);
1829 if (!xas_error(&xas))
1830 break;
1831 xas_unlock_irq(&xas);
1832 if (!xas_nomem(&xas, GFP_KERNEL)) {
95feeabb 1833 result = SCAN_FAIL;
cae106dd 1834 goto rollback;
1835 }
1836 } while (1);
1837
77da9389 1838 for (index = start; index < end; index++) {
1839 xas_set(&xas, index);
1840 page = xas_load(&xas);
1841
1842 VM_BUG_ON(index != xas.xa_index);
1843 if (is_shmem) {
1844 if (!page) {
1845 /*
1846 * Stop if extent has been truncated or
1847 * hole-punched, and is now completely
1848 * empty.
1849 */
1850 if (index == start) {
1851 if (!xas_next_entry(&xas, end - 1)) {
1852 result = SCAN_TRUNCATED;
1853 goto xa_locked;
1854 }
99cb0dbd 1855 }
1856 nr_none++;
1857 continue;
701270fa 1858 }
1859
1860 if (xa_is_value(page) || !PageUptodate(page)) {
1861 xas_unlock_irq(&xas);
1862 /* swap in or instantiate fallocated page */
1863 if (shmem_get_folio(mapping->host, index,
1864 &folio, SGP_NOALLOC)) {
1865 result = SCAN_FAIL;
1866 goto xa_unlocked;
1867 }
1fec6890 1868 /* drain lru cache to help isolate_lru_page() */
efa3d814 1869 lru_add_drain();
7459c149 1870 page = folio_file_page(folio, index);
1871 } else if (trylock_page(page)) {
1872 get_page(page);
1873 xas_unlock_irq(&xas);
1874 } else {
1875 result = SCAN_PAGE_LOCK;
042a3082 1876 goto xa_locked;
77da9389 1877 }
1878 } else { /* !is_shmem */
1879 if (!page || xa_is_value(page)) {
1880 xas_unlock_irq(&xas);
1881 page_cache_sync_readahead(mapping, &file->f_ra,
1882 file, index,
e5a59d30 1883 end - index);
1fec6890 1884 /* drain lru cache to help isolate_lru_page() */
1885 lru_add_drain();
1886 page = find_lock_page(mapping, index);
1887 if (unlikely(page == NULL)) {
1888 result = SCAN_FAIL;
1889 goto xa_unlocked;
1890 }
1891 } else if (PageDirty(page)) {
1892 /*
1893 * khugepaged only works on a read-only fd,
1894 * so this page is dirty because it hasn't
1895 * been flushed since first write. There
1896 * won't be new dirty pages.
1897 *
1898 * Trigger async flush here and hope the
1899 * writeback is done when khugepaged
1900 * revisits this page.
1901 *
1902 * This is a one-off situation. We are not
1903 * forcing writeback in a loop.
1904 */
1905 xas_unlock_irq(&xas);
1906 filemap_flush(mapping);
1907 result = SCAN_FAIL;
1908 goto xa_unlocked;
1909 } else if (PageWriteback(page)) {
1910 xas_unlock_irq(&xas);
1911 result = SCAN_FAIL;
1912 goto xa_unlocked;
1913 } else if (trylock_page(page)) {
1914 get_page(page);
1915 xas_unlock_irq(&xas);
1916 } else {
1917 result = SCAN_PAGE_LOCK;
1918 goto xa_locked;
f3f0e1d2 1919 }
1920 }
1921
1922 /*
b93b0163 1923 * The page must be locked, so we can drop the i_pages lock
1924 * without racing with truncate.
1925 */
1926 VM_BUG_ON_PAGE(!PageLocked(page), page);
1927
1928 /* make sure the page is up to date */
1929 if (unlikely(!PageUptodate(page))) {
1930 result = SCAN_FAIL;
1931 goto out_unlock;
1932 }
1933
1934 /*
1935 * If file was truncated then extended, or hole-punched, before
1936 * we locked the first page, then a THP might be there already.
58ac9a89 1937 * This will be discovered on the first iteration.
1938 */
1939 if (PageTransCompound(page)) {
1940 struct page *head = compound_head(page);
1941
1942 result = compound_order(head) == HPAGE_PMD_ORDER &&
1943 head->index == start
1944 /* Maybe PMD-mapped */
1945 ? SCAN_PTE_MAPPED_HUGEPAGE
1946 : SCAN_PAGE_COMPOUND;
1947 goto out_unlock;
1948 }
f3f0e1d2 1949
1950 folio = page_folio(page);
1951
1952 if (folio_mapping(folio) != mapping) {
1953 result = SCAN_TRUNCATED;
1954 goto out_unlock;
1955 }
f3f0e1d2 1956
1957 if (!is_shmem && (folio_test_dirty(folio) ||
1958 folio_test_writeback(folio))) {
1959 /*
1960 * khugepaged only works on a read-only fd, so this
1961 * page is dirty because it hasn't been flushed
1962 * since first write.
1963 */
1964 result = SCAN_FAIL;
1965 goto out_unlock;
1966 }
1967
be2d5756 1968 if (!folio_isolate_lru(folio)) {
f3f0e1d2 1969 result = SCAN_DEL_PAGE_LRU;
042a3082 1970 goto out_unlock;
1971 }
1972
0201ebf2 1973 if (!filemap_release_folio(folio, GFP_KERNEL)) {
99cb0dbd 1974 result = SCAN_PAGE_HAS_PRIVATE;
64ab3195 1975 folio_putback_lru(folio);
1976 goto out_unlock;
1977 }
1978
1979 if (folio_mapped(folio))
1980 try_to_unmap(folio,
869f7ee6 1981 TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
f3f0e1d2 1982
77da9389 1983 xas_lock_irq(&xas);
f3f0e1d2 1984
e8c716bc 1985 VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page);
1986
1987 /*
a2e17cc2 1988 * We control three references to the page:
f3f0e1d2 1989 * - we hold a pin on it;
77da9389 1990 * - one reference from page cache;
f3f0e1d2 1991 * - one from isolate_lru_page;
1992 * If those are the only references, then any new usage of the
1993 * page will have to fetch it from the page cache. That requires
1994 * locking the page to handle truncate, so any new usage will be
1995 * blocked until we unlock page after collapse/during rollback.
f3f0e1d2 1996 */
a2e17cc2 1997 if (page_count(page) != 3) {
f3f0e1d2 1998 result = SCAN_PAGE_COUNT;
1999 xas_unlock_irq(&xas);
2000 putback_lru_page(page);
2001 goto out_unlock;
2002 }
2003
2004 /*
a2e17cc2 2005 * Accumulate the pages that are being collapsed.
2006 */
2007 list_add_tail(&page->lru, &pagelist);
f3f0e1d2 2008 continue;
2009out_unlock:
2010 unlock_page(page);
2011 put_page(page);
042a3082 2012 goto xa_unlocked;
2013 }
2014
12904d95 2015 if (!is_shmem) {
09d91cda 2016 filemap_nr_thps_inc(mapping);
2017 /*
2018 * Paired with smp_mb() in do_dentry_open() to ensure
2019 * i_writecount is up to date and the update to nr_thps is
2020 * visible. Ensures the page cache will be truncated if the
2021 * file is opened writable.
2022 */
2023 smp_mb();
2024 if (inode_is_open_for_write(mapping->host)) {
2025 result = SCAN_FAIL;
eb6ecbed 2026 filemap_nr_thps_dec(mapping);
eb6ecbed 2027 }
09d91cda 2028 }
99cb0dbd 2029
2030xa_locked:
2031 xas_unlock_irq(&xas);
77da9389 2032xa_unlocked:
042a3082 2033
2034 /*
2035 * If collapse is successful, flush must be done now before copying.
2036 * If collapse is unsuccessful, does flush actually need to be done?
2037 * Do it anyway, to clear the state.
2038 */
2039 try_to_unmap_flush();
2040
2041 if (result == SCAN_SUCCEED && nr_none &&
2042 !shmem_charge(mapping->host, nr_none))
2043 result = SCAN_FAIL;
2044 if (result != SCAN_SUCCEED) {
2045 nr_none = 0;
cae106dd 2046 goto rollback;
509f0069 2047 }
2048
2049 /*
a2e17cc2 2050 * The old pages are locked, so they won't change anymore.
2051 */
2052 index = start;
2053 list_for_each_entry(page, &pagelist, lru) {
2054 while (index < page->index) {
2055 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2056 index++;
2057 }
2058 if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
2059 result = SCAN_COPY_MC;
2060 goto rollback;
2061 }
2062 index++;
2063 }
2064 while (index < end) {
2065 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2066 index++;
2067 }
2068
2069 if (nr_none) {
2070 struct vm_area_struct *vma;
2071 int nr_none_check = 0;
2072
2073 i_mmap_lock_read(mapping);
2074 xas_lock_irq(&xas);
2075
2076 xas_set(&xas, start);
2077 for (index = start; index < end; index++) {
2078 if (!xas_next(&xas)) {
2079 xas_store(&xas, XA_RETRY_ENTRY);
2080 if (xas_error(&xas)) {
2081 result = SCAN_STORE_FAILED;
2082 goto immap_locked;
2083 }
2084 nr_none_check++;
2085 }
2086 }
2087
2088 if (nr_none != nr_none_check) {
2089 result = SCAN_PAGE_FILLED;
2090 goto immap_locked;
2091 }
2092
2093 /*
2094 * If userspace observed a missing page in a VMA with a MODE_MISSING
2095 * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that
2096 * page. If so, we need to roll back to avoid suppressing such an
2097 * event. Wp/minor userfaultfds don't give userspace any
2098 * guarantees that the kernel won't fill a missing page with a zero
2099 * page, so they don't matter here.
2100 *
2101 * Any userfaultfds registered after this point will not be able to
2102 * observe any missing pages due to the previously inserted retry
2103 * entries.
2104 */
2105 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2106 if (userfaultfd_missing(vma)) {
2107 result = SCAN_EXCEED_NONE_PTE;
2108 goto immap_locked;
2109 }
2110 }
2111
2112immap_locked:
2113 i_mmap_unlock_read(mapping);
2114 if (result != SCAN_SUCCEED) {
2115 xas_set(&xas, start);
2116 for (index = start; index < end; index++) {
2117 if (xas_next(&xas) == XA_RETRY_ENTRY)
2118 xas_store(&xas, NULL);
2119 }
2120
2121 xas_unlock_irq(&xas);
2122 goto rollback;
2123 }
2124 } else {
2125 xas_lock_irq(&xas);
2126 }
2127
2128 nr = thp_nr_pages(hpage);
2129 if (is_shmem)
2130 __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
2131 else
2132 __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
12904d95 2133
2134 if (nr_none) {
2135 __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
2136 /* nr_none is always 0 for non-shmem. */
2137 __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
2138 }
f3f0e1d2 2139
2140 /*
2141 * Mark hpage as uptodate before inserting it into the page cache so
2142 * that it isn't mistaken for a fallocated but unwritten page.
2143 */
2144 folio = page_folio(hpage);
2145 folio_mark_uptodate(folio);
2146 folio_ref_add(folio, HPAGE_PMD_NR - 1);
284a344e 2147
2148 if (is_shmem)
2149 folio_mark_dirty(folio);
2150 folio_add_lru(folio);
f3f0e1d2 2151
2152 /* Join all the small entries into a single multi-index entry. */
2153 xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2154 xas_store(&xas, hpage);
0175ab61 2155 WARN_ON_ONCE(xas_error(&xas));
2156 xas_unlock_irq(&xas);
2157
2158 /*
2159 * Remove pte page tables, so we can re-fault the page as huge.
1d65b771 2160 * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
cae106dd 2161 */
2162 retract_page_tables(mapping, start);
2163 if (cc && !cc->is_khugepaged)
2164 result = SCAN_PTE_MAPPED_HUGEPAGE;
cae106dd 2165 unlock_page(hpage);
2166
2167 /*
2168 * The collapse has succeeded, so free the old pages.
2169 */
2170 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2171 list_del(&page->lru);
2172 page->mapping = NULL;
2173 ClearPageActive(page);
2174 ClearPageUnevictable(page);
2175 unlock_page(page);
a2e17cc2 2176 folio_put_refs(page_folio(page), 3);
2177 }
2178
2179 goto out;
2180
2181rollback:
2182 /* Something went wrong: roll back page cache changes */
cae106dd 2183 if (nr_none) {
a2e17cc2 2184 xas_lock_irq(&xas);
cae106dd 2185 mapping->nrpages -= nr_none;
a2e17cc2 2186 xas_unlock_irq(&xas);
509f0069 2187 shmem_uncharge(mapping->host, nr_none);
cae106dd 2188 }
aaa52e34 2189
a2e17cc2 2190 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
cae106dd 2191 list_del(&page->lru);
2192 unlock_page(page);
2193 putback_lru_page(page);
a2e17cc2 2194 put_page(page);
cae106dd 2195 }
2196 /*
2197 * Undo the updates of filemap_nr_thps_inc for non-SHMEM
2198 * file only. This undo is not needed unless failure is
2199 * due to SCAN_COPY_MC.
2200 */
2201 if (!is_shmem && result == SCAN_COPY_MC) {
2202 filemap_nr_thps_dec(mapping);
12904d95 2203 /*
2204 * Paired with smp_mb() in do_dentry_open() to
2205 * ensure the update to nr_thps is visible.
12904d95 2206 */
2207 smp_mb();
2208 }
12904d95 2209
cae106dd 2210 hpage->mapping = NULL;
042a3082 2211
2212 unlock_page(hpage);
2213 put_page(hpage);
2214out:
2215 VM_BUG_ON(!list_empty(&pagelist));
4c9473e8 2216 trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
50ad2f24 2217 return result;
2218}
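On shmem, this whole path can be exercised synchronously from userspace with MADV_COLLAPSE. A minimal sketch, assuming a 6.1+ kernel, 2MB PMDs and THP enabled; error handling is abbreviated and the over-reserve trick is just one way to get a PMD-aligned mapping:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25        /* <linux/mman.h>, 6.1+ uapi */
#endif

#define HPAGE_SIZE (2UL << 20)  /* assume 2MB PMD size */

int main(void)
{
        int fd = memfd_create("collapse-demo", 0);
        void *area, *p;

        ftruncate(fd, HPAGE_SIZE);

        /* Over-reserve VA so the file can be mapped PMD-aligned:
         * an unaligned window contains no collapsible PMD range. */
        area = mmap(NULL, 2 * HPAGE_SIZE, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        p = (void *)(((unsigned long)area + HPAGE_SIZE - 1) &
                     ~(HPAGE_SIZE - 1));
        p = mmap(p, HPAGE_SIZE, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_FIXED, fd, 0);

        memset(p, 0x5a, HPAGE_SIZE);    /* fault in all 512 small pages */

        if (madvise(p, HPAGE_SIZE, MADV_COLLAPSE))
                perror("MADV_COLLAPSE");  /* see madvise_collapse_errno() */
        return 0;
}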
2219
2220static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2221 struct file *file, pgoff_t start,
2222 struct collapse_control *cc)
2223{
2224 struct page *page = NULL;
579c571e 2225 struct address_space *mapping = file->f_mapping;
85b392db 2226 XA_STATE(xas, &mapping->i_pages, start);
2227 int present, swap;
2228 int node = NUMA_NO_NODE;
2229 int result = SCAN_SUCCEED;
2230
2231 present = 0;
2232 swap = 0;
34d6b470 2233 memset(cc->node_load, 0, sizeof(cc->node_load));
e031ff96 2234 nodes_clear(cc->alloc_nmask);
f3f0e1d2 2235 rcu_read_lock();
2236 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2237 if (xas_retry(&xas, page))
f3f0e1d2 2238 continue;
f3f0e1d2 2239
85b392db 2240 if (xa_is_value(page)) {
2241 ++swap;
2242 if (cc->is_khugepaged &&
2243 swap > khugepaged_max_ptes_swap) {
f3f0e1d2 2244 result = SCAN_EXCEED_SWAP_PTE;
e9ea874a 2245 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2246 break;
2247 }
2248 continue;
2249 }
2250
6b24ca4a 2251 /*
58ac9a89 2252 * TODO: khugepaged should compact smaller compound pages
2253 * into a PMD sized page
2254 */
f3f0e1d2 2255 if (PageTransCompound(page)) {
2256 struct page *head = compound_head(page);
2257
2258 result = compound_order(head) == HPAGE_PMD_ORDER &&
2259 head->index == start
2260 /* Maybe PMD-mapped */
2261 ? SCAN_PTE_MAPPED_HUGEPAGE
2262 : SCAN_PAGE_COMPOUND;
2263 /*
2264 * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2265 * by the caller won't touch the page cache, and so
2266 * it's safe to skip LRU and refcount checks before
2267 * returning.
2268 */
2269 break;
2270 }
2271
2272 node = page_to_nid(page);
7d2c4385 2273 if (hpage_collapse_scan_abort(node, cc)) {
2274 result = SCAN_SCAN_ABORT;
2275 break;
2276 }
34d6b470 2277 cc->node_load[node]++;
2278
2279 if (!PageLRU(page)) {
2280 result = SCAN_PAGE_LRU;
2281 break;
2282 }
2283
2284 if (page_count(page) !=
2285 1 + page_mapcount(page) + page_has_private(page)) {
2286 result = SCAN_PAGE_COUNT;
2287 break;
2288 }
2289
2290 /*
2291 * We probably should check if the page is referenced here, but
2292 * nobody would transfer pte_young() to PageReferenced() for us.
2293 * And rmap walk here is just too costly...
2294 */
2295
2296 present++;
2297
2298 if (need_resched()) {
85b392db 2299 xas_pause(&xas);
f3f0e1d2 2300 cond_resched_rcu();
2301 }
2302 }
2303 rcu_read_unlock();
2304
2305 if (result == SCAN_SUCCEED) {
2306 if (cc->is_khugepaged &&
2307 present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
f3f0e1d2 2308 result = SCAN_EXCEED_NONE_PTE;
e9ea874a 2309 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
f3f0e1d2 2310 } else {
34488399 2311 result = collapse_file(mm, addr, file, start, cc);
2312 }
2313 }
2314
045634ff 2315 trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
50ad2f24 2316 return result;
2317}
2318#else
2319static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2320 struct file *file, pgoff_t start,
2321 struct collapse_control *cc)
2322{
2323 BUILD_BUG();
2324}
2325#endif
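The scan above only admits an extent that is already mostly materialized: with 4K base pages HPAGE_PMD_NR is 512, and the usual khugepaged defaults are max_ptes_none = 511, max_ptes_swap = 64 and max_ptes_shared = 256, so `present < HPAGE_PMD_NR - khugepaged_max_ptes_none` means a single resident page suffices by default. A small sketch that reads the live thresholds from the standard sysfs knobs (the helper name is ours; the 512 assumes 4K pages):

#include <stdio.h>

/* Hypothetical helper: read one khugepaged tunable from sysfs. */
static long khugepaged_knob(const char *name)
{
        char path[128];
        long val = -1;
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/kernel/mm/transparent_hugepage/khugepaged/%s", name);
        f = fopen(path, "r");
        if (f) {
                if (fscanf(f, "%ld", &val) != 1)
                        val = -1;
                fclose(f);
        }
        return val;
}

int main(void)
{
        long none = khugepaged_knob("max_ptes_none");   /* default 511 */
        long swap = khugepaged_knob("max_ptes_swap");   /* default 64 */

        printf("extent qualifies with >= %ld present, <= %ld swapped pages\n",
               512 - none, swap);
        return 0;
}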
2326
50ad2f24 2327static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
34d6b470 2328 struct collapse_control *cc)
2329 __releases(&khugepaged_mm_lock)
2330 __acquires(&khugepaged_mm_lock)
2331{
68540502 2332 struct vma_iterator vmi;
2333 struct khugepaged_mm_slot *mm_slot;
2334 struct mm_slot *slot;
b46e756f
KS
2335 struct mm_struct *mm;
2336 struct vm_area_struct *vma;
2337 int progress = 0;
2338
2339 VM_BUG_ON(!pages);
35f3aa39 2340 lockdep_assert_held(&khugepaged_mm_lock);
50ad2f24 2341 *result = SCAN_FAIL;
b46e756f 2342
b26e2701 2343 if (khugepaged_scan.mm_slot) {
b46e756f 2344 mm_slot = khugepaged_scan.mm_slot;
2345 slot = &mm_slot->slot;
2346 } else {
2347 slot = list_entry(khugepaged_scan.mm_head.next,
b46e756f 2348 struct mm_slot, mm_node);
b26e2701 2349 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2350 khugepaged_scan.address = 0;
2351 khugepaged_scan.mm_slot = mm_slot;
2352 }
2353 spin_unlock(&khugepaged_mm_lock);
2354
b26e2701 2355 mm = slot->mm;
2356 /*
2357 * Don't wait for semaphore (to avoid long wait times). Just move to
2358 * the next mm on the list.
2359 */
2360 vma = NULL;
d8ed45c5 2361 if (unlikely(!mmap_read_trylock(mm)))
c1e8d7c6 2362 goto breakouterloop_mmap_lock;
2363
2364 progress++;
2365 if (unlikely(hpage_collapse_test_exit(mm)))
2366 goto breakouterloop;
2367
2368 vma_iter_init(&vmi, mm, khugepaged_scan.address);
2369 for_each_vma(vmi, vma) {
2370 unsigned long hstart, hend;
2371
2372 cond_resched();
7d2c4385 2373 if (unlikely(hpage_collapse_test_exit(mm))) {
2374 progress++;
2375 break;
2376 }
2377 if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
2378 true, PMD_ORDER)) {
2379skip:
2380 progress++;
2381 continue;
2382 }
2383 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2384 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2385 if (khugepaged_scan.address > hend)
2386 goto skip;
2387 if (khugepaged_scan.address < hstart)
2388 khugepaged_scan.address = hstart;
2389 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2390
2391 while (khugepaged_scan.address < hend) {
2392 bool mmap_locked = true;
2393
b46e756f 2394 cond_resched();
7d2c4385 2395 if (unlikely(hpage_collapse_test_exit(mm)))
2396 goto breakouterloop;
2397
2398 VM_BUG_ON(khugepaged_scan.address < hstart ||
2399 khugepaged_scan.address + HPAGE_PMD_SIZE >
2400 hend);
99cb0dbd 2401 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
396bcc52 2402 struct file *file = get_file(vma->vm_file);
2403 pgoff_t pgoff = linear_page_index(vma,
2404 khugepaged_scan.address);
99cb0dbd 2405
d8ed45c5 2406 mmap_read_unlock(mm);
50ad2f24 2407 mmap_locked = false;
2408 *result = hpage_collapse_scan_file(mm,
2409 khugepaged_scan.address, file, pgoff, cc);
f3f0e1d2 2410 fput(file);
2411 if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
2412 mmap_read_lock(mm);
2413 if (hpage_collapse_test_exit(mm))
2414 goto breakouterloop;
2415 *result = collapse_pte_mapped_thp(mm,
2416 khugepaged_scan.address, false);
2417 if (*result == SCAN_PMD_MAPPED)
2418 *result = SCAN_SUCCEED;
2419 mmap_read_unlock(mm);
2420 }
f3f0e1d2 2421 } else {
7d2c4385 2422 *result = hpage_collapse_scan_pmd(mm, vma,
d50791c2 2423 khugepaged_scan.address, &mmap_locked, cc);
f3f0e1d2 2424 }
2425
2426 if (*result == SCAN_SUCCEED)
50ad2f24 2427 ++khugepaged_pages_collapsed;
58ac9a89 2428
2429 /* move to next address */
2430 khugepaged_scan.address += HPAGE_PMD_SIZE;
2431 progress += HPAGE_PMD_NR;
2432 if (!mmap_locked)
2433 /*
2434 * We released mmap_lock so break loop. Note
2435 * that we drop mmap_lock before all hugepage
2436 * allocations, so if allocation fails, we are
2437 * guaranteed to break here and report the
2438 * correct result back to caller.
2439 */
c1e8d7c6 2440 goto breakouterloop_mmap_lock;
2441 if (progress >= pages)
2442 goto breakouterloop;
2443 }
2444 }
2445breakouterloop:
d8ed45c5 2446 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
c1e8d7c6 2447breakouterloop_mmap_lock:
2448
2449 spin_lock(&khugepaged_mm_lock);
2450 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2451 /*
2452 * Release the current mm_slot if this mm is about to die, or
2453 * if we scanned all vmas of this mm.
2454 */
7d2c4385 2455 if (hpage_collapse_test_exit(mm) || !vma) {
2456 /*
2457 * Make sure that if mm_users is reaching zero while
2458 * khugepaged runs here, khugepaged_exit will find
2459 * mm_slot not pointing to the exiting mm.
2460 */
2461 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2462 slot = list_entry(slot->mm_node.next,
2463 struct mm_slot, mm_node);
2464 khugepaged_scan.mm_slot =
2465 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2466 khugepaged_scan.address = 0;
2467 } else {
2468 khugepaged_scan.mm_slot = NULL;
2469 khugepaged_full_scans++;
2470 }
2471
2472 collect_mm_slot(mm_slot);
2473 }
2474
2475 return progress;
2476}
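The hstart/hend computation above restricts the scan to PMD-aligned, PMD-sized windows that lie fully inside the VMA. A worked example of the rounding, with the macros re-derived locally for illustration:

#include <stdio.h>

#define HPAGE_PMD_SIZE (2UL << 20)      /* assume 2MB PMDs */
#define ALIGN_UP(x)    (((x) + HPAGE_PMD_SIZE - 1) & ~(HPAGE_PMD_SIZE - 1))
#define ALIGN_DOWN(x)  ((x) & ~(HPAGE_PMD_SIZE - 1))

int main(void)
{
        /* A VMA [0x1ff000, 0x60b000) yields hstart = 0x200000 and
         * hend = 0x600000, i.e. exactly two scannable 2MB windows. */
        unsigned long vm_start = 0x1ff000, vm_end = 0x60b000;
        unsigned long hstart = ALIGN_UP(vm_start);
        unsigned long hend = ALIGN_DOWN(vm_end);

        printf("hstart=%#lx hend=%#lx windows=%lu\n", hstart, hend,
               hstart < hend ? (hend - hstart) / HPAGE_PMD_SIZE : 0);
        return 0;
}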
2477
2478static int khugepaged_has_work(void)
2479{
2480 return !list_empty(&khugepaged_scan.mm_head) &&
1064026b 2481 hugepage_flags_enabled();
2482}
2483
2484static int khugepaged_wait_event(void)
2485{
2486 return !list_empty(&khugepaged_scan.mm_head) ||
2487 kthread_should_stop();
2488}
2489
34d6b470 2490static void khugepaged_do_scan(struct collapse_control *cc)
b46e756f 2491{
b46e756f 2492 unsigned int progress = 0, pass_through_head = 0;
89dc6a96 2493 unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
b46e756f 2494 bool wait = true;
50ad2f24 2495 int result = SCAN_SUCCEED;
b46e756f 2496
2497 lru_add_drain_all();
2498
c6a7f445 2499 while (true) {
2500 cond_resched();
2501
b39ca208 2502 if (unlikely(kthread_should_stop()))
2503 break;
2504
2505 spin_lock(&khugepaged_mm_lock);
2506 if (!khugepaged_scan.mm_slot)
2507 pass_through_head++;
2508 if (khugepaged_has_work() &&
2509 pass_through_head < 2)
2510 progress += khugepaged_scan_mm_slot(pages - progress,
50ad2f24 2511 &result, cc);
2512 else
2513 progress = pages;
2514 spin_unlock(&khugepaged_mm_lock);
b46e756f 2515
2516 if (progress >= pages)
2517 break;
2518
50ad2f24 2519 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2520 /*
2521 * If fail to allocate the first time, try to sleep for
2522 * a while. When hit again, cancel the scan.
2523 */
2524 if (!wait)
2525 break;
2526 wait = false;
2527 khugepaged_alloc_sleep();
2528 }
2529 }
2530}
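For scale: with the usual defaults (khugepaged_pages_to_scan = 8 * 512 = 4096 and khugepaged_scan_sleep_millisecs = 10000), each pass above examines at most 4096 PTEs before sleeping, i.e. 16MB of address space per wakeup and roughly 1.6MB/s of scan coverage assuming 4K base pages. Both knobs are writable under /sys/kernel/mm/transparent_hugepage/khugepaged/ when that rate is too slow for a workload.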
2531
2532static bool khugepaged_should_wakeup(void)
2533{
2534 return kthread_should_stop() ||
2535 time_after_eq(jiffies, khugepaged_sleep_expire);
2536}
2537
2538static void khugepaged_wait_work(void)
2539{
2540 if (khugepaged_has_work()) {
2541 const unsigned long scan_sleep_jiffies =
2542 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2543
2544 if (!scan_sleep_jiffies)
2545 return;
2546
2547 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2548 wait_event_freezable_timeout(khugepaged_wait,
2549 khugepaged_should_wakeup(),
2550 scan_sleep_jiffies);
2551 return;
2552 }
2553
1064026b 2554 if (hugepage_flags_enabled())
2555 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2556}
2557
2558static int khugepaged(void *none)
2559{
b26e2701 2560 struct khugepaged_mm_slot *mm_slot;
2561
2562 set_freezable();
2563 set_user_nice(current, MAX_NICE);
2564
2565 while (!kthread_should_stop()) {
34d6b470 2566 khugepaged_do_scan(&khugepaged_collapse_control);
2567 khugepaged_wait_work();
2568 }
2569
2570 spin_lock(&khugepaged_mm_lock);
2571 mm_slot = khugepaged_scan.mm_slot;
2572 khugepaged_scan.mm_slot = NULL;
2573 if (mm_slot)
2574 collect_mm_slot(mm_slot);
2575 spin_unlock(&khugepaged_mm_lock);
2576 return 0;
2577}
2578
2579static void set_recommended_min_free_kbytes(void)
2580{
2581 struct zone *zone;
2582 int nr_zones = 0;
2583 unsigned long recommended_min;
2584
1064026b 2585 if (!hugepage_flags_enabled()) {
2586 calculate_min_free_kbytes();
2587 goto update_wmarks;
2588 }
2589
2590 for_each_populated_zone(zone) {
2591 /*
2592 * We don't need to worry about fragmentation of
2593 * ZONE_MOVABLE since it only has movable pages.
2594 */
2595 if (zone_idx(zone) > gfp_zone(GFP_USER))
2596 continue;
2597
b46e756f 2598 nr_zones++;
b7d349c7 2599 }
2600
2601 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2602 recommended_min = pageblock_nr_pages * nr_zones * 2;
2603
2604 /*
2605 * Make sure that on average at least two pageblocks are almost free
2606 * of another type, one for a migratetype to fall back to and a
2607 * second to avoid subsequent fallbacks of other types. There are 3
2608 * MIGRATE_TYPES we care about.
2609 */
2610 recommended_min += pageblock_nr_pages * nr_zones *
2611 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2612
2613 /* never allow reserving more than 5% of the lowmem */
2614 recommended_min = min(recommended_min,
2615 (unsigned long) nr_free_buffer_pages() / 20);
2616 recommended_min <<= (PAGE_SHIFT-10);
2617
2618 if (recommended_min > min_free_kbytes) {
2619 if (user_min_free_kbytes >= 0)
2620 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2621 min_free_kbytes, recommended_min);
2622
2623 min_free_kbytes = recommended_min;
2624 }
2625
2626update_wmarks:
2627 setup_per_zone_wmarks();
2628}
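A worked example of the sizing above, assuming 4K base pages (pageblock_nr_pages = 512), MIGRATE_PCPTYPES = 3 and two populated zones at or below gfp_zone(GFP_USER): recommended_min = 512 * 2 * 2 + 512 * 2 * 3 * 3 = 2048 + 9216 = 11264 pages; after the 5%-of-lowmem cap (assumed not to bind here) and the << (PAGE_SHIFT - 10) conversion, min_free_kbytes is raised to 45056kB, about 44MB.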
2629
2630int start_stop_khugepaged(void)
2631{
2632 int err = 0;
2633
2634 mutex_lock(&khugepaged_mutex);
1064026b 2635 if (hugepage_flags_enabled()) {
2636 if (!khugepaged_thread)
2637 khugepaged_thread = kthread_run(khugepaged, NULL,
2638 "khugepaged");
2639 if (IS_ERR(khugepaged_thread)) {
2640 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2641 err = PTR_ERR(khugepaged_thread);
2642 khugepaged_thread = NULL;
2643 goto fail;
2644 }
2645
2646 if (!list_empty(&khugepaged_scan.mm_head))
2647 wake_up_interruptible(&khugepaged_wait);
2648 } else if (khugepaged_thread) {
2649 kthread_stop(khugepaged_thread);
2650 khugepaged_thread = NULL;
2651 }
bd3400ea 2652 set_recommended_min_free_kbytes();
2653fail:
2654 mutex_unlock(&khugepaged_mutex);
2655 return err;
2656}
2657
2658void khugepaged_min_free_kbytes_update(void)
2659{
2660 mutex_lock(&khugepaged_mutex);
1064026b 2661 if (hugepage_flags_enabled() && khugepaged_thread)
2662 set_recommended_min_free_kbytes();
2663 mutex_unlock(&khugepaged_mutex);
2664}
7d8faaf1 2665
2666bool current_is_khugepaged(void)
2667{
2668 return kthread_func(current) == khugepaged;
2669}
2670
2671static int madvise_collapse_errno(enum scan_result r)
2672{
2673 /*
2674 * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2675 * actionable feedback to caller, so they may take an appropriate
2676 * fallback measure depending on the nature of the failure.
2677 */
2678 switch (r) {
2679 case SCAN_ALLOC_HUGE_PAGE_FAIL:
2680 return -ENOMEM;
2681 case SCAN_CGROUP_CHARGE_FAIL:
ac492b9c 2682 case SCAN_EXCEED_NONE_PTE:
2683 return -EBUSY;
2684 /* Resource temporarily unavailable - trying again might succeed */
ae63c898 2685 case SCAN_PAGE_COUNT:
2686 case SCAN_PAGE_LOCK:
2687 case SCAN_PAGE_LRU:
0f3e2a2c 2688 case SCAN_DEL_PAGE_LRU:
ac492b9c 2689 case SCAN_PAGE_FILLED:
2690 return -EAGAIN;
2691 /*
2692 * Other: Trying again likely not to succeed / error intrinsic to
2693 * specified memory range. khugepaged likely won't be able to collapse
2694 * either.
2695 */
2696 default:
2697 return -EINVAL;
2698 }
2699}
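Callers can key their retry policy off these conventions: only the -EAGAIN class is worth retrying soon. A minimal userspace sketch, with a hypothetical helper name and backoff, assuming MADV_COLLAPSE from the 6.1+ uapi:

#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25
#endif

/* Retry only the transient (-EAGAIN) failures; treat the rest as
 * permanent for this range. */
static int collapse_with_retry(void *addr, size_t len, int max_tries)
{
        int i;

        for (i = 0; i < max_tries; i++) {
                if (!madvise(addr, len, MADV_COLLAPSE))
                        return 0;
                if (errno != EAGAIN)
                        break;          /* EBUSY/ENOMEM/EINVAL: give up */
                usleep(100 * 1000);     /* back off, then try again */
        }
        return -1;
}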
2700
2701int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2702 unsigned long start, unsigned long end)
2703{
2704 struct collapse_control *cc;
2705 struct mm_struct *mm = vma->vm_mm;
2706 unsigned long hstart, hend, addr;
2707 int thps = 0, last_fail = SCAN_FAIL;
2708 bool mmap_locked = true;
2709
2710 BUG_ON(vma->vm_start > start);
2711 BUG_ON(vma->vm_end < end);
2712
2713 *prev = vma;
2714
2715 if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
2716 PMD_ORDER))
2717 return -EINVAL;
2718
2719 cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2720 if (!cc)
2721 return -ENOMEM;
2722 cc->is_khugepaged = false;
2723
2724 mmgrab(mm);
2725 lru_add_drain_all();
2726
2727 hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2728 hend = end & HPAGE_PMD_MASK;
2729
2730 for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2731 int result = SCAN_FAIL;
2732
2733 if (!mmap_locked) {
2734 cond_resched();
2735 mmap_read_lock(mm);
2736 mmap_locked = true;
2737 result = hugepage_vma_revalidate(mm, addr, false, &vma,
2738 cc);
2739 if (result != SCAN_SUCCEED) {
2740 last_fail = result;
2741 goto out_nolock;
2742 }
4d24de94 2743
52dc0310 2744 hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2745 }
2746 mmap_assert_locked(mm);
2747 memset(cc->node_load, 0, sizeof(cc->node_load));
e031ff96 2748 nodes_clear(cc->alloc_nmask);
2749 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2750 struct file *file = get_file(vma->vm_file);
2751 pgoff_t pgoff = linear_page_index(vma, addr);
2752
2753 mmap_read_unlock(mm);
2754 mmap_locked = false;
2755 result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2756 cc);
2757 fput(file);
2758 } else {
2759 result = hpage_collapse_scan_pmd(mm, vma, addr,
2760 &mmap_locked, cc);
2761 }
2762 if (!mmap_locked)
2763 *prev = NULL; /* Tell caller we dropped mmap_lock */
2764
34488399 2765handle_result:
2766 switch (result) {
2767 case SCAN_SUCCEED:
2768 case SCAN_PMD_MAPPED:
2769 ++thps;
2770 break;
2771 case SCAN_PTE_MAPPED_HUGEPAGE:
2772 BUG_ON(mmap_locked);
2773 BUG_ON(*prev);
1043173e 2774 mmap_read_lock(mm);
34488399 2775 result = collapse_pte_mapped_thp(mm, addr, true);
1043173e 2776 mmap_read_unlock(mm);
34488399 2777 goto handle_result;
2778 /* Whitelisted set of results where continuing is OK */
2779 case SCAN_PMD_NULL:
2780 case SCAN_PTE_NON_PRESENT:
2781 case SCAN_PTE_UFFD_WP:
2782 case SCAN_PAGE_RO:
2783 case SCAN_LACK_REFERENCED_PAGE:
2784 case SCAN_PAGE_NULL:
2785 case SCAN_PAGE_COUNT:
2786 case SCAN_PAGE_LOCK:
2787 case SCAN_PAGE_COMPOUND:
2788 case SCAN_PAGE_LRU:
0f3e2a2c 2789 case SCAN_DEL_PAGE_LRU:
2790 last_fail = result;
2791 break;
2792 default:
2793 last_fail = result;
2794 /* Other error, exit */
2795 goto out_maybelock;
2796 }
2797 }
2798
2799out_maybelock:
2800 /* Caller expects us to hold mmap_lock on return */
2801 if (!mmap_locked)
2802 mmap_read_lock(mm);
2803out_nolock:
2804 mmap_assert_locked(mm);
2805 mmdrop(mm);
2806 kfree(cc);
2807
2808 return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2809 : madvise_collapse_errno(last_fail);
2810}
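Note the success criterion on the return above: 0 is returned only when every fully contained PMD-sized window collapsed, i.e. thps == (hend - hstart) >> HPAGE_PMD_SHIFT. For example, a request spanning [0x200000, 0x9ff000) gives hstart = 0x200000 and hend = 0x800000, so all three 2MB windows must end up PMD-mapped for success; the unaligned tail above 0x800000 is not attempted at all.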