1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2009 Red Hat, Inc.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/sched.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/coredump.h>
12 #include <linux/sched/numa_balancing.h>
13 #include <linux/highmem.h>
14 #include <linux/hugetlb.h>
15 #include <linux/mmu_notifier.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/shrinker.h>
19 #include <linux/mm_inline.h>
20 #include <linux/swapops.h>
21 #include <linux/backing-dev.h>
22 #include <linux/dax.h>
23 #include <linux/khugepaged.h>
24 #include <linux/freezer.h>
25 #include <linux/pfn_t.h>
26 #include <linux/mman.h>
27 #include <linux/memremap.h>
28 #include <linux/pagemap.h>
29 #include <linux/debugfs.h>
30 #include <linux/migrate.h>
31 #include <linux/hashtable.h>
32 #include <linux/userfaultfd_k.h>
33 #include <linux/page_idle.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/oom.h>
36 #include <linux/numa.h>
37 #include <linux/page_owner.h>
38 #include <linux/sched/sysctl.h>
39 #include <linux/memory-tiers.h>
40 #include <linux/compat.h>
43 #include <asm/pgalloc.h>
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/thp.h>
51 * By default, transparent hugepage support is disabled in order to avoid
52 * risking an increased memory footprint for applications that are not
53 * guaranteed to benefit from it. When transparent hugepage support is
54 * enabled, it is for all mappings, and khugepaged scans all mappings.
55 * Defrag is invoked by khugepaged hugepage allocations and by page faults
56 * for all hugepage allocations.
58 unsigned long transparent_hugepage_flags __read_mostly =
59 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
60 (1<<TRANSPARENT_HUGEPAGE_FLAG)|
62 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
63 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
65 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
66 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
67 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
69 static struct shrinker *deferred_split_shrinker;
70 static unsigned long deferred_split_count(struct shrinker *shrink,
71 struct shrink_control *sc);
72 static unsigned long deferred_split_scan(struct shrinker *shrink,
73 struct shrink_control *sc);
75 static atomic_t huge_zero_refcount;
76 struct page *huge_zero_page __read_mostly;
77 unsigned long huge_zero_pfn __read_mostly = ~0UL;
78 unsigned long huge_anon_orders_always __read_mostly;
79 unsigned long huge_anon_orders_madvise __read_mostly;
80 unsigned long huge_anon_orders_inherit __read_mostly;
82 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
83 unsigned long vm_flags, bool smaps,
84 bool in_pf, bool enforce_sysfs,
87 /* Check the intersection of requested and supported orders. */
88 orders &= vma_is_anonymous(vma) ?
89 THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
93 if (!vma->vm_mm) /* vdso */
97 * Explicitly disabled through madvise or prctl, or some
98 * architectures may disable THP for some mappings, for
99 * example, s390 kvm.
101 if ((vm_flags & VM_NOHUGEPAGE) ||
102 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
105 * If the hardware/firmware has marked hugepage support as disabled.
107 if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
110 /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
112 return in_pf ? orders : 0;
115 * khugepaged special VMA and hugetlb VMA.
116 * Must be checked after dax since some dax mappings may have
117 * VM_MIXEDMAP set.
119 if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
123 * Check alignment for file vma and size for both file and anon vma by
124 * filtering out the unsuitable orders.
126 * Skip the check for page fault. Huge fault does the check in fault
127 * handlers.
130 int order = highest_order(orders);
134 addr = vma->vm_end - (PAGE_SIZE << order);
135 if (thp_vma_suitable_order(vma, addr, order))
137 order = next_order(&orders, order);
145 * Enabled via shmem mount options or sysfs settings.
146 * Must be done before hugepage flags check since shmem has its
147 * own flags.
149 if (!in_pf && shmem_file(vma->vm_file))
150 return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
151 !enforce_sysfs, vma->vm_mm, vm_flags)
152 ? orders : 0;
154 if (!vma_is_anonymous(vma)) {
156 * Enforce sysfs THP requirements as necessary. Anonymous vmas
157 * were already handled in thp_vma_allowable_orders().
160 (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
161 !hugepage_global_always())))
165 * Trust that ->huge_fault() handlers know what they are doing
166 * in fault path.
168 if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
170 /* Only regular file is valid in collapse path */
171 if (((!in_pf || smaps)) && file_thp_enabled(vma))
176 if (vma_is_temporary_stack(vma))
180 * THPeligible bit of smaps should show 1 for proper VMAs even
181 * though anon_vma is not initialized yet.
183 * Allow page fault since anon_vma may not be initialized until
184 * the first page fault.
187 return (smaps || in_pf) ? orders : 0;
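/*
 * Huge zero page handling: the PMD-sized zero page is allocated lazily on
 * first use and reference counted.  get_huge_zero_page() sets the count to 2
 * so that one reference is always held on behalf of the shrinker, which
 * frees the page again once that is the only reference left.  Each mm takes
 * at most one reference (tracked by MMF_HUGE_ZERO_PAGE) via
 * mm_get_huge_zero_page() and drops it in mm_put_huge_zero_page().
 */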
192 static bool get_huge_zero_page(void)
194 struct page *zero_page;
196 if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
199 zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
202 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
206 if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
208 __free_pages(zero_page, compound_order(zero_page));
211 WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
213 /* We take an additional reference here. It will be put back by the shrinker */
214 atomic_set(&huge_zero_refcount, 2);
216 count_vm_event(THP_ZERO_PAGE_ALLOC);
220 static void put_huge_zero_page(void)
223 * Counter should never go to zero here. Only shrinker can put
224 * last reference.
226 BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
229 struct page *mm_get_huge_zero_page(struct mm_struct *mm)
231 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
232 return READ_ONCE(huge_zero_page);
234 if (!get_huge_zero_page())
237 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
238 put_huge_zero_page();
240 return READ_ONCE(huge_zero_page);
243 void mm_put_huge_zero_page(struct mm_struct *mm)
245 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
246 put_huge_zero_page();
249 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
250 struct shrink_control *sc)
252 /* we can free the zero page only if the last reference remains */
253 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
256 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
257 struct shrink_control *sc)
259 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
260 struct page *zero_page = xchg(&huge_zero_page, NULL);
261 BUG_ON(zero_page == NULL);
262 WRITE_ONCE(huge_zero_pfn, ~0UL);
263 __free_pages(zero_page, compound_order(zero_page));
270 static struct shrinker *huge_zero_page_shrinker;
273 static ssize_t enabled_show(struct kobject *kobj,
274 struct kobj_attribute *attr, char *buf)
278 if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
279 output = "[always] madvise never";
280 else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
281 &transparent_hugepage_flags))
282 output = "always [madvise] never";
284 output = "always madvise [never]";
286 return sysfs_emit(buf, "%s\n", output);
289 static ssize_t enabled_store(struct kobject *kobj,
290 struct kobj_attribute *attr,
291 const char *buf, size_t count)
295 if (sysfs_streq(buf, "always")) {
296 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
297 set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
298 } else if (sysfs_streq(buf, "madvise")) {
299 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
300 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
301 } else if (sysfs_streq(buf, "never")) {
302 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
303 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
308 int err = start_stop_khugepaged();
315 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
317 ssize_t single_hugepage_flag_show(struct kobject *kobj,
318 struct kobj_attribute *attr, char *buf,
319 enum transparent_hugepage_flag flag)
321 return sysfs_emit(buf, "%d\n",
322 !!test_bit(flag, &transparent_hugepage_flags));
325 ssize_t single_hugepage_flag_store(struct kobject *kobj,
326 struct kobj_attribute *attr,
327 const char *buf, size_t count,
328 enum transparent_hugepage_flag flag)
333 ret = kstrtoul(buf, 10, &value);
340 set_bit(flag, &transparent_hugepage_flags);
342 clear_bit(flag, &transparent_hugepage_flags);
347 static ssize_t defrag_show(struct kobject *kobj,
348 struct kobj_attribute *attr, char *buf)
352 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
353 &transparent_hugepage_flags))
354 output = "[always] defer defer+madvise madvise never";
355 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
356 &transparent_hugepage_flags))
357 output = "always [defer] defer+madvise madvise never";
358 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
359 &transparent_hugepage_flags))
360 output = "always defer [defer+madvise] madvise never";
361 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
362 &transparent_hugepage_flags))
363 output = "always defer defer+madvise [madvise] never";
365 output = "always defer defer+madvise madvise [never]";
367 return sysfs_emit(buf, "%s\n", output);
370 static ssize_t defrag_store(struct kobject *kobj,
371 struct kobj_attribute *attr,
372 const char *buf, size_t count)
374 if (sysfs_streq(buf, "always")) {
375 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
376 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
377 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
378 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
379 } else if (sysfs_streq(buf, "defer+madvise")) {
380 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
381 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
382 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
383 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
384 } else if (sysfs_streq(buf, "defer")) {
385 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
386 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
387 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
388 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
389 } else if (sysfs_streq(buf, "madvise")) {
390 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
391 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
392 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
393 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
394 } else if (sysfs_streq(buf, "never")) {
395 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
396 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
397 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
398 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
404 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
406 static ssize_t use_zero_page_show(struct kobject *kobj,
407 struct kobj_attribute *attr, char *buf)
409 return single_hugepage_flag_show(kobj, attr, buf,
410 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
412 static ssize_t use_zero_page_store(struct kobject *kobj,
413 struct kobj_attribute *attr, const char *buf, size_t count)
415 return single_hugepage_flag_store(kobj, attr, buf, count,
416 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
418 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
420 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
421 struct kobj_attribute *attr, char *buf)
423 return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
425 static struct kobj_attribute hpage_pmd_size_attr =
426 __ATTR_RO(hpage_pmd_size);
428 static struct attribute *hugepage_attr[] = {
431 &use_zero_page_attr.attr,
432 &hpage_pmd_size_attr.attr,
434 &shmem_enabled_attr.attr,
439 static const struct attribute_group hugepage_attr_group = {
440 .attrs = hugepage_attr,
443 static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
444 static void thpsize_release(struct kobject *kobj);
445 static DEFINE_SPINLOCK(huge_anon_orders_lock);
446 static LIST_HEAD(thpsize_list);
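/*
 * One struct thpsize exists per supported anonymous THP order; each entry on
 * thpsize_list backs a hugepages-<size>kB directory under
 * /sys/kernel/mm/transparent_hugepage with its own "enabled" control.
 */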
450 struct list_head node;
454 #define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)
456 static ssize_t thpsize_enabled_show(struct kobject *kobj,
457 struct kobj_attribute *attr, char *buf)
459 int order = to_thpsize(kobj)->order;
462 if (test_bit(order, &huge_anon_orders_always))
463 output = "[always] inherit madvise never";
464 else if (test_bit(order, &huge_anon_orders_inherit))
465 output = "always [inherit] madvise never";
466 else if (test_bit(order, &huge_anon_orders_madvise))
467 output = "always inherit [madvise] never";
469 output = "always inherit madvise [never]";
471 return sysfs_emit(buf, "%s\n", output);
474 static ssize_t thpsize_enabled_store(struct kobject *kobj,
475 struct kobj_attribute *attr,
476 const char *buf, size_t count)
478 int order = to_thpsize(kobj)->order;
481 if (sysfs_streq(buf, "always")) {
482 spin_lock(&huge_anon_orders_lock);
483 clear_bit(order, &huge_anon_orders_inherit);
484 clear_bit(order, &huge_anon_orders_madvise);
485 set_bit(order, &huge_anon_orders_always);
486 spin_unlock(&huge_anon_orders_lock);
487 } else if (sysfs_streq(buf, "inherit")) {
488 spin_lock(&huge_anon_orders_lock);
489 clear_bit(order, &huge_anon_orders_always);
490 clear_bit(order, &huge_anon_orders_madvise);
491 set_bit(order, &huge_anon_orders_inherit);
492 spin_unlock(&huge_anon_orders_lock);
493 } else if (sysfs_streq(buf, "madvise")) {
494 spin_lock(&huge_anon_orders_lock);
495 clear_bit(order, &huge_anon_orders_always);
496 clear_bit(order, &huge_anon_orders_inherit);
497 set_bit(order, &huge_anon_orders_madvise);
498 spin_unlock(&huge_anon_orders_lock);
499 } else if (sysfs_streq(buf, "never")) {
500 spin_lock(&huge_anon_orders_lock);
501 clear_bit(order, &huge_anon_orders_always);
502 clear_bit(order, &huge_anon_orders_inherit);
503 clear_bit(order, &huge_anon_orders_madvise);
504 spin_unlock(&huge_anon_orders_lock);
511 static struct kobj_attribute thpsize_enabled_attr =
512 __ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);
514 static struct attribute *thpsize_attrs[] = {
515 &thpsize_enabled_attr.attr,
519 static const struct attribute_group thpsize_attr_group = {
520 .attrs = thpsize_attrs,
523 static const struct kobj_type thpsize_ktype = {
524 .release = &thpsize_release,
525 .sysfs_ops = &kobj_sysfs_ops,
528 static struct thpsize *thpsize_create(int order, struct kobject *parent)
530 unsigned long size = (PAGE_SIZE << order) / SZ_1K;
531 struct thpsize *thpsize;
534 thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
536 return ERR_PTR(-ENOMEM);
538 ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
539 "hugepages-%lukB", size);
545 ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
547 kobject_put(&thpsize->kobj);
551 thpsize->order = order;
555 static void thpsize_release(struct kobject *kobj)
557 kfree(to_thpsize(kobj));
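/*
 * Build the sysfs hierarchy: the transparent_hugepage kobject with the main
 * and khugepaged attribute groups, plus one hugepages-<size>kB directory for
 * every order in THP_ORDERS_ALL_ANON.  PMD-sized THP defaults to "inherit"
 * so the existing global "enabled" semantics are preserved.
 */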
560 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
563 struct thpsize *thpsize;
564 unsigned long orders;
568 * Default to setting PMD-sized THP to inherit the global setting and
569 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
570 * constant so we have to do this here.
572 huge_anon_orders_inherit = BIT(PMD_ORDER);
574 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
575 if (unlikely(!*hugepage_kobj)) {
576 pr_err("failed to create transparent hugepage kobject\n");
580 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
582 pr_err("failed to register transparent hugepage group\n");
586 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
588 pr_err("failed to register transparent hugepage group\n");
589 goto remove_hp_group;
592 orders = THP_ORDERS_ALL_ANON;
593 order = highest_order(orders);
595 thpsize = thpsize_create(order, *hugepage_kobj);
596 if (IS_ERR(thpsize)) {
597 pr_err("failed to create thpsize for order %d\n", order);
598 err = PTR_ERR(thpsize);
601 list_add(&thpsize->node, &thpsize_list);
602 order = next_order(&orders, order);
608 hugepage_exit_sysfs(*hugepage_kobj);
611 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
613 kobject_put(*hugepage_kobj);
617 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
619 struct thpsize *thpsize, *tmp;
621 list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
622 list_del(&thpsize->node);
623 kobject_put(&thpsize->kobj);
626 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
627 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
628 kobject_put(hugepage_kobj);
631 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
636 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
639 #endif /* CONFIG_SYSFS */
641 static int __init thp_shrinker_init(void)
643 huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
644 if (!huge_zero_page_shrinker)
647 deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
648 SHRINKER_MEMCG_AWARE |
650 "thp-deferred_split");
651 if (!deferred_split_shrinker) {
652 shrinker_free(huge_zero_page_shrinker);
656 huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
657 huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
658 shrinker_register(huge_zero_page_shrinker);
660 deferred_split_shrinker->count_objects = deferred_split_count;
661 deferred_split_shrinker->scan_objects = deferred_split_scan;
662 shrinker_register(deferred_split_shrinker);
667 static void __init thp_shrinker_exit(void)
669 shrinker_free(huge_zero_page_shrinker);
670 shrinker_free(deferred_split_shrinker);
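/*
 * Bring-up order: detect hardware support, create the sysfs interface,
 * initialise khugepaged and the two shrinkers, then start khugepaged unless
 * the system is too small (< 512MB) to benefit from THP by default.
 */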
673 static int __init hugepage_init(void)
676 struct kobject *hugepage_kobj;
678 if (!has_transparent_hugepage()) {
679 transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
684 * hugepages can't be allocated by the buddy allocator
686 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
688 * we use page->mapping and page->index in second tail page
689 * as list_head: assuming THP order >= 2
691 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
693 err = hugepage_init_sysfs(&hugepage_kobj);
697 err = khugepaged_init();
701 err = thp_shrinker_init();
706 * By default disable transparent hugepages on smaller systems,
707 * where the extra memory used could hurt more than TLB overhead
708 * is likely to save. The admin can still enable it through /sys.
710 if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
711 transparent_hugepage_flags = 0;
715 err = start_stop_khugepaged();
723 khugepaged_destroy();
725 hugepage_exit_sysfs(hugepage_kobj);
729 subsys_initcall(hugepage_init);
731 static int __init setup_transparent_hugepage(char *str)
736 if (!strcmp(str, "always")) {
737 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
738 &transparent_hugepage_flags);
739 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
740 &transparent_hugepage_flags);
742 } else if (!strcmp(str, "madvise")) {
743 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
744 &transparent_hugepage_flags);
745 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
746 &transparent_hugepage_flags);
748 } else if (!strcmp(str, "never")) {
749 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
750 &transparent_hugepage_flags);
751 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
752 &transparent_hugepage_flags);
757 pr_warn("transparent_hugepage= cannot parse, ignored\n");
760 __setup("transparent_hugepage=", setup_transparent_hugepage);
762 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
764 if (likely(vma->vm_flags & VM_WRITE))
765 pmd = pmd_mkwrite(pmd, vma);
771 struct deferred_split *get_deferred_split_queue(struct folio *folio)
773 struct mem_cgroup *memcg = folio_memcg(folio);
774 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
777 return &memcg->deferred_split_queue;
779 return &pgdat->deferred_split_queue;
783 struct deferred_split *get_deferred_split_queue(struct folio *folio)
785 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
787 return &pgdat->deferred_split_queue;
791 void folio_prep_large_rmappable(struct folio *folio)
793 VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
794 INIT_LIST_HEAD(&folio->_deferred_list);
795 folio_set_large_rmappable(folio);
798 static inline bool is_transparent_hugepage(struct folio *folio)
800 if (!folio_test_large(folio))
803 return is_huge_zero_page(&folio->page) ||
804 folio_test_large_rmappable(folio);
807 static unsigned long __thp_get_unmapped_area(struct file *filp,
808 unsigned long addr, unsigned long len,
809 loff_t off, unsigned long flags, unsigned long size)
811 loff_t off_end = off + len;
812 loff_t off_align = round_up(off, size);
813 unsigned long len_pad, ret, off_sub;
815 if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
818 if (off_end <= off_align || (off_end - off_align) < size)
821 len_pad = len + size;
822 if (len_pad < len || (off + len_pad) < off)
825 ret = current->mm->get_unmapped_area(filp, addr, len_pad,
826 off >> PAGE_SHIFT, flags);
829 * The failure might be due to length padding. The caller will retry
830 * without the padding.
832 if (IS_ERR_VALUE(ret))
836 * Do not try to align to THP boundary if allocation at the address
837 * hint succeeds.
842 off_sub = (off - ret) & (size - 1);
844 if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown &&
852 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
853 unsigned long len, unsigned long pgoff, unsigned long flags)
856 loff_t off = (loff_t)pgoff << PAGE_SHIFT;
858 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
862 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
864 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
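/*
 * Anonymous PMD fault slow path: charge the folio, preallocate a PTE table
 * for the deposit, clear the huge page and publish it with
 * __folio_mark_uptodate() before mapping, then recheck the PMD under its
 * lock and either hand the fault to userfaultfd or install the new
 * anonymous mapping and bump the counters.
 */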
866 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
867 struct page *page, gfp_t gfp)
869 struct vm_area_struct *vma = vmf->vma;
870 struct folio *folio = page_folio(page);
872 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
875 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
877 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
879 count_vm_event(THP_FAULT_FALLBACK);
880 count_vm_event(THP_FAULT_FALLBACK_CHARGE);
881 return VM_FAULT_FALLBACK;
883 folio_throttle_swaprate(folio, gfp);
885 pgtable = pte_alloc_one(vma->vm_mm);
886 if (unlikely(!pgtable)) {
891 clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
893 * The memory barrier inside __folio_mark_uptodate makes sure that
894 * clear_huge_page writes become visible before the set_pmd_at()
895 * write.
897 __folio_mark_uptodate(folio);
899 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
900 if (unlikely(!pmd_none(*vmf->pmd))) {
905 ret = check_stable_address_space(vma->vm_mm);
909 /* Deliver the page fault to userland */
910 if (userfaultfd_missing(vma)) {
911 spin_unlock(vmf->ptl);
913 pte_free(vma->vm_mm, pgtable);
914 ret = handle_userfault(vmf, VM_UFFD_MISSING);
915 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
919 entry = mk_huge_pmd(page, vma->vm_page_prot);
920 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
921 folio_add_new_anon_rmap(folio, vma, haddr);
922 folio_add_lru_vma(folio, vma);
923 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
924 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
925 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
926 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
927 mm_inc_nr_ptes(vma->vm_mm);
928 spin_unlock(vmf->ptl);
929 count_vm_event(THP_FAULT_ALLOC);
930 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
935 spin_unlock(vmf->ptl);
938 pte_free(vma->vm_mm, pgtable);
945 * always: directly stall for all thp allocations
946 * defer: wake kswapd and fail if not immediately available
947 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
948 * fail if not immediately available
949 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
950 * available
951 * never: never stall for any thp allocation
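 *
 * For example, with "madvise" only VMAs marked MADV_HUGEPAGE get
 * __GFP_DIRECT_RECLAIM (synchronous compaction); everything else gets
 * GFP_TRANSHUGE_LIGHT, so the huge allocation fails fast and the fault
 * falls back to small pages instead of stalling.
 */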
953 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
955 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
957 /* Always do synchronous compaction */
958 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
959 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
961 /* Kick kcompactd and fail quickly */
962 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
963 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
965 /* Synchronous compaction if madvised, otherwise kick kcompactd */
966 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
967 return GFP_TRANSHUGE_LIGHT |
968 (vma_madvised ? __GFP_DIRECT_RECLAIM :
969 __GFP_KSWAPD_RECLAIM);
971 /* Only do synchronous compaction if madvised */
972 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
973 return GFP_TRANSHUGE_LIGHT |
974 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
976 return GFP_TRANSHUGE_LIGHT;
979 /* Caller must hold page table lock. */
980 static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
981 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
982 struct page *zero_page)
987 entry = mk_pmd(zero_page, vma->vm_page_prot);
988 entry = pmd_mkhuge(entry);
989 pgtable_trans_huge_deposit(mm, pmd, pgtable);
990 set_pmd_at(mm, haddr, pmd, entry);
994 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
996 struct vm_area_struct *vma = vmf->vma;
999 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1001 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
1002 return VM_FAULT_FALLBACK;
1003 if (unlikely(anon_vma_prepare(vma)))
1004 return VM_FAULT_OOM;
1005 khugepaged_enter_vma(vma, vma->vm_flags);
1007 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
1008 !mm_forbids_zeropage(vma->vm_mm) &&
1009 transparent_hugepage_use_zero_page()) {
1011 struct page *zero_page;
1013 pgtable = pte_alloc_one(vma->vm_mm);
1014 if (unlikely(!pgtable))
1015 return VM_FAULT_OOM;
1016 zero_page = mm_get_huge_zero_page(vma->vm_mm);
1017 if (unlikely(!zero_page)) {
1018 pte_free(vma->vm_mm, pgtable);
1019 count_vm_event(THP_FAULT_FALLBACK);
1020 return VM_FAULT_FALLBACK;
1022 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1024 if (pmd_none(*vmf->pmd)) {
1025 ret = check_stable_address_space(vma->vm_mm);
1027 spin_unlock(vmf->ptl);
1028 pte_free(vma->vm_mm, pgtable);
1029 } else if (userfaultfd_missing(vma)) {
1030 spin_unlock(vmf->ptl);
1031 pte_free(vma->vm_mm, pgtable);
1032 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1033 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1035 set_huge_zero_page(pgtable, vma->vm_mm, vma,
1036 haddr, vmf->pmd, zero_page);
1037 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1038 spin_unlock(vmf->ptl);
1041 spin_unlock(vmf->ptl);
1042 pte_free(vma->vm_mm, pgtable);
1046 gfp = vma_thp_gfp_mask(vma);
1047 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
1048 if (unlikely(!folio)) {
1049 count_vm_event(THP_FAULT_FALLBACK);
1050 return VM_FAULT_FALLBACK;
1052 return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
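/*
 * insert_pfn_pmd() backs vmf_insert_pfn_pmd() for pfn-based mappings such as
 * DAX: if a PMD for the same pfn is already present it only upgrades the
 * young/dirty/write bits, otherwise it installs a fresh huge entry (and
 * deposits a PTE table where the architecture requires one).
 */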
1055 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
1056 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
1059 struct mm_struct *mm = vma->vm_mm;
1063 ptl = pmd_lock(mm, pmd);
1064 if (!pmd_none(*pmd)) {
1066 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
1067 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
1070 entry = pmd_mkyoung(*pmd);
1071 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1072 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
1073 update_mmu_cache_pmd(vma, addr, pmd);
1079 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
1080 if (pfn_t_devmap(pfn))
1081 entry = pmd_mkdevmap(entry);
1083 entry = pmd_mkyoung(pmd_mkdirty(entry));
1084 entry = maybe_pmd_mkwrite(entry, vma);
1088 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1093 set_pmd_at(mm, addr, pmd, entry);
1094 update_mmu_cache_pmd(vma, addr, pmd);
1099 pte_free(mm, pgtable);
1103 * vmf_insert_pfn_pmd - insert a pmd size pfn
1104 * @vmf: Structure describing the fault
1105 * @pfn: pfn to insert
1106 * @write: whether it's a write fault
1108 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
1110 * Return: vm_fault_t value.
1112 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
1114 unsigned long addr = vmf->address & PMD_MASK;
1115 struct vm_area_struct *vma = vmf->vma;
1116 pgprot_t pgprot = vma->vm_page_prot;
1117 pgtable_t pgtable = NULL;
1120 * If we had pmd_special, we could avoid all these restrictions,
1121 * but we need to be consistent with PTEs and architectures that
1122 * can't support a 'special' bit.
1124 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1125 !pfn_t_devmap(pfn));
1126 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1127 (VM_PFNMAP|VM_MIXEDMAP));
1128 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1130 if (addr < vma->vm_start || addr >= vma->vm_end)
1131 return VM_FAULT_SIGBUS;
1133 if (arch_needs_pgtable_deposit()) {
1134 pgtable = pte_alloc_one(vma->vm_mm);
1136 return VM_FAULT_OOM;
1139 track_pfn_insert(vma, &pgprot, pfn);
1141 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
1142 return VM_FAULT_NOPAGE;
1144 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
1146 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1147 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
1149 if (likely(vma->vm_flags & VM_WRITE))
1150 pud = pud_mkwrite(pud);
1154 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
1155 pud_t *pud, pfn_t pfn, bool write)
1157 struct mm_struct *mm = vma->vm_mm;
1158 pgprot_t prot = vma->vm_page_prot;
1162 ptl = pud_lock(mm, pud);
1163 if (!pud_none(*pud)) {
1165 if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
1166 WARN_ON_ONCE(!is_huge_zero_pud(*pud));
1169 entry = pud_mkyoung(*pud);
1170 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
1171 if (pudp_set_access_flags(vma, addr, pud, entry, 1))
1172 update_mmu_cache_pud(vma, addr, pud);
1177 entry = pud_mkhuge(pfn_t_pud(pfn, prot));
1178 if (pfn_t_devmap(pfn))
1179 entry = pud_mkdevmap(entry);
1181 entry = pud_mkyoung(pud_mkdirty(entry));
1182 entry = maybe_pud_mkwrite(entry, vma);
1184 set_pud_at(mm, addr, pud, entry);
1185 update_mmu_cache_pud(vma, addr, pud);
1192 * vmf_insert_pfn_pud - insert a pud size pfn
1193 * @vmf: Structure describing the fault
1194 * @pfn: pfn to insert
1195 * @write: whether it's a write fault
1197 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
1199 * Return: vm_fault_t value.
1201 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
1203 unsigned long addr = vmf->address & PUD_MASK;
1204 struct vm_area_struct *vma = vmf->vma;
1205 pgprot_t pgprot = vma->vm_page_prot;
1208 * If we had pud_special, we could avoid all these restrictions,
1209 * but we need to be consistent with PTEs and architectures that
1210 * can't support a 'special' bit.
1212 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1213 !pfn_t_devmap(pfn));
1214 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1215 (VM_PFNMAP|VM_MIXEDMAP));
1216 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1218 if (addr < vma->vm_start || addr >= vma->vm_end)
1219 return VM_FAULT_SIGBUS;
1221 track_pfn_insert(vma, &pgprot, pfn);
1223 insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
1224 return VM_FAULT_NOPAGE;
1226 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
1227 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1229 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1230 pmd_t *pmd, bool write)
1234 _pmd = pmd_mkyoung(*pmd);
1236 _pmd = pmd_mkdirty(_pmd);
1237 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1239 update_mmu_cache_pmd(vma, addr, pmd);
1242 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1243 pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
1245 unsigned long pfn = pmd_pfn(*pmd);
1246 struct mm_struct *mm = vma->vm_mm;
1250 assert_spin_locked(pmd_lockptr(mm, pmd));
1252 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1255 if (pmd_present(*pmd) && pmd_devmap(*pmd))
1260 if (flags & FOLL_TOUCH)
1261 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1264 * device mapped pages can only be returned if the
1265 * caller will manage the page reference count.
1267 if (!(flags & (FOLL_GET | FOLL_PIN)))
1268 return ERR_PTR(-EEXIST);
1270 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1271 *pgmap = get_dev_pagemap(pfn, *pgmap);
1273 return ERR_PTR(-EFAULT);
1274 page = pfn_to_page(pfn);
1275 ret = try_grab_page(page, flags);
1277 page = ERR_PTR(ret);
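/*
 * fork() path for a huge PMD: PMD migration entries are copied (downgraded
 * to read-only), the huge zero page just gains a reference, and a normal
 * anonymous THP is shared copy-on-write by write-protecting the parent's
 * PMD.  If the folio may be DMA-pinned, duplication fails and the PMD is
 * split so the fault can be retried at PTE granularity.
 */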
1282 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1283 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1284 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1286 spinlock_t *dst_ptl, *src_ptl;
1287 struct page *src_page;
1288 struct folio *src_folio;
1290 pgtable_t pgtable = NULL;
1293 /* Skip if it can be refilled on fault */
1294 if (!vma_is_anonymous(dst_vma))
1297 pgtable = pte_alloc_one(dst_mm);
1298 if (unlikely(!pgtable))
1301 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1302 src_ptl = pmd_lockptr(src_mm, src_pmd);
1303 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1308 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1309 if (unlikely(is_swap_pmd(pmd))) {
1310 swp_entry_t entry = pmd_to_swp_entry(pmd);
1312 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1313 if (!is_readable_migration_entry(entry)) {
1314 entry = make_readable_migration_entry(
1316 pmd = swp_entry_to_pmd(entry);
1317 if (pmd_swp_soft_dirty(*src_pmd))
1318 pmd = pmd_swp_mksoft_dirty(pmd);
1319 if (pmd_swp_uffd_wp(*src_pmd))
1320 pmd = pmd_swp_mkuffd_wp(pmd);
1321 set_pmd_at(src_mm, addr, src_pmd, pmd);
1323 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1324 mm_inc_nr_ptes(dst_mm);
1325 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1326 if (!userfaultfd_wp(dst_vma))
1327 pmd = pmd_swp_clear_uffd_wp(pmd);
1328 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1334 if (unlikely(!pmd_trans_huge(pmd))) {
1335 pte_free(dst_mm, pgtable);
1339 * When page table lock is held, the huge zero pmd should not be
1340 * under splitting since we don't split the page itself, only pmd to
1341 * a page table.
1343 if (is_huge_zero_pmd(pmd)) {
1345 * get_huge_zero_page() will never allocate a new page here,
1346 * since we already have a zero page to copy. It just takes a
1347 * reference.
1349 mm_get_huge_zero_page(dst_mm);
1353 src_page = pmd_page(pmd);
1354 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1355 src_folio = page_folio(src_page);
1357 folio_get(src_folio);
1358 if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
1359 /* Page may be pinned: split and retry the fault on PTEs. */
1360 folio_put(src_folio);
1361 pte_free(dst_mm, pgtable);
1362 spin_unlock(src_ptl);
1363 spin_unlock(dst_ptl);
1364 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
1367 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1369 mm_inc_nr_ptes(dst_mm);
1370 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1371 pmdp_set_wrprotect(src_mm, addr, src_pmd);
1372 if (!userfaultfd_wp(dst_vma))
1373 pmd = pmd_clear_uffd_wp(pmd);
1374 pmd = pmd_mkold(pmd_wrprotect(pmd));
1375 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1379 spin_unlock(src_ptl);
1380 spin_unlock(dst_ptl);
1385 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1386 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1387 pud_t *pud, bool write)
1391 _pud = pud_mkyoung(*pud);
1393 _pud = pud_mkdirty(_pud);
1394 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1396 update_mmu_cache_pud(vma, addr, pud);
1399 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1400 pud_t *pud, int flags, struct dev_pagemap **pgmap)
1402 unsigned long pfn = pud_pfn(*pud);
1403 struct mm_struct *mm = vma->vm_mm;
1407 assert_spin_locked(pud_lockptr(mm, pud));
1409 if (flags & FOLL_WRITE && !pud_write(*pud))
1412 if (pud_present(*pud) && pud_devmap(*pud))
1417 if (flags & FOLL_TOUCH)
1418 touch_pud(vma, addr, pud, flags & FOLL_WRITE);
1421 * device mapped pages can only be returned if the
1422 * caller will manage the page reference count.
1424 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
1426 if (!(flags & (FOLL_GET | FOLL_PIN)))
1427 return ERR_PTR(-EEXIST);
1429 pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
1430 *pgmap = get_dev_pagemap(pfn, *pgmap);
1432 return ERR_PTR(-EFAULT);
1433 page = pfn_to_page(pfn);
1435 ret = try_grab_page(page, flags);
1437 page = ERR_PTR(ret);
1442 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1443 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1444 struct vm_area_struct *vma)
1446 spinlock_t *dst_ptl, *src_ptl;
1450 dst_ptl = pud_lock(dst_mm, dst_pud);
1451 src_ptl = pud_lockptr(src_mm, src_pud);
1452 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1456 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1460 * When page table lock is held, the huge zero pud should not be
1461 * under splitting since we don't split the page itself, only pud to
1462 * a page table.
1464 if (is_huge_zero_pud(pud)) {
1465 /* No huge zero pud yet */
1469 * TODO: once we support anonymous pages, use
1470 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
1472 pudp_set_wrprotect(src_mm, addr, src_pud);
1473 pud = pud_mkold(pud_wrprotect(pud));
1474 set_pud_at(dst_mm, addr, dst_pud, pud);
1478 spin_unlock(src_ptl);
1479 spin_unlock(dst_ptl);
1483 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1485 bool write = vmf->flags & FAULT_FLAG_WRITE;
1487 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1488 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1491 touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1493 spin_unlock(vmf->ptl);
1495 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1497 void huge_pmd_set_accessed(struct vm_fault *vmf)
1499 bool write = vmf->flags & FAULT_FLAG_WRITE;
1501 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1502 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1505 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1508 spin_unlock(vmf->ptl);
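/*
 * Write fault on a write-protected huge PMD: the folio is reused in place
 * only when it is exclusively owned (a single reference once any swap cache
 * copy is dropped); otherwise the PMD is split and the fault falls back to
 * the PTE-level copy-on-write handler.
 */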
1511 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1513 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1514 struct vm_area_struct *vma = vmf->vma;
1515 struct folio *folio;
1517 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1518 pmd_t orig_pmd = vmf->orig_pmd;
1520 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1521 VM_BUG_ON_VMA(!vma->anon_vma, vma);
1523 if (is_huge_zero_pmd(orig_pmd))
1526 spin_lock(vmf->ptl);
1528 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1529 spin_unlock(vmf->ptl);
1533 page = pmd_page(orig_pmd);
1534 folio = page_folio(page);
1535 VM_BUG_ON_PAGE(!PageHead(page), page);
1537 /* Early check when only holding the PT lock. */
1538 if (PageAnonExclusive(page))
1541 if (!folio_trylock(folio)) {
1543 spin_unlock(vmf->ptl);
1545 spin_lock(vmf->ptl);
1546 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1547 spin_unlock(vmf->ptl);
1548 folio_unlock(folio);
1555 /* Recheck after temporarily dropping the PT lock. */
1556 if (PageAnonExclusive(page)) {
1557 folio_unlock(folio);
1562 * See do_wp_page(): we can only reuse the folio exclusively if
1563 * there are no additional references. Note that we always drain
1564 * the LRU cache immediately after adding a THP.
1566 if (folio_ref_count(folio) >
1567 1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
1568 goto unlock_fallback;
1569 if (folio_test_swapcache(folio))
1570 folio_free_swap(folio);
1571 if (folio_ref_count(folio) == 1) {
1574 folio_move_anon_rmap(folio, vma);
1575 SetPageAnonExclusive(page);
1576 folio_unlock(folio);
1578 if (unlikely(unshare)) {
1579 spin_unlock(vmf->ptl);
1582 entry = pmd_mkyoung(orig_pmd);
1583 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1584 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1585 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1586 spin_unlock(vmf->ptl);
1591 folio_unlock(folio);
1592 spin_unlock(vmf->ptl);
1594 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1595 return VM_FAULT_FALLBACK;
1598 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1599 unsigned long addr, pmd_t pmd)
1603 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1606 /* Don't touch entries that are not even readable (NUMA hinting). */
1607 if (pmd_protnone(pmd))
1610 /* Do we need write faults for softdirty tracking? */
1611 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1614 /* Do we need write faults for uffd-wp tracking? */
1615 if (userfaultfd_huge_pmd_wp(vma, pmd))
1618 if (!(vma->vm_flags & VM_SHARED)) {
1619 /* See can_change_pte_writable(). */
1620 page = vm_normal_page_pmd(vma, addr, pmd);
1621 return page && PageAnon(page) && PageAnonExclusive(page);
1624 /* See can_change_pte_writable(). */
1625 return pmd_dirty(pmd);
1628 /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
1629 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
1630 struct vm_area_struct *vma,
1633 /* If the pmd is writable, we can write to the page. */
1637 /* Maybe FOLL_FORCE is set to override it? */
1638 if (!(flags & FOLL_FORCE))
1641 /* But FOLL_FORCE has no effect on shared mappings */
1642 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
1645 /* ... or read-only private ones */
1646 if (!(vma->vm_flags & VM_MAYWRITE))
1649 /* ... or already writable ones that just need to take a write fault */
1650 if (vma->vm_flags & VM_WRITE)
1654 * See can_change_pte_writable(): we broke COW and could map the page
1655 * writable if we have an exclusive anonymous page ...
1657 if (!page || !PageAnon(page) || !PageAnonExclusive(page))
1660 /* ... and a write-fault isn't required for other reasons. */
1661 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1663 return !userfaultfd_huge_pmd_wp(vma, pmd);
1666 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1671 struct mm_struct *mm = vma->vm_mm;
1675 assert_spin_locked(pmd_lockptr(mm, pmd));
1677 page = pmd_page(*pmd);
1678 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1680 if ((flags & FOLL_WRITE) &&
1681 !can_follow_write_pmd(*pmd, page, vma, flags))
1684 /* Avoid dumping huge zero page */
1685 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1686 return ERR_PTR(-EFAULT);
1688 if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
1691 if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
1692 return ERR_PTR(-EMLINK);
1694 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
1695 !PageAnonExclusive(page), page);
1697 ret = try_grab_page(page, flags);
1699 return ERR_PTR(ret);
1701 if (flags & FOLL_TOUCH)
1702 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1704 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1705 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
1710 /* NUMA hinting page fault entry point for trans huge pmds */
1711 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
1713 struct vm_area_struct *vma = vmf->vma;
1714 pmd_t oldpmd = vmf->orig_pmd;
1716 struct folio *folio;
1717 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1718 int nid = NUMA_NO_NODE;
1719 int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
1720 bool migrated = false, writable = false;
1723 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1724 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1725 spin_unlock(vmf->ptl);
1729 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1732 * Detect now whether the PMD could be writable; this information
1733 * is only valid while holding the PT lock.
1735 writable = pmd_write(pmd);
1736 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1737 can_change_pmd_writable(vma, vmf->address, pmd))
1740 folio = vm_normal_folio_pmd(vma, haddr, pmd);
1744 /* See similar comment in do_numa_page for explanation */
1746 flags |= TNF_NO_GROUP;
1748 nid = folio_nid(folio);
1750 * For memory tiering mode, the cpupid of a slow memory page is used
1751 * to record the page access time, so use the default value.
1753 if (node_is_toptier(nid))
1754 last_cpupid = folio_last_cpupid(folio);
1755 target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
1756 if (target_nid == NUMA_NO_NODE) {
1761 spin_unlock(vmf->ptl);
1764 migrated = migrate_misplaced_folio(folio, vma, target_nid);
1766 flags |= TNF_MIGRATED;
1769 flags |= TNF_MIGRATE_FAIL;
1770 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1771 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1772 spin_unlock(vmf->ptl);
1779 if (nid != NUMA_NO_NODE)
1780 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
1785 /* Restore the PMD */
1786 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1787 pmd = pmd_mkyoung(pmd);
1789 pmd = pmd_mkwrite(pmd, vma);
1790 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1791 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1792 spin_unlock(vmf->ptl);
1797 * Return true if we do MADV_FREE successfully on entire pmd page.
1798 * Otherwise, return false.
1800 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1801 pmd_t *pmd, unsigned long addr, unsigned long next)
1805 struct folio *folio;
1806 struct mm_struct *mm = tlb->mm;
1809 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1811 ptl = pmd_trans_huge_lock(pmd, vma);
1816 if (is_huge_zero_pmd(orig_pmd))
1819 if (unlikely(!pmd_present(orig_pmd))) {
1820 VM_BUG_ON(thp_migration_supported() &&
1821 !is_pmd_migration_entry(orig_pmd));
1825 folio = pfn_folio(pmd_pfn(orig_pmd));
1827 * If other processes are mapping this folio, we cannot discard
1828 * the folio unless they all do MADV_FREE, so let's skip the folio.
1830 if (folio_estimated_sharers(folio) != 1)
1833 if (!folio_trylock(folio))
1837 * If the user wants to discard part of the THP's pages, split it so
1838 * MADV_FREE will deactivate only them.
1840 if (next - addr != HPAGE_PMD_SIZE) {
1844 folio_unlock(folio);
1849 if (folio_test_dirty(folio))
1850 folio_clear_dirty(folio);
1851 folio_unlock(folio);
1853 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1854 pmdp_invalidate(vma, addr, pmd);
1855 orig_pmd = pmd_mkold(orig_pmd);
1856 orig_pmd = pmd_mkclean(orig_pmd);
1858 set_pmd_at(mm, addr, pmd, orig_pmd);
1859 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1862 folio_mark_lazyfree(folio);
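/*
 * zap_deposited_table() releases the PTE page table that was deposited when
 * the huge PMD was installed, keeping the mm's page-table accounting
 * balanced when a THP mapping is torn down.
 */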
1870 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1874 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1875 pte_free(mm, pgtable);
1879 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1880 pmd_t *pmd, unsigned long addr)
1885 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1887 ptl = __pmd_trans_huge_lock(pmd, vma);
1891 * For architectures like ppc64 we look at deposited pgtable
1892 * when calling pmdp_huge_get_and_clear. So do the
1893 * pgtable_trans_huge_withdraw after finishing pmdp related
1894 * operations.
1896 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1898 arch_check_zapped_pmd(vma, orig_pmd);
1899 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1900 if (vma_is_special_huge(vma)) {
1901 if (arch_needs_pgtable_deposit())
1902 zap_deposited_table(tlb->mm, pmd);
1904 } else if (is_huge_zero_pmd(orig_pmd)) {
1905 zap_deposited_table(tlb->mm, pmd);
1908 struct page *page = NULL;
1909 int flush_needed = 1;
1911 if (pmd_present(orig_pmd)) {
1912 page = pmd_page(orig_pmd);
1913 folio_remove_rmap_pmd(page_folio(page), page, vma);
1914 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1915 VM_BUG_ON_PAGE(!PageHead(page), page);
1916 } else if (thp_migration_supported()) {
1919 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1920 entry = pmd_to_swp_entry(orig_pmd);
1921 page = pfn_swap_entry_to_page(entry);
1924 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1926 if (PageAnon(page)) {
1927 zap_deposited_table(tlb->mm, pmd);
1928 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1930 if (arch_needs_pgtable_deposit())
1931 zap_deposited_table(tlb->mm, pmd);
1932 add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
1937 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
1942 #ifndef pmd_move_must_withdraw
1943 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1944 spinlock_t *old_pmd_ptl,
1945 struct vm_area_struct *vma)
1948 * With split pmd lock we also need to move preallocated
1949 * PTE page table if new_pmd is on different PMD page table.
1951 * We also don't deposit and withdraw tables for file pages.
1953 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1957 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1959 #ifdef CONFIG_MEM_SOFT_DIRTY
1960 if (unlikely(is_pmd_migration_entry(pmd)))
1961 pmd = pmd_swp_mksoft_dirty(pmd);
1962 else if (pmd_present(pmd))
1963 pmd = pmd_mksoft_dirty(pmd);
1968 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1969 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
1971 spinlock_t *old_ptl, *new_ptl;
1973 struct mm_struct *mm = vma->vm_mm;
1974 bool force_flush = false;
1977 * The destination pmd shouldn't be established, free_pgtables()
1978 * should have released it; but move_page_tables() might have already
1979 * inserted a page table, if racing against shmem/file collapse.
1981 if (!pmd_none(*new_pmd)) {
1982 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1987 * We don't have to worry about the ordering of src and dst
1988 * ptlocks because exclusive mmap_lock prevents deadlock.
1990 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1992 new_ptl = pmd_lockptr(mm, new_pmd);
1993 if (new_ptl != old_ptl)
1994 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1995 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1996 if (pmd_present(pmd))
1998 VM_BUG_ON(!pmd_none(*new_pmd));
2000 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
2002 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
2003 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
2005 pmd = move_soft_dirty_pmd(pmd);
2006 set_pmd_at(mm, new_addr, new_pmd, pmd);
2008 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
2009 if (new_ptl != old_ptl)
2010 spin_unlock(new_ptl);
2011 spin_unlock(old_ptl);
2019 * - 0 if PMD could not be locked
2020 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
2021 * or if prot_numa but THP migration is not supported
2022 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
2024 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2025 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2026 unsigned long cp_flags)
2028 struct mm_struct *mm = vma->vm_mm;
2030 pmd_t oldpmd, entry;
2031 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
2032 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2033 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
2036 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2038 if (prot_numa && !thp_migration_supported())
2041 ptl = __pmd_trans_huge_lock(pmd, vma);
2045 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2046 if (is_swap_pmd(*pmd)) {
2047 swp_entry_t entry = pmd_to_swp_entry(*pmd);
2048 struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
2051 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
2052 if (is_writable_migration_entry(entry)) {
2054 * A protection check is difficult so
2055 * just be safe and disable write
2057 if (folio_test_anon(folio))
2058 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2060 entry = make_readable_migration_entry(swp_offset(entry));
2061 newpmd = swp_entry_to_pmd(entry);
2062 if (pmd_swp_soft_dirty(*pmd))
2063 newpmd = pmd_swp_mksoft_dirty(newpmd);
2069 newpmd = pmd_swp_mkuffd_wp(newpmd);
2070 else if (uffd_wp_resolve)
2071 newpmd = pmd_swp_clear_uffd_wp(newpmd);
2072 if (!pmd_same(*pmd, newpmd))
2073 set_pmd_at(mm, addr, pmd, newpmd);
2079 struct folio *folio;
2082 * Avoid trapping faults against the zero page. The read-only
2083 * data is likely to be read-cached on the local CPU and
2084 * local/remote hits to the zero page are not interesting.
2086 if (is_huge_zero_pmd(*pmd))
2089 if (pmd_protnone(*pmd))
2092 folio = page_folio(pmd_page(*pmd));
2093 toptier = node_is_toptier(folio_nid(folio));
2095 * Skip scanning top tier node if normal numa
2096 * balancing is disabled
2098 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
2102 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
2104 folio_xchg_access_time(folio,
2105 jiffies_to_msecs(jiffies));
2108 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
2109 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
2110 * which is also under mmap_read_lock(mm):
2113 * change_huge_pmd(prot_numa=1)
2114 * pmdp_huge_get_and_clear_notify()
2115 * madvise_dontneed()
2117 * pmd_trans_huge(*pmd) == 0 (without ptl)
2120 * // pmd is re-established
2122 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
2123 * which may break userspace.
2125 * pmdp_invalidate_ad() is required to make sure we don't miss
2126 * dirty/young flags set by hardware.
2128 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
2130 entry = pmd_modify(oldpmd, newprot);
2132 entry = pmd_mkuffd_wp(entry);
2133 else if (uffd_wp_resolve)
2135 * Leave the write bit to be handled by PF interrupt
2136 * handler, then things like COW could be properly
2139 entry = pmd_clear_uffd_wp(entry);
2141 /* See change_pte_range(). */
2142 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2143 can_change_pmd_writable(vma, addr, entry))
2144 entry = pmd_mkwrite(entry, vma);
2147 set_pmd_at(mm, addr, pmd, entry);
2149 if (huge_pmd_needs_flush(oldpmd, entry))
2150 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
2156 #ifdef CONFIG_USERFAULTFD
2158 * The PT lock for src_pmd and the mmap_lock for reading are held by
2159 * the caller, but it must return after releasing the page_table_lock.
2160 * Just move the page from src_pmd to dst_pmd if possible.
2161 * Return zero if the page was moved, -EAGAIN if the operation needs to be
2162 * repeated by the caller, or another error code in case of failure.
2164 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2165 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2166 unsigned long dst_addr, unsigned long src_addr)
2168 pmd_t _dst_pmd, src_pmdval;
2169 struct page *src_page;
2170 struct folio *src_folio;
2171 struct anon_vma *src_anon_vma;
2172 spinlock_t *src_ptl, *dst_ptl;
2173 pgtable_t src_pgtable;
2174 struct mmu_notifier_range range;
2177 src_pmdval = *src_pmd;
2178 src_ptl = pmd_lockptr(mm, src_pmd);
2180 lockdep_assert_held(src_ptl);
2181 mmap_assert_locked(mm);
2183 /* Sanity checks before the operation */
2184 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2185 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2186 spin_unlock(src_ptl);
2190 if (!pmd_trans_huge(src_pmdval)) {
2191 spin_unlock(src_ptl);
2192 if (is_pmd_migration_entry(src_pmdval)) {
2193 pmd_migration_entry_wait(mm, &src_pmdval);
2199 src_page = pmd_page(src_pmdval);
2200 if (unlikely(!PageAnonExclusive(src_page))) {
2201 spin_unlock(src_ptl);
2205 src_folio = page_folio(src_page);
2206 folio_get(src_folio);
2207 spin_unlock(src_ptl);
2209 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2210 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2211 src_addr + HPAGE_PMD_SIZE);
2212 mmu_notifier_invalidate_range_start(&range);
2214 folio_lock(src_folio);
2217 * split_huge_page walks the anon_vma chain without the page
2218 * lock. Serialize against it with the anon_vma lock, the page
2219 * lock is not enough.
2221 src_anon_vma = folio_get_anon_vma(src_folio);
2222 if (!src_anon_vma) {
2226 anon_vma_lock_write(src_anon_vma);
2228 dst_ptl = pmd_lockptr(mm, dst_pmd);
2229 double_pt_lock(src_ptl, dst_ptl);
2230 if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2231 !pmd_same(*dst_pmd, dst_pmdval))) {
2235 if (folio_maybe_dma_pinned(src_folio) ||
2236 !PageAnonExclusive(&src_folio->page)) {
2241 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2242 WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2247 folio_move_anon_rmap(src_folio, dst_vma);
2248 WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
2250 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2251 /* Folio got pinned from under us. Put it back and fail the move. */
2252 if (folio_maybe_dma_pinned(src_folio)) {
2253 set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2258 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
2259 /* Follow mremap() behavior and treat the entry as dirty after the move */
2260 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2261 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
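/*
 * Carry the preallocated page table along with the mapping: withdraw it
 * from the source PMD and deposit it at the destination.
 */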
2263 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2264 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2266 double_pt_unlock(src_ptl, dst_ptl);
2267 anon_vma_unlock_write(src_anon_vma);
2268 put_anon_vma(src_anon_vma);
2270 /* unblock rmap walks */
2271 folio_unlock(src_folio);
2272 mmu_notifier_invalidate_range_end(&range);
2273 folio_put(src_folio);
2276 #endif /* CONFIG_USERFAULTFD */
2279 * Returns the page table lock pointer if a given pmd maps a thp, NULL otherwise.
2281 * Note that if it returns the page table lock pointer, this routine returns
2282 * without unlocking it, so the caller must unlock it.
2284 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2287 ptl = pmd_lock(vma->vm_mm, pmd);
2288 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
2296 * Returns the page table lock pointer if a given pud maps a thp, NULL otherwise.
2298 * Note that if it returns the page table lock pointer, this routine returns
2299 * without unlocking it, so the caller must unlock it.
2301 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2305 ptl = pud_lock(vma->vm_mm, pud);
2306 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2312 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2313 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2314 pud_t *pud, unsigned long addr)
2318 ptl = __pud_trans_huge_lock(pud, vma);
2322 pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2323 tlb_remove_pud_tlb_entry(tlb, pud, addr);
2324 if (vma_is_special_huge(vma)) {
2326 /* No zero page support yet */
2328 /* No support for anonymous PUD pages yet */
2334 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2335 unsigned long haddr)
2337 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2338 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2339 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2340 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2342 count_vm_event(THP_SPLIT_PUD);
2344 pudp_huge_clear_flush(vma, haddr, pud);
2347 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2348 unsigned long address)
2351 struct mmu_notifier_range range;
2353 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2354 address & HPAGE_PUD_MASK,
2355 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2356 mmu_notifier_invalidate_range_start(&range);
2357 ptl = pud_lock(vma->vm_mm, pud);
2358 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2360 __split_huge_pud_locked(vma, pud, range.start);
2364 mmu_notifier_invalidate_range_end(&range);
2366 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2368 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2369 unsigned long haddr, pmd_t *pmd)
2371 struct mm_struct *mm = vma->vm_mm;
2373 pmd_t _pmd, old_pmd;
2379 * Leave the pmd empty until the pte is filled. Note that it is fine to delay
2380 * notification until mmu_notifier_invalidate_range_end() as we are
2381 * replacing a zero pmd write protected page with a zero pte write
2384 * See Documentation/mm/mmu_notifier.rst
2386 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
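/*
 * Fill the withdrawn page table with zero-page PTEs through the
 * temporary _pmd; the real PMD is repopulated only once every PTE
 * below has been set.
 */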
2388 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2389 pmd_populate(mm, &_pmd, pgtable);
2391 pte = pte_offset_map(&_pmd, haddr);
2393 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2396 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
2397 entry = pte_mkspecial(entry);
2398 if (pmd_uffd_wp(old_pmd))
2399 entry = pte_mkuffd_wp(entry);
2400 VM_BUG_ON(!pte_none(ptep_get(pte)));
2401 set_pte_at(mm, addr, pte, entry);
2405 smp_wmb(); /* make pte visible before pmd */
2406 pmd_populate(mm, pmd, pgtable);
2409 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2410 unsigned long haddr, bool freeze)
2412 struct mm_struct *mm = vma->vm_mm;
2413 struct folio *folio;
2416 pmd_t old_pmd, _pmd;
2417 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2418 bool anon_exclusive = false, dirty = false;
2423 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2424 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2425 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2426 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2427 && !pmd_devmap(*pmd));
2429 count_vm_event(THP_SPLIT_PMD);
2431 if (!vma_is_anonymous(vma)) {
2432 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2434 * We are going to unmap this huge page. So
2435 * just go ahead and zap it
2437 if (arch_needs_pgtable_deposit())
2438 zap_deposited_table(mm, pmd);
2439 if (vma_is_special_huge(vma))
2441 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2444 entry = pmd_to_swp_entry(old_pmd);
2445 page = pfn_swap_entry_to_page(entry);
2447 page = pmd_page(old_pmd);
2448 folio = page_folio(page);
2449 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
2450 folio_mark_dirty(folio);
2451 if (!folio_test_referenced(folio) && pmd_young(old_pmd))
2452 folio_set_referenced(folio);
2453 folio_remove_rmap_pmd(folio, page, vma);
2456 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2460 if (is_huge_zero_pmd(*pmd)) {
2462 * FIXME: Do we want to invalidate secondary mmu by calling
2463 * mmu_notifier_arch_invalidate_secondary_tlbs()? See the comments below
2464 * inside __split_huge_pmd().
2466 * We are going from a write protected zero huge page to write
2467 * protected zero small pages, so it does not seem useful to
2468 * invalidate the secondary mmu at this time.
2470 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2474 * Up to this point the pmd is present and huge and userland has
2475 * full access to the hugepage during the split (which happens in
2476 * place). If we overwrite the pmd with the not-huge version pointing
2477 * to the pte here (which of course we could if all CPUs were bug
2478 * free), userland could trigger a small page size TLB miss on the
2479 * small sized TLB while the hugepage TLB entry is still established in
2480 * the huge TLB. Some CPUs don't like that.
2481 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2482 * 383 on page 105. Intel should be safe but it also warns that it's
2483 * only safe if the permission and cache attributes of the two entries
2484 * loaded in the two TLBs are identical (which should be the case here).
2485 * But it is generally safer to never allow small and huge TLB entries
2486 * for the same virtual address to be loaded simultaneously. So instead
2487 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2488 * current pmd notpresent (atomically because here the pmd_trans_huge
2489 * must remain set at all times on the pmd until the split is complete
2490 * for this pmd), then we flush the SMP TLB and finally we write the
2491 * non-huge version of the pmd entry with pmd_populate.
2493 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2495 pmd_migration = is_pmd_migration_entry(old_pmd);
2496 if (unlikely(pmd_migration)) {
2499 entry = pmd_to_swp_entry(old_pmd);
2500 page = pfn_swap_entry_to_page(entry);
2501 write = is_writable_migration_entry(entry);
2503 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2504 young = is_migration_entry_young(entry);
2505 dirty = is_migration_entry_dirty(entry);
2506 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2507 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2509 page = pmd_page(old_pmd);
2510 folio = page_folio(page);
2511 if (pmd_dirty(old_pmd)) {
2513 folio_set_dirty(folio);
2515 write = pmd_write(old_pmd);
2516 young = pmd_young(old_pmd);
2517 soft_dirty = pmd_soft_dirty(old_pmd);
2518 uffd_wp = pmd_uffd_wp(old_pmd);
2520 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2521 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
2524 * Without "freeze", we'll simply split the PMD, propagating the
2525 * PageAnonExclusive() flag for each PTE by setting it for
2526 * each subpage -- no need to (temporarily) clear.
2528 * With "freeze" we want to replace mapped pages by
2529 * migration entries right away. This is only possible if we
2530 * managed to clear PageAnonExclusive() -- see
2531 * set_pmd_migration_entry().
2533 * In case we cannot clear PageAnonExclusive(), split the PMD
2534 * only and let try_to_migrate_one() fail later.
2536 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
2538 anon_exclusive = PageAnonExclusive(page);
2539 if (freeze && anon_exclusive &&
2540 folio_try_share_anon_rmap_pmd(folio, page))
2543 rmap_t rmap_flags = RMAP_NONE;
2545 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2547 rmap_flags |= RMAP_EXCLUSIVE;
2548 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
2549 vma, haddr, rmap_flags);
2554 * Withdraw the table only after we mark the pmd entry invalid.
2555 * This is critical for some architectures (Power).
2557 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2558 pmd_populate(mm, &_pmd, pgtable);
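/*
 * The PTEs are written into the withdrawn page table through the
 * temporary _pmd; the real PMD is repopulated only after the loop
 * below has filled every entry.
 */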
2560 pte = pte_offset_map(&_pmd, haddr);
2562 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2565 * Note that NUMA hinting access restrictions are not
2566 * transferred to avoid any possibility of altering
2567 * permissions across VMAs.
2569 if (freeze || pmd_migration) {
2570 swp_entry_t swp_entry;
2572 swp_entry = make_writable_migration_entry(
2573 page_to_pfn(page + i));
2574 else if (anon_exclusive)
2575 swp_entry = make_readable_exclusive_migration_entry(
2576 page_to_pfn(page + i));
2578 swp_entry = make_readable_migration_entry(
2579 page_to_pfn(page + i));
2581 swp_entry = make_migration_entry_young(swp_entry);
2583 swp_entry = make_migration_entry_dirty(swp_entry);
2584 entry = swp_entry_to_pte(swp_entry);
2586 entry = pte_swp_mksoft_dirty(entry);
2588 entry = pte_swp_mkuffd_wp(entry);
2590 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2592 entry = pte_mkwrite(entry, vma);
2594 entry = pte_mkold(entry);
2595 /* NOTE: this may set soft-dirty too on some archs */
2597 entry = pte_mkdirty(entry);
2599 entry = pte_mksoft_dirty(entry);
2601 entry = pte_mkuffd_wp(entry);
2603 VM_BUG_ON(!pte_none(ptep_get(pte)));
2604 set_pte_at(mm, addr, pte, entry);
2610 folio_remove_rmap_pmd(folio, page, vma);
2614 smp_wmb(); /* make pte visible before pmd */
2615 pmd_populate(mm, pmd, pgtable);
2618 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2619 unsigned long address, bool freeze, struct folio *folio)
2622 struct mmu_notifier_range range;
2624 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2625 address & HPAGE_PMD_MASK,
2626 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2627 mmu_notifier_invalidate_range_start(&range);
2628 ptl = pmd_lock(vma->vm_mm, pmd);
2631 * If the caller asks to set up a migration entry, we need a folio to check
2632 * the pmd against. Otherwise we can end up replacing the wrong folio.
2634 VM_BUG_ON(freeze && !folio);
2635 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2637 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2638 is_pmd_migration_entry(*pmd)) {
2640 * It's safe to call pmd_page() when the folio is set because it's
2641 * guaranteed that the pmd is present.
2643 if (folio && folio != page_folio(pmd_page(*pmd)))
2645 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
2650 mmu_notifier_invalidate_range_end(&range);
2653 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2654 bool freeze, struct folio *folio)
2656 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2661 __split_huge_pmd(vma, pmd, address, freeze, folio);
2664 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2667 * If the new address isn't hpage aligned and it could previously
2668 * contain a hugepage: check if we need to split a huge pmd.
2670 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2671 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2672 ALIGN(address, HPAGE_PMD_SIZE)))
2673 split_huge_pmd_address(vma, address, false, NULL);
2676 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2677 unsigned long start,
2681 /* Check if we need to split start first. */
2682 split_huge_pmd_if_needed(vma, start);
2684 /* Check if we need to split end next. */
2685 split_huge_pmd_if_needed(vma, end);
2688 * If we're also updating the next vma vm_start,
2689 * check if we need to split it.
2691 if (adjust_next > 0) {
2692 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2693 unsigned long nstart = next->vm_start;
2694 nstart += adjust_next;
2695 split_huge_pmd_if_needed(next, nstart);
2699 static void unmap_folio(struct folio *folio)
2701 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2702 TTU_SYNC | TTU_BATCH_FLUSH;
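/* TTU_BATCH_FLUSH defers the TLB flush to try_to_unmap_flush() below. */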
2704 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2707 * Anon pages need migration entries to preserve them, but file
2708 * pages can simply be left unmapped, then faulted back on demand.
2709 * If that is ever changed (perhaps for mlock), update remap_page().
2711 if (folio_test_anon(folio))
2712 try_to_migrate(folio, ttu_flags);
2714 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2716 try_to_unmap_flush();
2719 static void remap_page(struct folio *folio, unsigned long nr)
2723 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
2724 if (!folio_test_anon(folio))
2727 remove_migration_ptes(folio, folio, true);
2728 i += folio_nr_pages(folio);
2731 folio = folio_next(folio);
2735 static void lru_add_page_tail(struct page *head, struct page *tail,
2736 struct lruvec *lruvec, struct list_head *list)
2738 VM_BUG_ON_PAGE(!PageHead(head), head);
2739 VM_BUG_ON_PAGE(PageCompound(tail), head);
2740 VM_BUG_ON_PAGE(PageLRU(tail), head);
2741 lockdep_assert_held(&lruvec->lru_lock);
2744 /* page reclaim is reclaiming a huge page */
2745 VM_WARN_ON(PageLRU(head));
2747 list_add_tail(&tail->lru, list);
2749 /* head is still on lru (and we have it frozen) */
2750 VM_WARN_ON(!PageLRU(head));
2751 if (PageUnevictable(tail))
2752 tail->mlock_count = 0;
2754 list_add_tail(&tail->lru, &head->lru);
2759 static void __split_huge_page_tail(struct folio *folio, int tail,
2760 struct lruvec *lruvec, struct list_head *list)
2762 struct page *head = &folio->page;
2763 struct page *page_tail = head + tail;
2765 * Careful: new_folio is not a "real" folio until we clear PageTail.
2766 * Don't pass it around before clear_compound_head().
2768 struct folio *new_folio = (struct folio *)page_tail;
2770 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2773 * Clone page flags before unfreezing refcount.
2775 * After a successful get_page_unless_zero(), the flags may change; for
2776 * example, lock_page() sets PG_waiters.
2778 * Note that for mapped sub-pages of an anonymous THP,
2779 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
2780 * the migration entry instead, from where remap_page() will restore it.
2781 * We can still have PG_anon_exclusive set on effectively unmapped and
2782 * unreferenced sub-pages of an anonymous THP: we can simply drop
2783 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
2785 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2786 page_tail->flags |= (head->flags &
2787 ((1L << PG_referenced) |
2788 (1L << PG_swapbacked) |
2789 (1L << PG_swapcache) |
2790 (1L << PG_mlocked) |
2791 (1L << PG_uptodate) |
2793 (1L << PG_workingset) |
2795 (1L << PG_unevictable) |
2796 #ifdef CONFIG_ARCH_USES_PG_ARCH_X
2801 LRU_GEN_MASK | LRU_REFS_MASK));
2803 /* ->mapping in first and second tail page is replaced by other uses */
2804 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2806 page_tail->mapping = head->mapping;
2807 page_tail->index = head->index + tail;
2810 * page->private should not be set in tail pages. Fix up and warn once
2811 * if private is unexpectedly set.
2813 if (unlikely(page_tail->private)) {
2814 VM_WARN_ON_ONCE_PAGE(true, page_tail);
2815 page_tail->private = 0;
2817 if (folio_test_swapcache(folio))
2818 new_folio->swap.val = folio->swap.val + tail;
2820 /* Page flags must be visible before we make the page non-compound. */
2824 * Clear PageTail before unfreezing page refcount.
2826 * After a successful get_page_unless_zero(), a put_page() may follow,
2827 * which needs a correct compound_head().
2829 clear_compound_head(page_tail);
2831 /* Finally unfreeze refcount. Additional reference from page cache. */
2832 page_ref_unfreeze(page_tail, 1 + (!folio_test_anon(folio) ||
2833 folio_test_swapcache(folio)));
2835 if (folio_test_young(folio))
2836 folio_set_young(new_folio);
2837 if (folio_test_idle(folio))
2838 folio_set_idle(new_folio);
2840 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
2843 * always add to the tail because some iterators expect new
2844 * pages to show after the currently processed elements - e.g.
2847 lru_add_page_tail(head, page_tail, lruvec, list);
2850 static void __split_huge_page(struct page *page, struct list_head *list,
2853 struct folio *folio = page_folio(page);
2854 struct page *head = &folio->page;
2855 struct lruvec *lruvec;
2856 struct address_space *swap_cache = NULL;
2857 unsigned long offset = 0;
2858 unsigned int nr = thp_nr_pages(head);
2859 int i, nr_dropped = 0;
2861 /* complete memcg work before adding pages to the LRU */
2862 split_page_memcg(head, nr);
2864 if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
2865 offset = swp_offset(folio->swap);
2866 swap_cache = swap_address_space(folio->swap);
2867 xa_lock(&swap_cache->i_pages);
2870 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
2871 lruvec = folio_lruvec_lock(folio);
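/*
 * The compound-level hwpoison marker is meaningless once the page is
 * split; any poisoned subpage keeps its own PG_hwpoison flag.
 */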
2873 ClearPageHasHWPoisoned(head);
2875 for (i = nr - 1; i >= 1; i--) {
2876 __split_huge_page_tail(folio, i, lruvec, list);
2877 /* Some pages can be beyond EOF: drop them from page cache */
2878 if (head[i].index >= end) {
2879 struct folio *tail = page_folio(head + i);
2881 if (shmem_mapping(head->mapping))
2883 else if (folio_test_clear_dirty(tail))
2884 folio_account_cleaned(tail,
2885 inode_to_wb(folio->mapping->host));
2886 __filemap_remove_folio(tail, NULL);
2888 } else if (!PageAnon(page)) {
2889 __xa_store(&head->mapping->i_pages, head[i].index,
2891 } else if (swap_cache) {
2892 __xa_store(&swap_cache->i_pages, offset + i,
2897 ClearPageCompound(head);
2898 unlock_page_lruvec(lruvec);
2899 /* Caller disabled irqs, so they are still disabled here */
2901 split_page_owner(head, nr);
2903 /* See comment in __split_huge_page_tail() */
2904 if (PageAnon(head)) {
2905 /* Additional pin to swap cache */
2906 if (PageSwapCache(head)) {
2907 page_ref_add(head, 2);
2908 xa_unlock(&swap_cache->i_pages);
2913 /* Additional pin to page cache */
2914 page_ref_add(head, 2);
2915 xa_unlock(&head->mapping->i_pages);
2920 shmem_uncharge(head->mapping->host, nr_dropped);
2921 remap_page(folio, nr);
2923 if (folio_test_swapcache(folio))
2924 split_swap_cluster(folio->swap);
2926 for (i = 0; i < nr; i++) {
2927 struct page *subpage = head + i;
2928 if (subpage == page)
2930 unlock_page(subpage);
2933 * Subpages may be freed if there wasn't any mapping left,
2934 * e.g. if add_to_swap() is running on an lru page that
2935 * had its mapping zapped. Freeing these pages requires
2936 * taking the lru_lock, so we do the put_page of the tail
2937 * pages after the split is complete.
2939 free_page_and_swap_cache(subpage);
2943 /* Racy check whether the huge page can be split */
2944 bool can_split_folio(struct folio *folio, int *pextra_pins)
2948 /* Additional pins from page cache */
2949 if (folio_test_anon(folio))
2950 extra_pins = folio_test_swapcache(folio) ?
2951 folio_nr_pages(folio) : 0;
2953 extra_pins = folio_nr_pages(folio);
2955 *pextra_pins = extra_pins;
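/*
 * Every remaining reference must be accounted for: the map counts, the
 * page cache / swap cache pins counted above, and the caller's own pin
 * (the "- 1").
 */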
2956 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
2960 * This function splits a huge page into normal pages. @page can point to any
2961 * subpage of the huge page to split. The split doesn't change the position of @page.
2963 * The caller must hold the only pin on @page; any additional pin makes the split fail with -EBUSY.
2964 * The huge page must be locked.
2966 * If @list is null, tail pages will be added to the LRU list; otherwise, to @list.
2968 * Both head page and tail pages will inherit mapping, flags, and so on from
2971 * The GUP pin and PG_locked are transferred to @page. The rest of the subpages
2972 * can be freed if they are not mapped.
2974 * Returns 0 if the hugepage is split successfully.
2975 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2978 int split_huge_page_to_list(struct page *page, struct list_head *list)
2980 struct folio *folio = page_folio(page);
2981 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
2982 XA_STATE(xas, &folio->mapping->i_pages, folio->index);
2983 struct anon_vma *anon_vma = NULL;
2984 struct address_space *mapping = NULL;
2985 int extra_pins, ret;
2989 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2990 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2992 is_hzp = is_huge_zero_page(&folio->page);
2994 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
2998 if (folio_test_writeback(folio))
3001 if (folio_test_anon(folio)) {
3003 * The caller does not necessarily hold an mmap_lock that would
3004 * prevent the anon_vma from disappearing, so we first take a
3005 * reference to it and then lock the anon_vma for write. This
3006 * is similar to folio_lock_anon_vma_read except the write lock
3007 * is taken to serialise against parallel split or collapse
3010 anon_vma = folio_get_anon_vma(folio);
3017 anon_vma_lock_write(anon_vma);
3021 mapping = folio->mapping;
3029 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
3032 if (!filemap_release_folio(folio, gfp)) {
3037 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
3038 if (xas_error(&xas)) {
3039 ret = xas_error(&xas);
3044 i_mmap_lock_read(mapping);
3047 * __split_huge_page() may need to trim off pages beyond EOF:
3048 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
3049 * which cannot be nested inside the page tree lock. So note
3050 * end now: i_size itself may be changed at any moment, but
3051 * folio lock is good enough to serialize the trimming.
3053 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3054 if (shmem_mapping(mapping))
3055 end = shmem_fallocend(mapping->host, end);
3059 * Racy check if we can split the page, before unmap_folio() will
3062 if (!can_split_folio(folio, &extra_pins)) {
3069 /* block interrupt reentry in xa_lock and spinlock */
3070 local_irq_disable();
3073 * Check if the folio is present in the page cache.
3074 * We assume all tail pages are present too, if the folio is there.
3078 if (xas_load(&xas) != folio)
3082 /* Prevent deferred_split_scan() touching ->_refcount */
3083 spin_lock(&ds_queue->split_queue_lock);
3084 if (folio_ref_freeze(folio, 1 + extra_pins)) {
3085 if (!list_empty(&folio->_deferred_list)) {
3086 ds_queue->split_queue_len--;
3087 list_del(&folio->_deferred_list);
3089 spin_unlock(&ds_queue->split_queue_lock);
3091 int nr = folio_nr_pages(folio);
3093 xas_split(&xas, folio, folio_order(folio));
3094 if (folio_test_pmd_mappable(folio)) {
3095 if (folio_test_swapbacked(folio)) {
3096 __lruvec_stat_mod_folio(folio,
3097 NR_SHMEM_THPS, -nr);
3099 __lruvec_stat_mod_folio(folio,
3101 filemap_nr_thps_dec(mapping);
3106 __split_huge_page(page, list, end);
3109 spin_unlock(&ds_queue->split_queue_lock);
3114 remap_page(folio, folio_nr_pages(folio));
3120 anon_vma_unlock_write(anon_vma);
3121 put_anon_vma(anon_vma);
3124 i_mmap_unlock_read(mapping);
3127 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3131 void folio_undo_large_rmappable(struct folio *folio)
3133 struct deferred_split *ds_queue;
3134 unsigned long flags;
3137 * At this point, there is no one trying to add the folio to
3138 * deferred_list. If folio is not in deferred_list, it's safe
3139 * to check without acquiring the split_queue_lock.
3141 if (data_race(list_empty(&folio->_deferred_list)))
3144 ds_queue = get_deferred_split_queue(folio);
3145 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3146 if (!list_empty(&folio->_deferred_list)) {
3147 ds_queue->split_queue_len--;
3148 list_del_init(&folio->_deferred_list);
3150 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3153 void deferred_split_folio(struct folio *folio)
3155 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3157 struct mem_cgroup *memcg = folio_memcg(folio);
3159 unsigned long flags;
3161 VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
3164 * The try_to_unmap() in the page reclaim path might reach here too;
3165 * this may cause a race condition that corrupts the deferred split queue.
3166 * And, if page reclaim is already handling the same folio, it is
3167 * unnecessary to handle it again in the shrinker.
3169 * Check the swapcache flag to determine if the folio is being
3170 * handled by page reclaim since THP swap would add the folio into
3171 * swap cache before calling try_to_unmap().
3173 if (folio_test_swapcache(folio))
3176 if (!list_empty(&folio->_deferred_list))
3179 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3180 if (list_empty(&folio->_deferred_list)) {
3181 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
3182 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
3183 ds_queue->split_queue_len++;
3186 set_shrinker_bit(memcg, folio_nid(folio),
3187 deferred_split_shrinker->id);
3190 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3193 static unsigned long deferred_split_count(struct shrinker *shrink,
3194 struct shrink_control *sc)
3196 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3197 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3201 ds_queue = &sc->memcg->deferred_split_queue;
3203 return READ_ONCE(ds_queue->split_queue_len);
3206 static unsigned long deferred_split_scan(struct shrinker *shrink,
3207 struct shrink_control *sc)
3209 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3210 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3211 unsigned long flags;
3213 struct folio *folio, *next;
3218 ds_queue = &sc->memcg->deferred_split_queue;
3221 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3222 /* Take pin on all head pages to avoid freeing them under us */
3223 list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
3225 if (folio_try_get(folio)) {
3226 list_move(&folio->_deferred_list, &list);
3228 /* We lost race with folio_put() */
3229 list_del_init(&folio->_deferred_list);
3230 ds_queue->split_queue_len--;
3232 if (!--sc->nr_to_scan)
3235 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
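/* Walk the folios we pinned, without the queue lock, and try to split each one. */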
3237 list_for_each_entry_safe(folio, next, &list, _deferred_list) {
3238 if (!folio_trylock(folio))
3240 /* split_huge_page() removes page from list on success */
3241 if (!split_folio(folio))
3243 folio_unlock(folio);
3248 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3249 list_splice_tail(&list, &ds_queue->split_queue);
3250 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3253 * Stop the shrinker if we didn't split any page but the queue is empty.
3254 * This can happen if pages were freed under us.
3256 if (!split && list_empty(&ds_queue->split_queue))
3261 #ifdef CONFIG_DEBUG_FS
3262 static void split_huge_pages_all(void)
3266 struct folio *folio;
3267 unsigned long pfn, max_zone_pfn;
3268 unsigned long total = 0, split = 0;
3270 pr_debug("Split all THPs\n");
3271 for_each_zone(zone) {
3272 if (!managed_zone(zone))
3274 max_zone_pfn = zone_end_pfn(zone);
3275 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
3278 page = pfn_to_online_page(pfn);
3279 if (!page || PageTail(page))
3281 folio = page_folio(page);
3282 if (!folio_try_get(folio))
3285 if (unlikely(page_folio(page) != folio))
3288 if (zone != folio_zone(folio))
3291 if (!folio_test_large(folio)
3292 || folio_test_hugetlb(folio)
3293 || !folio_test_lru(folio))
3298 nr_pages = folio_nr_pages(folio);
3299 if (!split_folio(folio))
3301 pfn += nr_pages - 1;
3302 folio_unlock(folio);
3309 pr_debug("%lu of %lu THP split\n", split, total);
3312 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
3314 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
3315 is_vm_hugetlb_page(vma);
3318 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
3319 unsigned long vaddr_end)
3322 struct task_struct *task;
3323 struct mm_struct *mm;
3324 unsigned long total = 0, split = 0;
3327 vaddr_start &= PAGE_MASK;
3328 vaddr_end &= PAGE_MASK;
3330 /* Find the task_struct from pid */
3332 task = find_task_by_vpid(pid);
3338 get_task_struct(task);
3341 /* Find the mm_struct */
3342 mm = get_task_mm(task);
3343 put_task_struct(task);
3350 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3351 pid, vaddr_start, vaddr_end);
3355 * always increase addr by PAGE_SIZE, since we could have a PTE page
3356 * table filled with PTE-mapped THPs, each of which is distinct.
3358 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3359 struct vm_area_struct *vma = vma_lookup(mm, addr);
3361 struct folio *folio;
3366 /* skip special VMA and hugetlb VMA */
3367 if (vma_not_suitable_for_thp_split(vma)) {
3372 /* FOLL_DUMP to ignore special (like zero) pages */
3373 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
3375 if (IS_ERR_OR_NULL(page))
3378 folio = page_folio(page);
3379 if (!is_transparent_hugepage(folio))
3383 if (!can_split_folio(folio, NULL))
3386 if (!folio_trylock(folio))
3389 if (!split_folio(folio))
3392 folio_unlock(folio);
3397 mmap_read_unlock(mm);
3400 pr_debug("%lu of %lu THP split\n", split, total);
3406 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3409 struct filename *file;
3410 struct file *candidate;
3411 struct address_space *mapping;
3415 unsigned long total = 0, split = 0;
3417 file = getname_kernel(file_path);
3421 candidate = file_open_name(file, O_RDONLY, 0);
3422 if (IS_ERR(candidate))
3425 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3426 file_path, off_start, off_end);
3428 mapping = candidate->f_mapping;
3430 for (index = off_start; index < off_end; index += nr_pages) {
3431 struct folio *folio = filemap_get_folio(mapping, index);
3437 if (!folio_test_large(folio))
3441 nr_pages = folio_nr_pages(folio);
3443 if (!folio_trylock(folio))
3446 if (!split_folio(folio))
3449 folio_unlock(folio);
3455 filp_close(candidate, NULL);
3458 pr_debug("%lu of %lu file-backed THP split\n", split, total);
3464 #define MAX_INPUT_BUF_SZ 255
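/*
 * A usage sketch of the input formats accepted below (the parsing in
 * split_huge_pages_write() is authoritative):
 *
 *   echo 1 > split_huge_pages                            - split all THPs system-wide
 *   echo "<pid>,0x<vaddr_start>,0x<vaddr_end>" > ...     - split THPs in a task's range
 *   echo "/path/to/file,0x<off_start>,0x<off_end>" > ... - split file-backed THPs
 */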
3466 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3467 size_t count, loff_t *ppops)
3469 static DEFINE_MUTEX(split_debug_mutex);
3471 /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
3472 char input_buf[MAX_INPUT_BUF_SZ];
3474 unsigned long vaddr_start, vaddr_end;
3476 ret = mutex_lock_interruptible(&split_debug_mutex);
3482 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3483 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3486 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
3488 if (input_buf[0] == '/') {
3490 char *buf = input_buf;
3491 char file_path[MAX_INPUT_BUF_SZ];
3492 pgoff_t off_start = 0, off_end = 0;
3493 size_t input_len = strlen(input_buf);
3495 tok = strsep(&buf, ",");
3497 strcpy(file_path, tok);
3503 ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
3508 ret = split_huge_pages_in_file(file_path, off_start, off_end);
3515 ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
3516 if (ret == 1 && pid == 1) {
3517 split_huge_pages_all();
3518 ret = strlen(input_buf);
3520 } else if (ret != 3) {
3525 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
3527 ret = strlen(input_buf);
3529 mutex_unlock(&split_debug_mutex);
3534 static const struct file_operations split_huge_pages_fops = {
3535 .owner = THIS_MODULE,
3536 .write = split_huge_pages_write,
3537 .llseek = no_llseek,
3540 static int __init split_huge_pages_debugfs(void)
3542 debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3543 &split_huge_pages_fops);
3546 late_initcall(split_huge_pages_debugfs);
3549 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
3550 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3553 struct folio *folio = page_folio(page);
3554 struct vm_area_struct *vma = pvmw->vma;
3555 struct mm_struct *mm = vma->vm_mm;
3556 unsigned long address = pvmw->address;
3557 bool anon_exclusive;
3562 if (!(pvmw->pmd && !pvmw->pte))
3565 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3566 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3568 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
3569 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
3570 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
3571 set_pmd_at(mm, address, pvmw->pmd, pmdval);
3575 if (pmd_dirty(pmdval))
3576 folio_mark_dirty(folio);
3577 if (pmd_write(pmdval))
3578 entry = make_writable_migration_entry(page_to_pfn(page));
3579 else if (anon_exclusive)
3580 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
3582 entry = make_readable_migration_entry(page_to_pfn(page));
3583 if (pmd_young(pmdval))
3584 entry = make_migration_entry_young(entry);
3585 if (pmd_dirty(pmdval))
3586 entry = make_migration_entry_dirty(entry);
3587 pmdswp = swp_entry_to_pmd(entry);
3588 if (pmd_soft_dirty(pmdval))
3589 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3590 if (pmd_uffd_wp(pmdval))
3591 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
3592 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3593 folio_remove_rmap_pmd(folio, page, vma);
3595 trace_set_migration_pmd(address, pmd_val(pmdswp));
3600 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3602 struct folio *folio = page_folio(new);
3603 struct vm_area_struct *vma = pvmw->vma;
3604 struct mm_struct *mm = vma->vm_mm;
3605 unsigned long address = pvmw->address;
3606 unsigned long haddr = address & HPAGE_PMD_MASK;
3610 if (!(pvmw->pmd && !pvmw->pte))
3613 entry = pmd_to_swp_entry(*pvmw->pmd);
3615 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
3616 if (pmd_swp_soft_dirty(*pvmw->pmd))
3617 pmde = pmd_mksoft_dirty(pmde);
3618 if (is_writable_migration_entry(entry))
3619 pmde = pmd_mkwrite(pmde, vma);
3620 if (pmd_swp_uffd_wp(*pvmw->pmd))
3621 pmde = pmd_mkuffd_wp(pmde);
3622 if (!is_migration_entry_young(entry))
3623 pmde = pmd_mkold(pmde);
3624 /* NOTE: this may also set soft-dirty on some archs */
3625 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
3626 pmde = pmd_mkdirty(pmde);
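/*
 * Re-establish the rmap for the huge mapping before the PMD is made
 * visible again below.
 */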
3628 if (folio_test_anon(folio)) {
3629 rmap_t rmap_flags = RMAP_NONE;
3631 if (!is_readable_migration_entry(entry))
3632 rmap_flags |= RMAP_EXCLUSIVE;
3634 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
3636 folio_add_file_rmap_pmd(folio, new, vma);
3638 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
3639 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
3641 /* No need to invalidate - it was non-present before */
3642 update_mmu_cache_pmd(vma, address, pvmw->pmd);
3643 trace_remove_migration_pmd(address, pmd_val(pmde));