1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2009 Red Hat, Inc.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/sched.h>
10 #include <linux/sched/mm.h>
11 #include <linux/sched/coredump.h>
12 #include <linux/sched/numa_balancing.h>
13 #include <linux/highmem.h>
14 #include <linux/hugetlb.h>
15 #include <linux/mmu_notifier.h>
16 #include <linux/rmap.h>
17 #include <linux/swap.h>
18 #include <linux/shrinker.h>
19 #include <linux/mm_inline.h>
20 #include <linux/swapops.h>
21 #include <linux/backing-dev.h>
22 #include <linux/dax.h>
23 #include <linux/mm_types.h>
24 #include <linux/khugepaged.h>
25 #include <linux/freezer.h>
26 #include <linux/pfn_t.h>
27 #include <linux/mman.h>
28 #include <linux/memremap.h>
29 #include <linux/pagemap.h>
30 #include <linux/debugfs.h>
31 #include <linux/migrate.h>
32 #include <linux/hashtable.h>
33 #include <linux/userfaultfd_k.h>
34 #include <linux/page_idle.h>
35 #include <linux/shmem_fs.h>
36 #include <linux/oom.h>
37 #include <linux/numa.h>
38 #include <linux/page_owner.h>
39 #include <linux/sched/sysctl.h>
40 #include <linux/memory-tiers.h>
41 #include <linux/compat.h>
42 #include <linux/pgalloc_tag.h>
45 #include <asm/pgalloc.h>
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/thp.h>
53 * By default, transparent hugepage support is disabled in order to avoid
54 * risking an increased memory footprint for applications that are not
55 * guaranteed to benefit from it. When transparent hugepage support is
56 * enabled, it is for all mappings, and khugepaged scans all mappings.
57 * Defrag is invoked by khugepaged hugepage allocations and by page faults
58 * for all hugepage allocations.
60 unsigned long transparent_hugepage_flags __read_mostly =
61 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
62 (1<<TRANSPARENT_HUGEPAGE_FLAG)|
64 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
65 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
67 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
68 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
69 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
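/*
 * Added note: with the Kconfig defaults above, the boot-time state matches
 * what an admin later sees via sysfs, e.g. assuming
 * CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y:
 *
 *   $ cat /sys/kernel/mm/transparent_hugepage/enabled
 *   always [madvise] never
 *
 * The DEFRAG_* and USE_ZERO_PAGE bits set here back the "defrag" and
 * "use_zero_page" files in the same directory.
 */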
71 static struct shrinker *deferred_split_shrinker;
72 static unsigned long deferred_split_count(struct shrinker *shrink,
73 struct shrink_control *sc);
74 static unsigned long deferred_split_scan(struct shrinker *shrink,
75 struct shrink_control *sc);
77 static atomic_t huge_zero_refcount;
78 struct folio *huge_zero_folio __read_mostly;
79 unsigned long huge_zero_pfn __read_mostly = ~0UL;
80 unsigned long huge_anon_orders_always __read_mostly;
81 unsigned long huge_anon_orders_madvise __read_mostly;
82 unsigned long huge_anon_orders_inherit __read_mostly;
84 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
85 unsigned long vm_flags,
86 unsigned long tva_flags,
89 bool smaps = tva_flags & TVA_SMAPS;
90 bool in_pf = tva_flags & TVA_IN_PF;
91 bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
92 unsigned long supported_orders;
94 /* Check the intersection of requested and supported orders. */
95 if (vma_is_anonymous(vma))
96 supported_orders = THP_ORDERS_ALL_ANON;
97 else if (vma_is_dax(vma))
98 supported_orders = THP_ORDERS_ALL_FILE_DAX;
100 supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
102 orders &= supported_orders;
106 if (!vma->vm_mm) /* vdso */
110 * Explicitly disabled through madvise or prctl, or some
111 * architectures may disable THP for some mappings, for example, s390 kvm.
114 if ((vm_flags & VM_NOHUGEPAGE) ||
115 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
118 * If the hardware/firmware has marked hugepage support as disabled.
120 if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
123 /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
125 return in_pf ? orders : 0;
128 * khugepaged special VMA and hugetlb VMA.
129 * Must be checked after dax since some dax mappings may have VM_MIXEDMAP set.
132 if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
136 * Check alignment for file vma and size for both file and anon vma by
137 * filtering out the unsuitable orders.
139 * Skip the check for page fault. Huge fault does the check in fault handlers.
143 int order = highest_order(orders);
147 addr = vma->vm_end - (PAGE_SIZE << order);
148 if (thp_vma_suitable_order(vma, addr, order))
150 order = next_order(&orders, order);
158 * Enabled via shmem mount options or sysfs settings.
159 * Must be done before hugepage flags check since shmem has its own flags.
162 if (!in_pf && shmem_file(vma->vm_file)) {
163 bool global_huge = shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
164 !enforce_sysfs, vma->vm_mm, vm_flags);
166 if (!vma_is_anon_shmem(vma))
167 return global_huge ? orders : 0;
168 return shmem_allowable_huge_orders(file_inode(vma->vm_file),
169 vma, vma->vm_pgoff, global_huge);
172 if (!vma_is_anonymous(vma)) {
174 * Enforce sysfs THP requirements as necessary. Anonymous vmas
175 * were already handled in thp_vma_allowable_orders().
178 (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
179 !hugepage_global_always())))
183 * Trust that ->huge_fault() handlers know what they are doing in the fault path.
186 if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
188 /* Only regular file is valid in collapse path */
189 if (((!in_pf || smaps)) && file_thp_enabled(vma))
194 if (vma_is_temporary_stack(vma))
198 * THPeligible bit of smaps should show 1 for proper VMAs even
199 * though anon_vma is not initialized yet.
201 * Allow page fault since anon_vma may not be initialized until
202 * the first page fault.
205 return (smaps || in_pf) ? orders : 0;
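/*
 * Added sketch (not part of the original file): callers treat the returned
 * value as a bitmask with bit N set when order-N THPs are allowed for this
 * VMA, and typically walk it from the largest candidate downwards, e.g.:
 *
 *	orders = thp_vma_allowable_orders(vma, vma->vm_flags, tva_flags, orders);
 *	for (order = highest_order(orders); orders;
 *	     order = next_order(&orders, order)) {
 *		(try a folio of this order, stop on success)
 *	}
 */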
210 static bool get_huge_zero_page(void)
212 struct folio *zero_folio;
214 if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
217 zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
220 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
223 /* Ensure zero folio won't have large_rmappable flag set. */
224 folio_clear_large_rmappable(zero_folio);
226 if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
228 folio_put(zero_folio);
231 WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
233 /* We take additional reference here. It will be put back by shrinker */
234 atomic_set(&huge_zero_refcount, 2);
236 count_vm_event(THP_ZERO_PAGE_ALLOC);
240 static void put_huge_zero_page(void)
243 * Counter should never go to zero here. Only shrinker can put the last reference.
246 BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
249 struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
251 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
252 return READ_ONCE(huge_zero_folio);
254 if (!get_huge_zero_page())
257 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
258 put_huge_zero_page();
260 return READ_ONCE(huge_zero_folio);
263 void mm_put_huge_zero_folio(struct mm_struct *mm)
265 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
266 put_huge_zero_page();
269 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
270 struct shrink_control *sc)
272 /* we can free zero page only if last reference remains */
273 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
276 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
277 struct shrink_control *sc)
279 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
280 struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
281 BUG_ON(zero_folio == NULL);
282 WRITE_ONCE(huge_zero_pfn, ~0UL);
283 folio_put(zero_folio);
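/*
 * Added note: lifecycle of the huge zero folio. It is allocated lazily with
 * an extra reference owned by the shrinker (refcount set to 2 on first use);
 * each mm takes one reference via mm_get_huge_zero_folio() and releases it
 * through mm_put_huge_zero_folio(). Once only the shrinker reference remains
 * (refcount == 1), the scan callback above may free it under memory pressure.
 */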
290 static struct shrinker *huge_zero_page_shrinker;
293 static ssize_t enabled_show(struct kobject *kobj,
294 struct kobj_attribute *attr, char *buf)
298 if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
299 output = "[always] madvise never";
300 else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
301 &transparent_hugepage_flags))
302 output = "always [madvise] never";
304 output = "always madvise [never]";
306 return sysfs_emit(buf, "%s\n", output);
309 static ssize_t enabled_store(struct kobject *kobj,
310 struct kobj_attribute *attr,
311 const char *buf, size_t count)
315 if (sysfs_streq(buf, "always")) {
316 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
317 set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
318 } else if (sysfs_streq(buf, "madvise")) {
319 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
320 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
321 } else if (sysfs_streq(buf, "never")) {
322 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
323 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
328 int err = start_stop_khugepaged();
335 static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
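/*
 * Example (added note, documented admin ABI):
 *
 *   echo always  > /sys/kernel/mm/transparent_hugepage/enabled
 *   echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *   echo never   > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * In "madvise" mode an application opts in per mapping with
 * madvise(addr, len, MADV_HUGEPAGE).
 */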
337 ssize_t single_hugepage_flag_show(struct kobject *kobj,
338 struct kobj_attribute *attr, char *buf,
339 enum transparent_hugepage_flag flag)
341 return sysfs_emit(buf, "%d\n",
342 !!test_bit(flag, &transparent_hugepage_flags));
345 ssize_t single_hugepage_flag_store(struct kobject *kobj,
346 struct kobj_attribute *attr,
347 const char *buf, size_t count,
348 enum transparent_hugepage_flag flag)
353 ret = kstrtoul(buf, 10, &value);
360 set_bit(flag, &transparent_hugepage_flags);
362 clear_bit(flag, &transparent_hugepage_flags);
367 static ssize_t defrag_show(struct kobject *kobj,
368 struct kobj_attribute *attr, char *buf)
372 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
373 &transparent_hugepage_flags))
374 output = "[always] defer defer+madvise madvise never";
375 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
376 &transparent_hugepage_flags))
377 output = "always [defer] defer+madvise madvise never";
378 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
379 &transparent_hugepage_flags))
380 output = "always defer [defer+madvise] madvise never";
381 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
382 &transparent_hugepage_flags))
383 output = "always defer defer+madvise [madvise] never";
385 output = "always defer defer+madvise madvise [never]";
387 return sysfs_emit(buf, "%s\n", output);
390 static ssize_t defrag_store(struct kobject *kobj,
391 struct kobj_attribute *attr,
392 const char *buf, size_t count)
394 if (sysfs_streq(buf, "always")) {
395 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
396 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
397 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
398 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
399 } else if (sysfs_streq(buf, "defer+madvise")) {
400 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
401 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
402 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
403 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
404 } else if (sysfs_streq(buf, "defer")) {
405 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
406 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
407 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
408 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
409 } else if (sysfs_streq(buf, "madvise")) {
410 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
411 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
412 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
413 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
414 } else if (sysfs_streq(buf, "never")) {
415 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
416 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
417 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
418 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
424 static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
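/*
 * Example (added note): the accepted values map to the gfp behaviour
 * described in the comment above vma_thp_gfp_mask() further down, e.g.:
 *
 *   echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 *
 * wakes kswapd/kcompactd for ordinary faults but only stalls in direct
 * reclaim/compaction for MADV_HUGEPAGE regions.
 */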
426 static ssize_t use_zero_page_show(struct kobject *kobj,
427 struct kobj_attribute *attr, char *buf)
429 return single_hugepage_flag_show(kobj, attr, buf,
430 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
432 static ssize_t use_zero_page_store(struct kobject *kobj,
433 struct kobj_attribute *attr, const char *buf, size_t count)
435 return single_hugepage_flag_store(kobj, attr, buf, count,
436 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
438 static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
440 static ssize_t hpage_pmd_size_show(struct kobject *kobj,
441 struct kobj_attribute *attr, char *buf)
443 return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
445 static struct kobj_attribute hpage_pmd_size_attr =
446 __ATTR_RO(hpage_pmd_size);
448 static struct attribute *hugepage_attr[] = {
451 &use_zero_page_attr.attr,
452 &hpage_pmd_size_attr.attr,
454 &shmem_enabled_attr.attr,
459 static const struct attribute_group hugepage_attr_group = {
460 .attrs = hugepage_attr,
463 static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
464 static void thpsize_release(struct kobject *kobj);
465 static DEFINE_SPINLOCK(huge_anon_orders_lock);
466 static LIST_HEAD(thpsize_list);
468 static ssize_t thpsize_enabled_show(struct kobject *kobj,
469 struct kobj_attribute *attr, char *buf)
471 int order = to_thpsize(kobj)->order;
474 if (test_bit(order, &huge_anon_orders_always))
475 output = "[always] inherit madvise never";
476 else if (test_bit(order, &huge_anon_orders_inherit))
477 output = "always [inherit] madvise never";
478 else if (test_bit(order, &huge_anon_orders_madvise))
479 output = "always inherit [madvise] never";
481 output = "always inherit madvise [never]";
483 return sysfs_emit(buf, "%s\n", output);
486 static ssize_t thpsize_enabled_store(struct kobject *kobj,
487 struct kobj_attribute *attr,
488 const char *buf, size_t count)
490 int order = to_thpsize(kobj)->order;
493 if (sysfs_streq(buf, "always")) {
494 spin_lock(&huge_anon_orders_lock);
495 clear_bit(order, &huge_anon_orders_inherit);
496 clear_bit(order, &huge_anon_orders_madvise);
497 set_bit(order, &huge_anon_orders_always);
498 spin_unlock(&huge_anon_orders_lock);
499 } else if (sysfs_streq(buf, "inherit")) {
500 spin_lock(&huge_anon_orders_lock);
501 clear_bit(order, &huge_anon_orders_always);
502 clear_bit(order, &huge_anon_orders_madvise);
503 set_bit(order, &huge_anon_orders_inherit);
504 spin_unlock(&huge_anon_orders_lock);
505 } else if (sysfs_streq(buf, "madvise")) {
506 spin_lock(&huge_anon_orders_lock);
507 clear_bit(order, &huge_anon_orders_always);
508 clear_bit(order, &huge_anon_orders_inherit);
509 set_bit(order, &huge_anon_orders_madvise);
510 spin_unlock(&huge_anon_orders_lock);
511 } else if (sysfs_streq(buf, "never")) {
512 spin_lock(&huge_anon_orders_lock);
513 clear_bit(order, &huge_anon_orders_always);
514 clear_bit(order, &huge_anon_orders_inherit);
515 clear_bit(order, &huge_anon_orders_madvise);
516 spin_unlock(&huge_anon_orders_lock);
523 err = start_stop_khugepaged();
530 static struct kobj_attribute thpsize_enabled_attr =
531 __ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);
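/*
 * Example (added note): each supported anonymous THP order gets its own
 * per-size directory; on a system with 4KiB base pages an admin could do:
 *
 *   echo always  > /sys/kernel/mm/transparent_hugepage/hugepages-64kB/enabled
 *   echo inherit > /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled
 *
 * "inherit" means that order follows the top-level "enabled" setting, as
 * tracked by huge_anon_orders_inherit above.
 */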
533 static struct attribute *thpsize_attrs[] = {
534 &thpsize_enabled_attr.attr,
536 &thpsize_shmem_enabled_attr.attr,
541 static const struct attribute_group thpsize_attr_group = {
542 .attrs = thpsize_attrs,
545 static const struct kobj_type thpsize_ktype = {
546 .release = &thpsize_release,
547 .sysfs_ops = &kobj_sysfs_ops,
550 DEFINE_PER_CPU(struct mthp_stat, mthp_stats) = {{{0}}};
552 static unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
554 unsigned long sum = 0;
557 for_each_possible_cpu(cpu) {
558 struct mthp_stat *this = &per_cpu(mthp_stats, cpu);
560 sum += this->stats[order][item];
566 #define DEFINE_MTHP_STAT_ATTR(_name, _index) \
567 static ssize_t _name##_show(struct kobject *kobj, \
568 struct kobj_attribute *attr, char *buf) \
570 int order = to_thpsize(kobj)->order; \
572 return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index)); \
574 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
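/*
 * Added note: each DEFINE_MTHP_STAT_ATTR() below expands to a read-only
 * sysfs attribute that sums the per-CPU counters for one order, exposed
 * under the per-size "stats" directory, e.g.:
 *
 *   cat /sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/anon_fault_alloc
 */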
576 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
577 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
578 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
579 DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
580 DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
581 DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC);
582 DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK);
583 DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE);
584 DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
585 DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
586 DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
588 static struct attribute *stats_attrs[] = {
589 &anon_fault_alloc_attr.attr,
590 &anon_fault_fallback_attr.attr,
591 &anon_fault_fallback_charge_attr.attr,
593 &swpout_fallback_attr.attr,
594 &shmem_alloc_attr.attr,
595 &shmem_fallback_attr.attr,
596 &shmem_fallback_charge_attr.attr,
598 &split_failed_attr.attr,
599 &split_deferred_attr.attr,
603 static struct attribute_group stats_attr_group = {
605 .attrs = stats_attrs,
608 static struct thpsize *thpsize_create(int order, struct kobject *parent)
610 unsigned long size = (PAGE_SIZE << order) / SZ_1K;
611 struct thpsize *thpsize;
614 thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
616 return ERR_PTR(-ENOMEM);
618 ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
619 "hugepages-%lukB", size);
625 ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
627 kobject_put(&thpsize->kobj);
631 ret = sysfs_create_group(&thpsize->kobj, &stats_attr_group);
633 kobject_put(&thpsize->kobj);
637 thpsize->order = order;
641 static void thpsize_release(struct kobject *kobj)
643 kfree(to_thpsize(kobj));
646 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
649 struct thpsize *thpsize;
650 unsigned long orders;
654 * Default to setting PMD-sized THP to inherit the global setting and
655 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
656 * constant so we have to do this here.
658 huge_anon_orders_inherit = BIT(PMD_ORDER);
660 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
661 if (unlikely(!*hugepage_kobj)) {
662 pr_err("failed to create transparent hugepage kobject\n");
666 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
668 pr_err("failed to register transparent hugepage group\n");
672 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
674 pr_err("failed to register transparent hugepage group\n");
675 goto remove_hp_group;
678 orders = THP_ORDERS_ALL_ANON;
679 order = highest_order(orders);
681 thpsize = thpsize_create(order, *hugepage_kobj);
682 if (IS_ERR(thpsize)) {
683 pr_err("failed to create thpsize for order %d\n", order);
684 err = PTR_ERR(thpsize);
687 list_add(&thpsize->node, &thpsize_list);
688 order = next_order(&orders, order);
694 hugepage_exit_sysfs(*hugepage_kobj);
697 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
699 kobject_put(*hugepage_kobj);
703 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
705 struct thpsize *thpsize, *tmp;
707 list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
708 list_del(&thpsize->node);
709 kobject_put(&thpsize->kobj);
712 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
713 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
714 kobject_put(hugepage_kobj);
717 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
722 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
725 #endif /* CONFIG_SYSFS */
727 static int __init thp_shrinker_init(void)
729 huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
730 if (!huge_zero_page_shrinker)
733 deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
734 SHRINKER_MEMCG_AWARE |
736 "thp-deferred_split");
737 if (!deferred_split_shrinker) {
738 shrinker_free(huge_zero_page_shrinker);
742 huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
743 huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
744 shrinker_register(huge_zero_page_shrinker);
746 deferred_split_shrinker->count_objects = deferred_split_count;
747 deferred_split_shrinker->scan_objects = deferred_split_scan;
748 shrinker_register(deferred_split_shrinker);
753 static void __init thp_shrinker_exit(void)
755 shrinker_free(huge_zero_page_shrinker);
756 shrinker_free(deferred_split_shrinker);
759 static int __init hugepage_init(void)
762 struct kobject *hugepage_kobj;
764 if (!has_transparent_hugepage()) {
765 transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
770 * hugepages can't be allocated by the buddy allocator
772 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
774 err = hugepage_init_sysfs(&hugepage_kobj);
778 err = khugepaged_init();
782 err = thp_shrinker_init();
787 * By default disable transparent hugepages on smaller systems,
788 * where the extra memory used could hurt more than TLB overhead
789 * is likely to save. The admin can still enable it through /sys.
791 if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
792 transparent_hugepage_flags = 0;
796 err = start_stop_khugepaged();
804 khugepaged_destroy();
806 hugepage_exit_sysfs(hugepage_kobj);
810 subsys_initcall(hugepage_init);
812 static int __init setup_transparent_hugepage(char *str)
817 if (!strcmp(str, "always")) {
818 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
819 &transparent_hugepage_flags);
820 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
821 &transparent_hugepage_flags);
823 } else if (!strcmp(str, "madvise")) {
824 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
825 &transparent_hugepage_flags);
826 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
827 &transparent_hugepage_flags);
829 } else if (!strcmp(str, "never")) {
830 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
831 &transparent_hugepage_flags);
832 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
833 &transparent_hugepage_flags);
838 pr_warn("transparent_hugepage= cannot parse, ignored\n");
841 __setup("transparent_hugepage=", setup_transparent_hugepage);
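/*
 * Example (added note): the same three states can also be selected at boot,
 * before sysfs is available, via the kernel command line:
 *
 *   transparent_hugepage=madvise
 */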
843 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
845 if (likely(vma->vm_flags & VM_WRITE))
846 pmd = pmd_mkwrite(pmd, vma);
852 struct deferred_split *get_deferred_split_queue(struct folio *folio)
854 struct mem_cgroup *memcg = folio_memcg(folio);
855 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
858 return &memcg->deferred_split_queue;
860 return &pgdat->deferred_split_queue;
864 struct deferred_split *get_deferred_split_queue(struct folio *folio)
866 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
868 return &pgdat->deferred_split_queue;
872 static inline bool is_transparent_hugepage(const struct folio *folio)
874 if (!folio_test_large(folio))
877 return is_huge_zero_folio(folio) ||
878 folio_test_large_rmappable(folio);
881 static unsigned long __thp_get_unmapped_area(struct file *filp,
882 unsigned long addr, unsigned long len,
883 loff_t off, unsigned long flags, unsigned long size,
886 loff_t off_end = off + len;
887 loff_t off_align = round_up(off, size);
888 unsigned long len_pad, ret, off_sub;
890 if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
893 if (off_end <= off_align || (off_end - off_align) < size)
896 len_pad = len + size;
897 if (len_pad < len || (off + len_pad) < off)
900 ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad,
901 off >> PAGE_SHIFT, flags, vm_flags);
904 * The failure might be due to length padding. The caller will retry
905 * without the padding.
907 if (IS_ERR_VALUE(ret))
911 * Do not try to align to THP boundary if allocation at the address hint succeeds.
917 off_sub = (off - ret) & (size - 1);
919 if (test_bit(MMF_TOPDOWN, &current->mm->flags) && !off_sub)
926 unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
927 unsigned long len, unsigned long pgoff, unsigned long flags,
931 loff_t off = (loff_t)pgoff << PAGE_SHIFT;
933 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags);
937 return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags,
941 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
942 unsigned long len, unsigned long pgoff, unsigned long flags)
944 return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0);
946 EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
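/*
 * Added sketch: the padding above lets the search return an address whose
 * offset within a PMD-sized region matches the file offset, so a later huge
 * fault can install a PMD mapping directly. Roughly:
 *
 *	off  = (loff_t)pgoff << PAGE_SHIFT;	(file offset of the mapping)
 *	ret  = area found for len + PMD_SIZE;	(padded search)
 *	ret += (off - ret) & (PMD_SIZE - 1);	(make vaddr congruent to off)
 */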
948 static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
949 struct page *page, gfp_t gfp)
951 struct vm_area_struct *vma = vmf->vma;
952 struct folio *folio = page_folio(page);
954 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
957 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
959 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
961 count_vm_event(THP_FAULT_FALLBACK);
962 count_vm_event(THP_FAULT_FALLBACK_CHARGE);
963 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
964 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
965 return VM_FAULT_FALLBACK;
967 folio_throttle_swaprate(folio, gfp);
969 pgtable = pte_alloc_one(vma->vm_mm);
970 if (unlikely(!pgtable)) {
975 folio_zero_user(folio, vmf->address);
977 * The memory barrier inside __folio_mark_uptodate makes sure that
978 * folio_zero_user writes become visible before the set_pmd_at() is issued.
981 __folio_mark_uptodate(folio);
983 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
984 if (unlikely(!pmd_none(*vmf->pmd))) {
989 ret = check_stable_address_space(vma->vm_mm);
993 /* Deliver the page fault to userland */
994 if (userfaultfd_missing(vma)) {
995 spin_unlock(vmf->ptl);
997 pte_free(vma->vm_mm, pgtable);
998 ret = handle_userfault(vmf, VM_UFFD_MISSING);
999 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1003 entry = mk_huge_pmd(page, vma->vm_page_prot);
1004 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1005 folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE);
1006 folio_add_lru_vma(folio, vma);
1007 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1008 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
1009 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1010 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1011 mm_inc_nr_ptes(vma->vm_mm);
1012 spin_unlock(vmf->ptl);
1013 count_vm_event(THP_FAULT_ALLOC);
1014 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC);
1015 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
1020 spin_unlock(vmf->ptl);
1023 pte_free(vma->vm_mm, pgtable);
1030 * always: directly stall for all thp allocations
1031 * defer: wake kswapd and fail if not immediately available
1032 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
1033 * fail if not immediately available
1034 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately available
1036 * never: never stall for any thp allocation
1038 gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
1040 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
1042 /* Always do synchronous compaction */
1043 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
1044 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
1046 /* Kick kcompactd and fail quickly */
1047 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
1048 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
1050 /* Synchronous compaction if madvised, otherwise kick kcompactd */
1051 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
1052 return GFP_TRANSHUGE_LIGHT |
1053 (vma_madvised ? __GFP_DIRECT_RECLAIM :
1054 __GFP_KSWAPD_RECLAIM);
1056 /* Only do synchronous compaction if madvised */
1057 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
1058 return GFP_TRANSHUGE_LIGHT |
1059 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
1061 return GFP_TRANSHUGE_LIGHT;
1064 /* Caller must hold page table lock. */
1065 static void set_huge_zero_folio(pgtable_t pgtable, struct mm_struct *mm,
1066 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
1067 struct folio *zero_folio)
1070 if (!pmd_none(*pmd))
1072 entry = mk_pmd(&zero_folio->page, vma->vm_page_prot);
1073 entry = pmd_mkhuge(entry);
1074 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1075 set_pmd_at(mm, haddr, pmd, entry);
1079 vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
1081 struct vm_area_struct *vma = vmf->vma;
1083 struct folio *folio;
1084 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1087 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
1088 return VM_FAULT_FALLBACK;
1089 ret = vmf_anon_prepare(vmf);
1092 khugepaged_enter_vma(vma, vma->vm_flags);
1094 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
1095 !mm_forbids_zeropage(vma->vm_mm) &&
1096 transparent_hugepage_use_zero_page()) {
1098 struct folio *zero_folio;
1101 pgtable = pte_alloc_one(vma->vm_mm);
1102 if (unlikely(!pgtable))
1103 return VM_FAULT_OOM;
1104 zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
1105 if (unlikely(!zero_folio)) {
1106 pte_free(vma->vm_mm, pgtable);
1107 count_vm_event(THP_FAULT_FALLBACK);
1108 return VM_FAULT_FALLBACK;
1110 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1112 if (pmd_none(*vmf->pmd)) {
1113 ret = check_stable_address_space(vma->vm_mm);
1115 spin_unlock(vmf->ptl);
1116 pte_free(vma->vm_mm, pgtable);
1117 } else if (userfaultfd_missing(vma)) {
1118 spin_unlock(vmf->ptl);
1119 pte_free(vma->vm_mm, pgtable);
1120 ret = handle_userfault(vmf, VM_UFFD_MISSING);
1121 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1123 set_huge_zero_folio(pgtable, vma->vm_mm, vma,
1124 haddr, vmf->pmd, zero_folio);
1125 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1126 spin_unlock(vmf->ptl);
1129 spin_unlock(vmf->ptl);
1130 pte_free(vma->vm_mm, pgtable);
1134 gfp = vma_thp_gfp_mask(vma);
1135 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
1136 if (unlikely(!folio)) {
1137 count_vm_event(THP_FAULT_FALLBACK);
1138 count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK);
1139 return VM_FAULT_FALLBACK;
1141 return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
1144 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
1145 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
1148 struct mm_struct *mm = vma->vm_mm;
1152 ptl = pmd_lock(mm, pmd);
1153 if (!pmd_none(*pmd)) {
1155 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
1156 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
1159 entry = pmd_mkyoung(*pmd);
1160 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1161 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
1162 update_mmu_cache_pmd(vma, addr, pmd);
1168 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
1169 if (pfn_t_devmap(pfn))
1170 entry = pmd_mkdevmap(entry);
1172 entry = pmd_mkyoung(pmd_mkdirty(entry));
1173 entry = maybe_pmd_mkwrite(entry, vma);
1177 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1182 set_pmd_at(mm, addr, pmd, entry);
1183 update_mmu_cache_pmd(vma, addr, pmd);
1188 pte_free(mm, pgtable);
1192 * vmf_insert_pfn_pmd - insert a pmd size pfn
1193 * @vmf: Structure describing the fault
1194 * @pfn: pfn to insert
1195 * @write: whether it's a write fault
1197 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
1199 * Return: vm_fault_t value.
1201 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
1203 unsigned long addr = vmf->address & PMD_MASK;
1204 struct vm_area_struct *vma = vmf->vma;
1205 pgprot_t pgprot = vma->vm_page_prot;
1206 pgtable_t pgtable = NULL;
1209 * If we had pmd_special, we could avoid all these restrictions,
1210 * but we need to be consistent with PTEs and architectures that
1211 * can't support a 'special' bit.
1213 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1214 !pfn_t_devmap(pfn));
1215 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1216 (VM_PFNMAP|VM_MIXEDMAP));
1217 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1219 if (addr < vma->vm_start || addr >= vma->vm_end)
1220 return VM_FAULT_SIGBUS;
1222 if (arch_needs_pgtable_deposit()) {
1223 pgtable = pte_alloc_one(vma->vm_mm);
1225 return VM_FAULT_OOM;
1228 track_pfn_insert(vma, &pgprot, pfn);
1230 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
1231 return VM_FAULT_NOPAGE;
1233 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
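/*
 * Added sketch (hypothetical handler, not from this file): vmf_insert_pfn_pmd()
 * is typically called from a driver's ->huge_fault() callback, for example for
 * DAX-backed mappings, roughly:
 *
 *	static vm_fault_t my_huge_fault(struct vm_fault *vmf, unsigned int order)
 *	{
 *		pfn_t pfn = resolve_backing_pfn(vmf);	(hypothetical helper)
 *
 *		if (order == PMD_ORDER)
 *			return vmf_insert_pfn_pmd(vmf, pfn,
 *						  vmf->flags & FAULT_FLAG_WRITE);
 *		return VM_FAULT_FALLBACK;
 *	}
 */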
1235 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1236 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
1238 if (likely(vma->vm_flags & VM_WRITE))
1239 pud = pud_mkwrite(pud);
1243 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
1244 pud_t *pud, pfn_t pfn, bool write)
1246 struct mm_struct *mm = vma->vm_mm;
1247 pgprot_t prot = vma->vm_page_prot;
1251 ptl = pud_lock(mm, pud);
1252 if (!pud_none(*pud)) {
1254 if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
1255 WARN_ON_ONCE(!is_huge_zero_pud(*pud));
1258 entry = pud_mkyoung(*pud);
1259 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
1260 if (pudp_set_access_flags(vma, addr, pud, entry, 1))
1261 update_mmu_cache_pud(vma, addr, pud);
1266 entry = pud_mkhuge(pfn_t_pud(pfn, prot));
1267 if (pfn_t_devmap(pfn))
1268 entry = pud_mkdevmap(entry);
1270 entry = pud_mkyoung(pud_mkdirty(entry));
1271 entry = maybe_pud_mkwrite(entry, vma);
1273 set_pud_at(mm, addr, pud, entry);
1274 update_mmu_cache_pud(vma, addr, pud);
1281 * vmf_insert_pfn_pud - insert a pud size pfn
1282 * @vmf: Structure describing the fault
1283 * @pfn: pfn to insert
1284 * @write: whether it's a write fault
1286 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
1288 * Return: vm_fault_t value.
1290 vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
1292 unsigned long addr = vmf->address & PUD_MASK;
1293 struct vm_area_struct *vma = vmf->vma;
1294 pgprot_t pgprot = vma->vm_page_prot;
1297 * If we had pud_special, we could avoid all these restrictions,
1298 * but we need to be consistent with PTEs and architectures that
1299 * can't support a 'special' bit.
1301 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1302 !pfn_t_devmap(pfn));
1303 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1304 (VM_PFNMAP|VM_MIXEDMAP));
1305 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1307 if (addr < vma->vm_start || addr >= vma->vm_end)
1308 return VM_FAULT_SIGBUS;
1310 track_pfn_insert(vma, &pgprot, pfn);
1312 insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
1313 return VM_FAULT_NOPAGE;
1315 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
1316 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1318 void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1319 pmd_t *pmd, bool write)
1323 _pmd = pmd_mkyoung(*pmd);
1325 _pmd = pmd_mkdirty(_pmd);
1326 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1328 update_mmu_cache_pmd(vma, addr, pmd);
1331 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1332 pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
1334 unsigned long pfn = pmd_pfn(*pmd);
1335 struct mm_struct *mm = vma->vm_mm;
1339 assert_spin_locked(pmd_lockptr(mm, pmd));
1341 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1344 if (pmd_present(*pmd) && pmd_devmap(*pmd))
1349 if (flags & FOLL_TOUCH)
1350 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1353 * device mapped pages can only be returned if the
1354 * caller will manage the page reference count.
1356 if (!(flags & (FOLL_GET | FOLL_PIN)))
1357 return ERR_PTR(-EEXIST);
1359 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1360 *pgmap = get_dev_pagemap(pfn, *pgmap);
1362 return ERR_PTR(-EFAULT);
1363 page = pfn_to_page(pfn);
1364 ret = try_grab_folio(page_folio(page), 1, flags);
1366 page = ERR_PTR(ret);
1371 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1372 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1373 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1375 spinlock_t *dst_ptl, *src_ptl;
1376 struct page *src_page;
1377 struct folio *src_folio;
1379 pgtable_t pgtable = NULL;
1382 /* Skip if it can be re-filled on fault */
1383 if (!vma_is_anonymous(dst_vma))
1386 pgtable = pte_alloc_one(dst_mm);
1387 if (unlikely(!pgtable))
1390 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1391 src_ptl = pmd_lockptr(src_mm, src_pmd);
1392 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1397 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1398 if (unlikely(is_swap_pmd(pmd))) {
1399 swp_entry_t entry = pmd_to_swp_entry(pmd);
1401 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1402 if (!is_readable_migration_entry(entry)) {
1403 entry = make_readable_migration_entry(
1405 pmd = swp_entry_to_pmd(entry);
1406 if (pmd_swp_soft_dirty(*src_pmd))
1407 pmd = pmd_swp_mksoft_dirty(pmd);
1408 if (pmd_swp_uffd_wp(*src_pmd))
1409 pmd = pmd_swp_mkuffd_wp(pmd);
1410 set_pmd_at(src_mm, addr, src_pmd, pmd);
1412 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1413 mm_inc_nr_ptes(dst_mm);
1414 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1415 if (!userfaultfd_wp(dst_vma))
1416 pmd = pmd_swp_clear_uffd_wp(pmd);
1417 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1423 if (unlikely(!pmd_trans_huge(pmd))) {
1424 pte_free(dst_mm, pgtable);
1428 * When page table lock is held, the huge zero pmd should not be
1429 * under splitting since we don't split the page itself, only pmd to
1432 if (is_huge_zero_pmd(pmd)) {
1434 * mm_get_huge_zero_folio() will never allocate a new
1435 * folio here, since we already have a zero page to
1436 * copy. It just takes a reference.
1438 mm_get_huge_zero_folio(dst_mm);
1442 src_page = pmd_page(pmd);
1443 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1444 src_folio = page_folio(src_page);
1446 folio_get(src_folio);
1447 if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
1448 /* Page maybe pinned: split and retry the fault on PTEs. */
1449 folio_put(src_folio);
1450 pte_free(dst_mm, pgtable);
1451 spin_unlock(src_ptl);
1452 spin_unlock(dst_ptl);
1453 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
1456 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1458 mm_inc_nr_ptes(dst_mm);
1459 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1460 pmdp_set_wrprotect(src_mm, addr, src_pmd);
1461 if (!userfaultfd_wp(dst_vma))
1462 pmd = pmd_clear_uffd_wp(pmd);
1463 pmd = pmd_mkold(pmd_wrprotect(pmd));
1464 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1468 spin_unlock(src_ptl);
1469 spin_unlock(dst_ptl);
1474 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1475 void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1476 pud_t *pud, bool write)
1480 _pud = pud_mkyoung(*pud);
1482 _pud = pud_mkdirty(_pud);
1483 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1485 update_mmu_cache_pud(vma, addr, pud);
1488 int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1489 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1490 struct vm_area_struct *vma)
1492 spinlock_t *dst_ptl, *src_ptl;
1496 dst_ptl = pud_lock(dst_mm, dst_pud);
1497 src_ptl = pud_lockptr(src_mm, src_pud);
1498 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1502 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1506 * When page table lock is held, the huge zero pud should not be
1507 * under splitting since we don't split the page itself, only pud to
1510 if (is_huge_zero_pud(pud)) {
1511 /* No huge zero pud yet */
1515 * TODO: once we support anonymous pages, use
1516 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
1518 pudp_set_wrprotect(src_mm, addr, src_pud);
1519 pud = pud_mkold(pud_wrprotect(pud));
1520 set_pud_at(dst_mm, addr, dst_pud, pud);
1524 spin_unlock(src_ptl);
1525 spin_unlock(dst_ptl);
1529 void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1531 bool write = vmf->flags & FAULT_FLAG_WRITE;
1533 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1534 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1537 touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1539 spin_unlock(vmf->ptl);
1541 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1543 void huge_pmd_set_accessed(struct vm_fault *vmf)
1545 bool write = vmf->flags & FAULT_FLAG_WRITE;
1547 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1548 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1551 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1554 spin_unlock(vmf->ptl);
1557 vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1559 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1560 struct vm_area_struct *vma = vmf->vma;
1561 struct folio *folio;
1563 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1564 pmd_t orig_pmd = vmf->orig_pmd;
1566 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1567 VM_BUG_ON_VMA(!vma->anon_vma, vma);
1569 if (is_huge_zero_pmd(orig_pmd))
1572 spin_lock(vmf->ptl);
1574 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1575 spin_unlock(vmf->ptl);
1579 page = pmd_page(orig_pmd);
1580 folio = page_folio(page);
1581 VM_BUG_ON_PAGE(!PageHead(page), page);
1583 /* Early check when only holding the PT lock. */
1584 if (PageAnonExclusive(page))
1587 if (!folio_trylock(folio)) {
1589 spin_unlock(vmf->ptl);
1591 spin_lock(vmf->ptl);
1592 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1593 spin_unlock(vmf->ptl);
1594 folio_unlock(folio);
1601 /* Recheck after temporarily dropping the PT lock. */
1602 if (PageAnonExclusive(page)) {
1603 folio_unlock(folio);
1608 * See do_wp_page(): we can only reuse the folio exclusively if
1609 * there are no additional references. Note that we always drain
1610 * the LRU cache immediately after adding a THP.
1612 if (folio_ref_count(folio) >
1613 1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
1614 goto unlock_fallback;
1615 if (folio_test_swapcache(folio))
1616 folio_free_swap(folio);
1617 if (folio_ref_count(folio) == 1) {
1620 folio_move_anon_rmap(folio, vma);
1621 SetPageAnonExclusive(page);
1622 folio_unlock(folio);
1624 if (unlikely(unshare)) {
1625 spin_unlock(vmf->ptl);
1628 entry = pmd_mkyoung(orig_pmd);
1629 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1630 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1631 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1632 spin_unlock(vmf->ptl);
1637 folio_unlock(folio);
1638 spin_unlock(vmf->ptl);
1640 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1641 return VM_FAULT_FALLBACK;
1644 static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1645 unsigned long addr, pmd_t pmd)
1649 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1652 /* Don't touch entries that are not even readable (NUMA hinting). */
1653 if (pmd_protnone(pmd))
1656 /* Do we need write faults for softdirty tracking? */
1657 if (pmd_needs_soft_dirty_wp(vma, pmd))
1660 /* Do we need write faults for uffd-wp tracking? */
1661 if (userfaultfd_huge_pmd_wp(vma, pmd))
1664 if (!(vma->vm_flags & VM_SHARED)) {
1665 /* See can_change_pte_writable(). */
1666 page = vm_normal_page_pmd(vma, addr, pmd);
1667 return page && PageAnon(page) && PageAnonExclusive(page);
1670 /* See can_change_pte_writable(). */
1671 return pmd_dirty(pmd);
1674 /* NUMA hinting page fault entry point for trans huge pmds */
1675 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
1677 struct vm_area_struct *vma = vmf->vma;
1678 pmd_t oldpmd = vmf->orig_pmd;
1680 struct folio *folio;
1681 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1682 int nid = NUMA_NO_NODE;
1683 int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
1684 bool writable = false;
1687 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1688 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1689 spin_unlock(vmf->ptl);
1693 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1696 * Detect now whether the PMD could be writable; this information
1697 * is only valid while holding the PT lock.
1699 writable = pmd_write(pmd);
1700 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1701 can_change_pmd_writable(vma, vmf->address, pmd))
1704 folio = vm_normal_folio_pmd(vma, haddr, pmd);
1708 /* See similar comment in do_numa_page for explanation */
1710 flags |= TNF_NO_GROUP;
1712 nid = folio_nid(folio);
1714 * For memory tiering mode, cpupid of slow memory page is used
1715 * to record page access time. So use default value.
1717 if (node_is_toptier(nid))
1718 last_cpupid = folio_last_cpupid(folio);
1719 target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
1720 if (target_nid == NUMA_NO_NODE)
1722 if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
1723 flags |= TNF_MIGRATE_FAIL;
1726 /* The folio is isolated and isolation code holds a folio reference. */
1727 spin_unlock(vmf->ptl);
1730 if (!migrate_misplaced_folio(folio, vma, target_nid)) {
1731 flags |= TNF_MIGRATED;
1733 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
1737 flags |= TNF_MIGRATE_FAIL;
1738 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1739 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1740 spin_unlock(vmf->ptl);
1744 /* Restore the PMD */
1745 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1746 pmd = pmd_mkyoung(pmd);
1748 pmd = pmd_mkwrite(pmd, vma);
1749 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1750 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1751 spin_unlock(vmf->ptl);
1753 if (nid != NUMA_NO_NODE)
1754 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
1759 * Return true if we do MADV_FREE successfully on entire pmd page.
1760 * Otherwise, return false.
1762 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1763 pmd_t *pmd, unsigned long addr, unsigned long next)
1767 struct folio *folio;
1768 struct mm_struct *mm = tlb->mm;
1771 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1773 ptl = pmd_trans_huge_lock(pmd, vma);
1778 if (is_huge_zero_pmd(orig_pmd))
1781 if (unlikely(!pmd_present(orig_pmd))) {
1782 VM_BUG_ON(thp_migration_supported() &&
1783 !is_pmd_migration_entry(orig_pmd));
1787 folio = pmd_folio(orig_pmd);
1789 * If other processes are mapping this folio, we couldn't discard
1790 * the folio unless they all do MADV_FREE so let's skip the folio.
1792 if (folio_likely_mapped_shared(folio))
1795 if (!folio_trylock(folio))
1799 * If the user wants to discard only part of the THP, split it so MADV_FREE
1800 * will deactivate only them.
1802 if (next - addr != HPAGE_PMD_SIZE) {
1806 folio_unlock(folio);
1811 if (folio_test_dirty(folio))
1812 folio_clear_dirty(folio);
1813 folio_unlock(folio);
1815 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1816 pmdp_invalidate(vma, addr, pmd);
1817 orig_pmd = pmd_mkold(orig_pmd);
1818 orig_pmd = pmd_mkclean(orig_pmd);
1820 set_pmd_at(mm, addr, pmd, orig_pmd);
1821 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1824 folio_mark_lazyfree(folio);
1832 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1836 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1837 pte_free(mm, pgtable);
1841 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1842 pmd_t *pmd, unsigned long addr)
1847 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1849 ptl = __pmd_trans_huge_lock(pmd, vma);
1853 * For architectures like ppc64 we look at deposited pgtable
1854 * when calling pmdp_huge_get_and_clear. So do the
1855 * pgtable_trans_huge_withdraw after finishing pmdp related
1858 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1860 arch_check_zapped_pmd(vma, orig_pmd);
1861 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1862 if (vma_is_special_huge(vma)) {
1863 if (arch_needs_pgtable_deposit())
1864 zap_deposited_table(tlb->mm, pmd);
1866 } else if (is_huge_zero_pmd(orig_pmd)) {
1867 zap_deposited_table(tlb->mm, pmd);
1870 struct folio *folio = NULL;
1871 int flush_needed = 1;
1873 if (pmd_present(orig_pmd)) {
1874 struct page *page = pmd_page(orig_pmd);
1876 folio = page_folio(page);
1877 folio_remove_rmap_pmd(folio, page, vma);
1878 WARN_ON_ONCE(folio_mapcount(folio) < 0);
1879 VM_BUG_ON_PAGE(!PageHead(page), page);
1880 } else if (thp_migration_supported()) {
1883 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1884 entry = pmd_to_swp_entry(orig_pmd);
1885 folio = pfn_swap_entry_folio(entry);
1888 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1890 if (folio_test_anon(folio)) {
1891 zap_deposited_table(tlb->mm, pmd);
1892 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1894 if (arch_needs_pgtable_deposit())
1895 zap_deposited_table(tlb->mm, pmd);
1896 add_mm_counter(tlb->mm, mm_counter_file(folio),
1902 tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
1907 #ifndef pmd_move_must_withdraw
1908 static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1909 spinlock_t *old_pmd_ptl,
1910 struct vm_area_struct *vma)
1913 * With split pmd lock we also need to move preallocated
1914 * PTE page table if new_pmd is on different PMD page table.
1916 * We also don't deposit and withdraw tables for file pages.
1918 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1922 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1924 #ifdef CONFIG_MEM_SOFT_DIRTY
1925 if (unlikely(is_pmd_migration_entry(pmd)))
1926 pmd = pmd_swp_mksoft_dirty(pmd);
1927 else if (pmd_present(pmd))
1928 pmd = pmd_mksoft_dirty(pmd);
1933 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1934 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
1936 spinlock_t *old_ptl, *new_ptl;
1938 struct mm_struct *mm = vma->vm_mm;
1939 bool force_flush = false;
1942 * The destination pmd shouldn't be established, free_pgtables()
1943 * should have released it; but move_page_tables() might have already
1944 * inserted a page table, if racing against shmem/file collapse.
1946 if (!pmd_none(*new_pmd)) {
1947 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1952 * We don't have to worry about the ordering of src and dst
1953 * ptlocks because exclusive mmap_lock prevents deadlock.
1955 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1957 new_ptl = pmd_lockptr(mm, new_pmd);
1958 if (new_ptl != old_ptl)
1959 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1960 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1961 if (pmd_present(pmd))
1963 VM_BUG_ON(!pmd_none(*new_pmd));
1965 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1967 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1968 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1970 pmd = move_soft_dirty_pmd(pmd);
1971 set_pmd_at(mm, new_addr, new_pmd, pmd);
1973 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1974 if (new_ptl != old_ptl)
1975 spin_unlock(new_ptl);
1976 spin_unlock(old_ptl);
1984 * - 0 if PMD could not be locked
1985 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1986 * or if prot_numa but THP migration is not supported
1987 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
1989 int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1990 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
1991 unsigned long cp_flags)
1993 struct mm_struct *mm = vma->vm_mm;
1995 pmd_t oldpmd, entry;
1996 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
1997 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
1998 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
2001 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2003 if (prot_numa && !thp_migration_supported())
2006 ptl = __pmd_trans_huge_lock(pmd, vma);
2010 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2011 if (is_swap_pmd(*pmd)) {
2012 swp_entry_t entry = pmd_to_swp_entry(*pmd);
2013 struct folio *folio = pfn_swap_entry_folio(entry);
2016 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
2017 if (is_writable_migration_entry(entry)) {
2019 * A protection check is difficult so
2020 * just be safe and disable write
2022 if (folio_test_anon(folio))
2023 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2025 entry = make_readable_migration_entry(swp_offset(entry));
2026 newpmd = swp_entry_to_pmd(entry);
2027 if (pmd_swp_soft_dirty(*pmd))
2028 newpmd = pmd_swp_mksoft_dirty(newpmd);
2034 newpmd = pmd_swp_mkuffd_wp(newpmd);
2035 else if (uffd_wp_resolve)
2036 newpmd = pmd_swp_clear_uffd_wp(newpmd);
2037 if (!pmd_same(*pmd, newpmd))
2038 set_pmd_at(mm, addr, pmd, newpmd);
2044 struct folio *folio;
2047 * Avoid trapping faults against the zero page. The read-only
2048 * data is likely to be read-cached on the local CPU and
2049 * local/remote hits to the zero page are not interesting.
2051 if (is_huge_zero_pmd(*pmd))
2054 if (pmd_protnone(*pmd))
2057 folio = pmd_folio(*pmd);
2058 toptier = node_is_toptier(folio_nid(folio));
2060 * Skip scanning top tier node if normal numa
2061 * balancing is disabled
2063 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
2067 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
2069 folio_xchg_access_time(folio,
2070 jiffies_to_msecs(jiffies));
2073 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
2074 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
2075 * which is also under mmap_read_lock(mm):
2078 * change_huge_pmd(prot_numa=1)
2079 * pmdp_huge_get_and_clear_notify()
2080 * madvise_dontneed()
2082 * pmd_trans_huge(*pmd) == 0 (without ptl)
2085 * // pmd is re-established
2087 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
2088 * which may break userspace.
2090 * pmdp_invalidate_ad() is required to make sure we don't miss
2091 * dirty/young flags set by hardware.
2093 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
2095 entry = pmd_modify(oldpmd, newprot);
2097 entry = pmd_mkuffd_wp(entry);
2098 else if (uffd_wp_resolve)
2100 * Leave the write bit to be handled by PF interrupt
2101 * handler, then things like COW could be properly
2104 entry = pmd_clear_uffd_wp(entry);
2106 /* See change_pte_range(). */
2107 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2108 can_change_pmd_writable(vma, addr, entry))
2109 entry = pmd_mkwrite(entry, vma);
2112 set_pmd_at(mm, addr, pmd, entry);
2114 if (huge_pmd_needs_flush(oldpmd, entry))
2115 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
2121 #ifdef CONFIG_USERFAULTFD
2123 * The PT lock for src_pmd and dst_vma/src_vma (for reading) are locked by
2124 * the caller, but it must return after releasing the page_table_lock.
2125 * Just move the page from src_pmd to dst_pmd if possible.
2126 * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
2127 * repeated by the caller, or other errors in case of failure.
2129 int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2130 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2131 unsigned long dst_addr, unsigned long src_addr)
2133 pmd_t _dst_pmd, src_pmdval;
2134 struct page *src_page;
2135 struct folio *src_folio;
2136 struct anon_vma *src_anon_vma;
2137 spinlock_t *src_ptl, *dst_ptl;
2138 pgtable_t src_pgtable;
2139 struct mmu_notifier_range range;
2142 src_pmdval = *src_pmd;
2143 src_ptl = pmd_lockptr(mm, src_pmd);
2145 lockdep_assert_held(src_ptl);
2146 vma_assert_locked(src_vma);
2147 vma_assert_locked(dst_vma);
2149 /* Sanity checks before the operation */
2150 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2151 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2152 spin_unlock(src_ptl);
2156 if (!pmd_trans_huge(src_pmdval)) {
2157 spin_unlock(src_ptl);
2158 if (is_pmd_migration_entry(src_pmdval)) {
2159 pmd_migration_entry_wait(mm, &src_pmdval);
2165 src_page = pmd_page(src_pmdval);
2167 if (!is_huge_zero_pmd(src_pmdval)) {
2168 if (unlikely(!PageAnonExclusive(src_page))) {
2169 spin_unlock(src_ptl);
2173 src_folio = page_folio(src_page);
2174 folio_get(src_folio);
2178 spin_unlock(src_ptl);
2180 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2181 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2182 src_addr + HPAGE_PMD_SIZE);
2183 mmu_notifier_invalidate_range_start(&range);
2186 folio_lock(src_folio);
2189 * split_huge_page walks the anon_vma chain without the page
2190 * lock. Serialize against it with the anon_vma lock, the page
2191 * lock is not enough.
2193 src_anon_vma = folio_get_anon_vma(src_folio);
2194 if (!src_anon_vma) {
2198 anon_vma_lock_write(src_anon_vma);
2200 src_anon_vma = NULL;
2202 dst_ptl = pmd_lockptr(mm, dst_pmd);
2203 double_pt_lock(src_ptl, dst_ptl);
2204 if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2205 !pmd_same(*dst_pmd, dst_pmdval))) {
2210 if (folio_maybe_dma_pinned(src_folio) ||
2211 !PageAnonExclusive(&src_folio->page)) {
2216 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2217 WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2222 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2223 /* Folio got pinned from under us. Put it back and fail the move. */
2224 if (folio_maybe_dma_pinned(src_folio)) {
2225 set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2230 folio_move_anon_rmap(src_folio, dst_vma);
2231 src_folio->index = linear_page_index(dst_vma, dst_addr);
2233 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
2234 /* Follow mremap() behavior and treat the entry dirty after the move */
2235 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2237 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2238 _dst_pmd = mk_huge_pmd(src_page, dst_vma->vm_page_prot);
2240 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2242 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2243 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2245 double_pt_unlock(src_ptl, dst_ptl);
2247 anon_vma_unlock_write(src_anon_vma);
2248 put_anon_vma(src_anon_vma);
2251 /* unblock rmap walks */
2253 folio_unlock(src_folio);
2254 mmu_notifier_invalidate_range_end(&range);
2256 folio_put(src_folio);
2259 #endif /* CONFIG_USERFAULTFD */
2262 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
2264 * Note that if it returns page table lock pointer, this routine returns without
2265 * unlocking page table lock. So callers must unlock it.
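/*
 * Illustrative caller pattern (sketch only; names are generic placeholders):
 *
 *	ptl = __pmd_trans_huge_lock(pmd, vma);
 *	if (!ptl)
 *		return 0;	(not a huge pmd: fall back to pte handling)
 *	... operate on *pmd while holding ptl ...
 *	spin_unlock(ptl);
 */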
2267 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
2270 ptl = pmd_lock(vma->vm_mm, pmd);
2271 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
2279 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
2281 * Note that if it returns page table lock pointer, this routine returns without
2282 * unlocking page table lock. So callers must unlock it.
2284 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2288 ptl = pud_lock(vma->vm_mm, pud);
2289 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2295 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2296 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2297 pud_t *pud, unsigned long addr)
2301 ptl = __pud_trans_huge_lock(pud, vma);
2305 pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
2306 tlb_remove_pud_tlb_entry(tlb, pud, addr);
2307 if (vma_is_special_huge(vma)) {
2309 /* No zero page support yet */
2311 /* No support for anonymous PUD pages yet */
2317 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2318 unsigned long haddr)
2320 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2321 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2322 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2323 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2325 count_vm_event(THP_SPLIT_PUD);
2327 pudp_huge_clear_flush(vma, haddr, pud);
2330 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2331 unsigned long address)
2334 struct mmu_notifier_range range;
2336 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2337 address & HPAGE_PUD_MASK,
2338 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2339 mmu_notifier_invalidate_range_start(&range);
2340 ptl = pud_lock(vma->vm_mm, pud);
2341 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2343 __split_huge_pud_locked(vma, pud, range.start);
2347 mmu_notifier_invalidate_range_end(&range);
2349 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
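/*
 * __split_huge_zero_page_pmd() below rewrites a huge-zero-page PMD as
 * HPAGE_PMD_NR individual pte_special() zero-page PTEs: the PMD is cleared
 * and flushed, the deposited page table is withdrawn and populated through a
 * temporary pmd, each PTE is pointed at my_zero_pfn() (carrying over uffd-wp
 * if it was set on the PMD), and the table is finally hooked back up after an
 * smp_wmb() so the PTEs are visible before the PMD.
 */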
2351 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2352 unsigned long haddr, pmd_t *pmd)
2354 struct mm_struct *mm = vma->vm_mm;
2356 pmd_t _pmd, old_pmd;
2362 * Leave the pmd empty until the ptes are filled. Note that it is fine to
2363 * delay notification until mmu_notifier_invalidate_range_end(), as we are
2364 * replacing a write-protected zero huge page pmd with write-protected zero page ptes.
2367 * See Documentation/mm/mmu_notifier.rst
2369 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2371 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2372 pmd_populate(mm, &_pmd, pgtable);
2374 pte = pte_offset_map(&_pmd, haddr);
2376 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2379 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
2380 entry = pte_mkspecial(entry);
2381 if (pmd_uffd_wp(old_pmd))
2382 entry = pte_mkuffd_wp(entry);
2383 VM_BUG_ON(!pte_none(ptep_get(pte)));
2384 set_pte_at(mm, addr, pte, entry);
2388 smp_wmb(); /* make pte visible before pmd */
2389 pmd_populate(mm, pmd, pgtable);
2392 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2393 unsigned long haddr, bool freeze)
2395 struct mm_struct *mm = vma->vm_mm;
2396 struct folio *folio;
2399 pmd_t old_pmd, _pmd;
2400 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2401 bool anon_exclusive = false, dirty = false;
2406 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2407 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2408 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2409 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2410 && !pmd_devmap(*pmd));
2412 count_vm_event(THP_SPLIT_PMD);
2414 if (!vma_is_anonymous(vma)) {
2415 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2417 * We are going to unmap this huge page. So
2418 * just go ahead and zap it
2420 if (arch_needs_pgtable_deposit())
2421 zap_deposited_table(mm, pmd);
2422 if (vma_is_special_huge(vma))
2424 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2427 entry = pmd_to_swp_entry(old_pmd);
2428 folio = pfn_swap_entry_folio(entry);
2430 page = pmd_page(old_pmd);
2431 folio = page_folio(page);
2432 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
2433 folio_mark_dirty(folio);
2434 if (!folio_test_referenced(folio) && pmd_young(old_pmd))
2435 folio_set_referenced(folio);
2436 folio_remove_rmap_pmd(folio, page, vma);
2439 add_mm_counter(mm, mm_counter_file(folio), -HPAGE_PMD_NR);
2443 if (is_huge_zero_pmd(*pmd)) {
2445 * FIXME: Do we want to invalidate the secondary mmu by calling
2446 * mmu_notifier_arch_invalidate_secondary_tlbs()? See the comments
2447 * below inside __split_huge_pmd().
2449 * We are going from a write-protected huge zero page to write-protected
2450 * small zero pages, so it does not seem useful to invalidate the
2451 * secondary mmu at this time.
2453 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2456 pmd_migration = is_pmd_migration_entry(*pmd);
2457 if (unlikely(pmd_migration)) {
2461 entry = pmd_to_swp_entry(old_pmd);
2462 page = pfn_swap_entry_to_page(entry);
2463 write = is_writable_migration_entry(entry);
2465 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2466 young = is_migration_entry_young(entry);
2467 dirty = is_migration_entry_dirty(entry);
2468 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2469 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2472 * Up to this point the pmd is present and huge and userland has
2473 * the whole access to the hugepage during the split (which
2474 * happens in place). If we overwrite the pmd with the not-huge
2475 * version pointing to the pte here (which of course we could if
2476 * all CPUs were bug free), userland could trigger a small page
2477 * size TLB miss on the small sized TLB while the hugepage TLB
2478 * entry is still established in the huge TLB. Some CPUs don't
2479 * like that. See
2480 * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2481 * 383 on page 105. Intel should be safe, but it also warns that
2482 * it's only safe if the permission and cache attributes of the
2483 * two entries loaded in the two TLBs are identical (which should
2484 * be the case here). But it is generally safer to never allow
2485 * small and huge TLB entries for the same virtual address to be
2486 * loaded simultaneously. So instead of doing "pmd_populate();
2487 * flush_pmd_tlb_range();" we first mark the current pmd
2488 * notpresent (atomically, because here pmd_trans_huge must
2489 * remain set at all times on the pmd until the split is
2490 * complete for this pmd), then we flush the SMP TLB and finally
2491 * we write the non-huge version of the pmd entry with
2492 * pmd_populate().
2494 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2495 page = pmd_page(old_pmd);
2496 folio = page_folio(page);
2497 if (pmd_dirty(old_pmd)) {
2499 folio_set_dirty(folio);
2501 write = pmd_write(old_pmd);
2502 young = pmd_young(old_pmd);
2503 soft_dirty = pmd_soft_dirty(old_pmd);
2504 uffd_wp = pmd_uffd_wp(old_pmd);
2506 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2507 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
2510 * Without "freeze", we'll simply split the PMD, propagating the
2511 * PageAnonExclusive() flag for each PTE by setting it for
2512 * each subpage -- no need to (temporarily) clear.
2514 * With "freeze" we want to replace mapped pages by
2515 * migration entries right away. This is only possible if we
2516 * managed to clear PageAnonExclusive() -- see
2517 * set_pmd_migration_entry().
2519 * In case we cannot clear PageAnonExclusive(), split the PMD
2520 * only and let try_to_migrate_one() fail later.
2522 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
2524 anon_exclusive = PageAnonExclusive(page);
2525 if (freeze && anon_exclusive &&
2526 folio_try_share_anon_rmap_pmd(folio, page))
2529 rmap_t rmap_flags = RMAP_NONE;
2531 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2533 rmap_flags |= RMAP_EXCLUSIVE;
2534 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
2535 vma, haddr, rmap_flags);
2540 * Withdraw the table only after we mark the pmd entry invalid.
2541 * This is critical for some architectures (Power).
2543 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2544 pmd_populate(mm, &_pmd, pgtable);
2546 pte = pte_offset_map(&_pmd, haddr);
2550 * Note that NUMA hinting access restrictions are not transferred to
2551 * avoid any possibility of altering permissions across VMAs.
2553 if (freeze || pmd_migration) {
2554 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2556 swp_entry_t swp_entry;
2559 swp_entry = make_writable_migration_entry(
2560 page_to_pfn(page + i));
2561 else if (anon_exclusive)
2562 swp_entry = make_readable_exclusive_migration_entry(
2563 page_to_pfn(page + i));
2565 swp_entry = make_readable_migration_entry(
2566 page_to_pfn(page + i));
2568 swp_entry = make_migration_entry_young(swp_entry);
2570 swp_entry = make_migration_entry_dirty(swp_entry);
2571 entry = swp_entry_to_pte(swp_entry);
2573 entry = pte_swp_mksoft_dirty(entry);
2575 entry = pte_swp_mkuffd_wp(entry);
2577 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2578 set_pte_at(mm, addr, pte + i, entry);
2583 entry = mk_pte(page, READ_ONCE(vma->vm_page_prot));
2585 entry = pte_mkwrite(entry, vma);
2587 entry = pte_mkold(entry);
2588 /* NOTE: this may set soft-dirty too on some archs */
2590 entry = pte_mkdirty(entry);
2592 entry = pte_mksoft_dirty(entry);
2594 entry = pte_mkuffd_wp(entry);
2596 for (i = 0; i < HPAGE_PMD_NR; i++)
2597 VM_WARN_ON(!pte_none(ptep_get(pte + i)));
2599 set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR);
2604 folio_remove_rmap_pmd(folio, page, vma);
2608 smp_wmb(); /* make pte visible before pmd */
2609 pmd_populate(mm, pmd, pgtable);
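/*
 * In short, the PMD split above proceeds as: invalidate the huge PMD (keeping
 * it "huge but not present" to avoid the TLB erratum described earlier),
 * withdraw the deposited page table, fill it with either migration entries
 * (freeze or already-migrating case) or ordinary PTEs that preserve the
 * write/young/dirty/soft-dirty/uffd-wp state, then publish the table with
 * pmd_populate() after an smp_wmb().
 */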
2612 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
2613 pmd_t *pmd, bool freeze, struct folio *folio)
2615 VM_WARN_ON_ONCE(folio && !folio_test_pmd_mappable(folio));
2616 VM_WARN_ON_ONCE(!IS_ALIGNED(address, HPAGE_PMD_SIZE));
2617 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2618 VM_BUG_ON(freeze && !folio);
2621 * When the caller requests to set up a migration entry, we
2622 * require a folio to check the PMD against. Otherwise, there
2623 * is a risk of replacing the wrong folio.
2625 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2626 is_pmd_migration_entry(*pmd)) {
2627 if (folio && folio != pmd_folio(*pmd))
2629 __split_huge_pmd_locked(vma, pmd, address, freeze);
2633 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2634 unsigned long address, bool freeze, struct folio *folio)
2637 struct mmu_notifier_range range;
2639 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
2640 address & HPAGE_PMD_MASK,
2641 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2642 mmu_notifier_invalidate_range_start(&range);
2643 ptl = pmd_lock(vma->vm_mm, pmd);
2644 split_huge_pmd_locked(vma, range.start, pmd, freeze, folio);
2646 mmu_notifier_invalidate_range_end(&range);
2649 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2650 bool freeze, struct folio *folio)
2652 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2657 __split_huge_pmd(vma, pmd, address, freeze, folio);
2660 static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2663 * If the new address isn't hpage aligned and it could previously
2664 * contain a hugepage: check if we need to split a huge pmd.
2666 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2667 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2668 ALIGN(address, HPAGE_PMD_SIZE)))
2669 split_huge_pmd_address(vma, address, false, NULL);
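/*
 * Example (illustrative numbers): with HPAGE_PMD_SIZE = 2M, a new boundary at
 * 0x200000 is already PMD aligned and needs nothing, while a boundary at
 * 0x201000 lies inside the 2M range [0x200000, 0x400000); if that whole range
 * is still covered by the VMA, the huge pmd mapping it must be split before
 * the boundary can move.
 */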
2672 void vma_adjust_trans_huge(struct vm_area_struct *vma,
2673 unsigned long start,
2677 /* Check if we need to split start first. */
2678 split_huge_pmd_if_needed(vma, start);
2680 /* Check if we need to split end next. */
2681 split_huge_pmd_if_needed(vma, end);
2684 * If we're also updating the next vma vm_start,
2685 * check if we need to split it.
2687 if (adjust_next > 0) {
2688 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2689 unsigned long nstart = next->vm_start;
2690 nstart += adjust_next;
2691 split_huge_pmd_if_needed(next, nstart);
2695 static void unmap_folio(struct folio *folio)
2697 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC |
2700 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2702 if (folio_test_pmd_mappable(folio))
2703 ttu_flags |= TTU_SPLIT_HUGE_PMD;
2706 * Anon pages need migration entries to preserve them, but file
2707 * pages can simply be left unmapped, then faulted back on demand.
2708 * If that is ever changed (perhaps for mlock), update remap_page().
2710 if (folio_test_anon(folio))
2711 try_to_migrate(folio, ttu_flags);
2713 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2715 try_to_unmap_flush();
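/*
 * __discard_anon_folio_pmd_locked() below is the fast path used when a clean,
 * not swap-backed anonymous PMD-mapped folio is being unmapped: instead of
 * splitting the PMD, the entry is cleared and, provided neither the folio nor
 * the PMD is dirty and the only references are the isolation reference plus
 * the rmap mapping(s), the mapping is simply discarded. Otherwise the
 * original PMD is restored and the caller falls back to the normal path.
 */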
2718 static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
2719 unsigned long addr, pmd_t *pmdp,
2720 struct folio *folio)
2722 struct mm_struct *mm = vma->vm_mm;
2723 int ref_count, map_count;
2724 pmd_t orig_pmd = *pmdp;
2726 if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
2729 orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
2732 * Syncing against concurrent GUP-fast:
2733 * - clear PMD; barrier; read refcount
2734 * - inc refcount; barrier; read PMD
2738 ref_count = folio_ref_count(folio);
2739 map_count = folio_mapcount(folio);
2742 * Order reads for folio refcount and dirty flag
2743 * (see comments in __remove_mapping()).
2748 * If the folio or its PMD is redirtied at this point, or if there
2749 * are unexpected references, we give up discarding this folio and remap it.
2752 * The only folio refs must be one from isolation plus the rmap(s).
2754 if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) ||
2755 ref_count != map_count + 1) {
2756 set_pmd_at(mm, addr, pmdp, orig_pmd);
2760 folio_remove_rmap_pmd(folio, pmd_page(orig_pmd), vma);
2761 zap_deposited_table(mm, pmdp);
2762 add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
2763 if (vma->vm_flags & VM_LOCKED)
2764 mlock_drain_local();
2770 bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
2771 pmd_t *pmdp, struct folio *folio)
2773 VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
2774 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
2775 VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
2777 if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
2778 return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
2783 static void remap_page(struct folio *folio, unsigned long nr)
2787 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
2788 if (!folio_test_anon(folio))
2791 remove_migration_ptes(folio, folio, true);
2792 i += folio_nr_pages(folio);
2795 folio = folio_next(folio);
2799 static void lru_add_page_tail(struct page *head, struct page *tail,
2800 struct lruvec *lruvec, struct list_head *list)
2802 VM_BUG_ON_PAGE(!PageHead(head), head);
2803 VM_BUG_ON_PAGE(PageLRU(tail), head);
2804 lockdep_assert_held(&lruvec->lru_lock);
2807 /* page reclaim is reclaiming a huge page */
2808 VM_WARN_ON(PageLRU(head));
2810 list_add_tail(&tail->lru, list);
2812 /* head is still on lru (and we have it frozen) */
2813 VM_WARN_ON(!PageLRU(head));
2814 if (PageUnevictable(tail))
2815 tail->mlock_count = 0;
2817 list_add_tail(&tail->lru, &head->lru);
2822 static void __split_huge_page_tail(struct folio *folio, int tail,
2823 struct lruvec *lruvec, struct list_head *list,
2824 unsigned int new_order)
2826 struct page *head = &folio->page;
2827 struct page *page_tail = head + tail;
2829 * Careful: new_folio is not a "real" folio until we clear PageTail.
2830 * Don't pass it around before clear_compound_head().
2832 struct folio *new_folio = (struct folio *)page_tail;
2834 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2837 * Clone page flags before unfreezing refcount.
2839 * A change of page flags may follow a successful get_page_unless_zero(),
2840 * for example lock_page() setting PG_waiters.
2842 * Note that for mapped sub-pages of an anonymous THP,
2843 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
2844 * the migration entry instead from where remap_page() will restore it.
2845 * We can still have PG_anon_exclusive set on effectively unmapped and
2846 * unreferenced sub-pages of an anonymous THP: we can simply drop
2847 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
2849 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2850 page_tail->flags |= (head->flags &
2851 ((1L << PG_referenced) |
2852 (1L << PG_swapbacked) |
2853 (1L << PG_swapcache) |
2854 (1L << PG_mlocked) |
2855 (1L << PG_uptodate) |
2857 (1L << PG_workingset) |
2859 (1L << PG_unevictable) |
2860 #ifdef CONFIG_ARCH_USES_PG_ARCH_X
2865 LRU_GEN_MASK | LRU_REFS_MASK));
2867 /* ->mapping in first and second tail page is replaced by other uses */
2868 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2870 page_tail->mapping = head->mapping;
2871 page_tail->index = head->index + tail;
2874 * page->private should not be set in tail pages. Fix up and warn once
2875 * if private is unexpectedly set.
2877 if (unlikely(page_tail->private)) {
2878 VM_WARN_ON_ONCE_PAGE(true, page_tail);
2879 page_tail->private = 0;
2881 if (folio_test_swapcache(folio))
2882 new_folio->swap.val = folio->swap.val + tail;
2884 /* Page flags must be visible before we make the page non-compound. */
2888 * Clear PageTail before unfreezing page refcount.
2890 * After successful get_page_unless_zero() might follow put_page()
2891 * which needs correct compound_head().
2893 clear_compound_head(page_tail);
2895 prep_compound_page(page_tail, new_order);
2896 folio_set_large_rmappable(new_folio);
2899 /* Finally unfreeze refcount. Additional reference from page cache. */
2900 page_ref_unfreeze(page_tail,
2901 1 + ((!folio_test_anon(folio) || folio_test_swapcache(folio)) ?
2902 folio_nr_pages(new_folio) : 0));
2904 if (folio_test_young(folio))
2905 folio_set_young(new_folio);
2906 if (folio_test_idle(folio))
2907 folio_set_idle(new_folio);
2909 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
2912 * always add to the tail because some iterators expect new
2913 * pages to show after the currently processed elements - e.g. migrate_pages().
2916 lru_add_page_tail(head, page_tail, lruvec, list);
2919 static void __split_huge_page(struct page *page, struct list_head *list,
2920 pgoff_t end, unsigned int new_order)
2922 struct folio *folio = page_folio(page);
2923 struct page *head = &folio->page;
2924 struct lruvec *lruvec;
2925 struct address_space *swap_cache = NULL;
2926 unsigned long offset = 0;
2927 int i, nr_dropped = 0;
2928 unsigned int new_nr = 1 << new_order;
2929 int order = folio_order(folio);
2930 unsigned int nr = 1 << order;
2932 /* complete memcg works before add pages to LRU */
2933 split_page_memcg(head, order, new_order);
2935 if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
2936 offset = swap_cache_index(folio->swap);
2937 swap_cache = swap_address_space(folio->swap);
2938 xa_lock(&swap_cache->i_pages);
2941 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
2942 lruvec = folio_lruvec_lock(folio);
2944 ClearPageHasHWPoisoned(head);
2946 for (i = nr - new_nr; i >= new_nr; i -= new_nr) {
2947 __split_huge_page_tail(folio, i, lruvec, list, new_order);
2948 /* Some pages can be beyond EOF: drop them from page cache */
2949 if (head[i].index >= end) {
2950 struct folio *tail = page_folio(head + i);
2952 if (shmem_mapping(folio->mapping))
2954 else if (folio_test_clear_dirty(tail))
2955 folio_account_cleaned(tail,
2956 inode_to_wb(folio->mapping->host));
2957 __filemap_remove_folio(tail, NULL);
2959 } else if (!PageAnon(page)) {
2960 __xa_store(&folio->mapping->i_pages, head[i].index,
2962 } else if (swap_cache) {
2963 __xa_store(&swap_cache->i_pages, offset + i,
2969 ClearPageCompound(head);
2971 struct folio *new_folio = (struct folio *)head;
2973 folio_set_order(new_folio, new_order);
2975 unlock_page_lruvec(lruvec);
2976 /* Caller disabled irqs, so they are still disabled here */
2978 split_page_owner(head, order, new_order);
2979 pgalloc_tag_split(head, 1 << order);
2981 /* See comment in __split_huge_page_tail() */
2982 if (folio_test_anon(folio)) {
2983 /* Additional pin to swap cache */
2984 if (folio_test_swapcache(folio)) {
2985 folio_ref_add(folio, 1 + new_nr);
2986 xa_unlock(&swap_cache->i_pages);
2988 folio_ref_inc(folio);
2991 /* Additional pin to page cache */
2992 folio_ref_add(folio, 1 + new_nr);
2993 xa_unlock(&folio->mapping->i_pages);
2998 shmem_uncharge(folio->mapping->host, nr_dropped);
2999 remap_page(folio, nr);
3002 * set page to its compound_head when split to non order-0 pages, so
3003 * we can skip unlocking it below, since PG_locked is transferred to
3004 * the compound_head of the page and the caller will unlock it.
3007 page = compound_head(page);
3009 for (i = 0; i < nr; i += new_nr) {
3010 struct page *subpage = head + i;
3011 struct folio *new_folio = page_folio(subpage);
3012 if (subpage == page)
3014 folio_unlock(new_folio);
3017 * Subpages may be freed if there wasn't any mapping left,
3018 * e.g. if add_to_swap() is running on an lru page that
3019 * had its mapping zapped. And freeing these pages
3020 * requires taking the lru_lock, so we do the put_page
3021 * of the tail pages after the split is complete.
3023 free_page_and_swap_cache(subpage);
3027 /* Racy check whether the huge page can be split */
3028 bool can_split_folio(struct folio *folio, int *pextra_pins)
3032 /* Additional pins from page cache */
3033 if (folio_test_anon(folio))
3034 extra_pins = folio_test_swapcache(folio) ?
3035 folio_nr_pages(folio) : 0;
3037 extra_pins = folio_nr_pages(folio);
3039 *pextra_pins = extra_pins;
3040 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
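/*
 * Worked example of the check above (illustrative): an anonymous folio of
 * 512 pages sitting in the swap cache is expected to have
 * folio_ref_count() == folio_mapcount() + 512 (swap cache pins) + 1 (the
 * caller's reference). Any additional reference, e.g. a GUP pin, makes the
 * comparison fail and the split is refused.
 */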
3044 * This function splits a large folio into smaller folios of order @new_order.
3045 * @page can point to any page of the large folio to split. The split operation
3046 * does not change the position of @page.
3050 * 1) The caller must hold a reference on the @page's owning folio, also known
3051 * as the large folio.
3053 * 2) The large folio must be locked.
3055 * 3) The folio must not be pinned. Any unexpected folio references, including
3056 * GUP pins, will result in the folio not getting split; instead, the caller
3057 * will receive an -EAGAIN.
3059 * 4) @new_order > 1, usually. Splitting to order-1 anonymous folios is not
3060 * supported for non-file-backed folios, because folio->_deferred_list, which
3061 * is used by partially mapped folios, is stored in subpage 2, but an order-1
3062 * folio only has subpages 0 and 1. File-backed order-1 folios are supported,
3063 * since they do not use _deferred_list.
3065 * After splitting, the caller's folio reference will be transferred to @page,
3066 * resulting in a raised refcount of @page after this call. The other pages may
3067 * be freed if they are not mapped.
3069 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
3071 * Pages in @new_order will inherit the mapping, flags, and so on from the huge page.
3074 * Returns 0 if the huge page was split successfully.
3076 * Returns -EAGAIN if the folio has unexpected reference (e.g., GUP) or if
3077 * the folio was concurrently removed from the page cache.
3079 * Returns -EBUSY when trying to split the huge zeropage, if the folio is
3080 * under writeback, if fs-specific folio metadata cannot currently be
3081 * released, or if some unexpected race happened (e.g., anon VMA disappeared,
3084 * Callers should ensure that the order respects the address space mapping
3085 * min-order if one is set for non-anonymous folios.
3087 * Returns -EINVAL when trying to split to an order that is incompatible
3088 * with the folio. Splitting to order 0 is compatible with all folios.
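/*
 * Illustrative caller pattern (sketch only, error handling elided): a caller
 * holding a folio reference typically does
 *
 *	if (folio_trylock(folio)) {
 *		err = split_huge_page_to_list_to_order(&folio->page, NULL, 0);
 *		folio_unlock(folio);
 *	}
 *
 * with NULL meaning "put the tail pages on the LRU" and 0 requesting a full
 * split down to base pages.
 */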
3090 int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
3091 unsigned int new_order)
3093 struct folio *folio = page_folio(page);
3094 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3095 /* reset xarray order to new order after split */
3096 XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
3097 struct anon_vma *anon_vma = NULL;
3098 struct address_space *mapping = NULL;
3099 int order = folio_order(folio);
3100 int extra_pins, ret;
3104 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3105 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
3107 if (new_order >= folio_order(folio))
3110 if (folio_test_anon(folio)) {
3111 /* order-1 is not supported for anonymous THP. */
3112 if (new_order == 1) {
3113 VM_WARN_ONCE(1, "Cannot split to order-1 folio");
3116 } else if (new_order) {
3117 /* Split shmem folio to non-zero order not supported */
3118 if (shmem_mapping(folio->mapping)) {
3120 "Cannot split shmem folio to non-0 order");
3124 * No split if the file system does not support large folio.
3125 * Note that we might still have THPs in such mappings due to
3126 * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
3127 * does not actually support large folios properly.
3129 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3130 !mapping_large_folio_support(folio->mapping)) {
3132 "Cannot split file folio to non-0 order");
3137 /* Only swapping a whole PMD-mapped folio is supported */
3138 if (folio_test_swapcache(folio) && new_order)
3141 is_hzp = is_huge_zero_folio(folio);
3143 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
3147 if (folio_test_writeback(folio))
3150 if (folio_test_anon(folio)) {
3152 * The caller does not necessarily hold an mmap_lock that would
3153 * prevent the anon_vma from disappearing, so we first take a
3154 * reference to it and then lock the anon_vma for write. This
3155 * is similar to folio_lock_anon_vma_read except the write lock
3156 * is taken to serialise against parallel split or collapse operations.
3159 anon_vma = folio_get_anon_vma(folio);
3166 anon_vma_lock_write(anon_vma);
3168 unsigned int min_order;
3171 mapping = folio->mapping;
3179 min_order = mapping_min_folio_order(folio->mapping);
3180 if (new_order < min_order) {
3181 VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
3187 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
3190 if (!filemap_release_folio(folio, gfp)) {
3195 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
3196 if (xas_error(&xas)) {
3197 ret = xas_error(&xas);
3202 i_mmap_lock_read(mapping);
3205 * __split_huge_page() may need to trim off pages beyond EOF:
3206 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
3207 * which cannot be nested inside the page tree lock. So note
3208 * end now: i_size itself may be changed at any moment, but
3209 * folio lock is good enough to serialize the trimming.
3211 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3212 if (shmem_mapping(mapping))
3213 end = shmem_fallocend(mapping->host, end);
3217 * Racy check if we can split the page, before unmap_folio() will
3220 if (!can_split_folio(folio, &extra_pins)) {
3227 /* block interrupt reentry in xa_lock and spinlock */
3228 local_irq_disable();
3231 * Check if the folio is present in the page cache.
3232 * We assume all tail pages are present too, if the folio is there.
3236 if (xas_load(&xas) != folio)
3240 /* Prevent deferred_split_scan() touching ->_refcount */
3241 spin_lock(&ds_queue->split_queue_lock);
3242 if (folio_ref_freeze(folio, 1 + extra_pins)) {
3243 if (folio_order(folio) > 1 &&
3244 !list_empty(&folio->_deferred_list)) {
3245 ds_queue->split_queue_len--;
3247 * Reinitialize page_deferred_list after removing the
3248 * page from the split_queue, otherwise a subsequent
3249 * split will see list corruption when checking the
3250 * page_deferred_list.
3252 list_del_init(&folio->_deferred_list);
3254 spin_unlock(&ds_queue->split_queue_lock);
3256 int nr = folio_nr_pages(folio);
3258 xas_split(&xas, folio, folio_order(folio));
3259 if (folio_test_pmd_mappable(folio) &&
3260 new_order < HPAGE_PMD_ORDER) {
3261 if (folio_test_swapbacked(folio)) {
3262 __lruvec_stat_mod_folio(folio,
3263 NR_SHMEM_THPS, -nr);
3265 __lruvec_stat_mod_folio(folio,
3267 filemap_nr_thps_dec(mapping);
3272 __split_huge_page(page, list, end, new_order);
3275 spin_unlock(&ds_queue->split_queue_lock);
3280 remap_page(folio, folio_nr_pages(folio));
3286 anon_vma_unlock_write(anon_vma);
3287 put_anon_vma(anon_vma);
3290 i_mmap_unlock_read(mapping);
3293 if (order == HPAGE_PMD_ORDER)
3294 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3295 count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED);
3299 int min_order_for_split(struct folio *folio)
3301 if (folio_test_anon(folio))
3304 if (!folio->mapping) {
3305 if (folio_test_pmd_mappable(folio))
3306 count_vm_event(THP_SPLIT_PAGE_FAILED);
3310 return mapping_min_folio_order(folio->mapping);
3313 int split_folio_to_list(struct folio *folio, struct list_head *list)
3315 int ret = min_order_for_split(folio);
3320 return split_huge_page_to_list_to_order(&folio->page, list, ret);
3323 void __folio_undo_large_rmappable(struct folio *folio)
3325 struct deferred_split *ds_queue;
3326 unsigned long flags;
3328 ds_queue = get_deferred_split_queue(folio);
3329 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3330 if (!list_empty(&folio->_deferred_list)) {
3331 ds_queue->split_queue_len--;
3332 list_del_init(&folio->_deferred_list);
3334 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3337 void deferred_split_folio(struct folio *folio)
3339 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3341 struct mem_cgroup *memcg = folio_memcg(folio);
3343 unsigned long flags;
3346 * Order-1 folios have no space for a deferred list, but skipping
3347 * them does not waste much memory either.
3349 if (folio_order(folio) <= 1)
3353 * The try_to_unmap() in the page reclaim path might reach here too;
3354 * that could race with us and corrupt the deferred split queue.
3355 * Also, if page reclaim is already handling the same folio, it is
3356 * unnecessary to handle it again in the shrinker.
3358 * Check the swapcache flag to determine if the folio is being
3359 * handled by page reclaim since THP swap would add the folio into
3360 * swap cache before calling try_to_unmap().
3362 if (folio_test_swapcache(folio))
3365 if (!list_empty(&folio->_deferred_list))
3368 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3369 if (list_empty(&folio->_deferred_list)) {
3370 if (folio_test_pmd_mappable(folio))
3371 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
3372 count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
3373 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
3374 ds_queue->split_queue_len++;
3377 set_shrinker_bit(memcg, folio_nid(folio),
3378 deferred_split_shrinker->id);
3381 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
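/*
 * The two functions below implement the deferred-split shrinker:
 * deferred_split_count() reports how many folios are queued (per node, or per
 * memcg when sc->memcg is set) and deferred_split_scan() pulls queued folios
 * onto a private list under the queue lock, then tries to lock and split each
 * one, splicing whatever could not be split back onto the queue.
 */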
3384 static unsigned long deferred_split_count(struct shrinker *shrink,
3385 struct shrink_control *sc)
3387 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3388 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3392 ds_queue = &sc->memcg->deferred_split_queue;
3394 return READ_ONCE(ds_queue->split_queue_len);
3397 static unsigned long deferred_split_scan(struct shrinker *shrink,
3398 struct shrink_control *sc)
3400 struct pglist_data *pgdata = NODE_DATA(sc->nid);
3401 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
3402 unsigned long flags;
3404 struct folio *folio, *next;
3409 ds_queue = &sc->memcg->deferred_split_queue;
3412 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3413 /* Take pin on all head pages to avoid freeing them under us */
3414 list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
3416 if (folio_try_get(folio)) {
3417 list_move(&folio->_deferred_list, &list);
3419 /* We lost race with folio_put() */
3420 list_del_init(&folio->_deferred_list);
3421 ds_queue->split_queue_len--;
3423 if (!--sc->nr_to_scan)
3426 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3428 list_for_each_entry_safe(folio, next, &list, _deferred_list) {
3429 if (!folio_trylock(folio))
3431 /* split_huge_page() removes page from list on success */
3432 if (!split_folio(folio))
3434 folio_unlock(folio);
3439 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3440 list_splice_tail(&list, &ds_queue->split_queue);
3441 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
3444 * Stop the shrinker if we didn't split any page but the queue is now empty.
3445 * This can happen if the pages were freed under us.
3447 if (!split && list_empty(&ds_queue->split_queue))
3452 #ifdef CONFIG_DEBUG_FS
3453 static void split_huge_pages_all(void)
3457 struct folio *folio;
3458 unsigned long pfn, max_zone_pfn;
3459 unsigned long total = 0, split = 0;
3461 pr_debug("Split all THPs\n");
3462 for_each_zone(zone) {
3463 if (!managed_zone(zone))
3465 max_zone_pfn = zone_end_pfn(zone);
3466 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
3469 page = pfn_to_online_page(pfn);
3470 if (!page || PageTail(page))
3472 folio = page_folio(page);
3473 if (!folio_try_get(folio))
3476 if (unlikely(page_folio(page) != folio))
3479 if (zone != folio_zone(folio))
3482 if (!folio_test_large(folio)
3483 || folio_test_hugetlb(folio)
3484 || !folio_test_lru(folio))
3489 nr_pages = folio_nr_pages(folio);
3490 if (!split_folio(folio))
3492 pfn += nr_pages - 1;
3493 folio_unlock(folio);
3500 pr_debug("%lu of %lu THP split\n", split, total);
3503 static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
3505 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
3506 is_vm_hugetlb_page(vma);
3509 static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
3510 unsigned long vaddr_end, unsigned int new_order)
3513 struct task_struct *task;
3514 struct mm_struct *mm;
3515 unsigned long total = 0, split = 0;
3518 vaddr_start &= PAGE_MASK;
3519 vaddr_end &= PAGE_MASK;
3521 /* Find the task_struct from pid */
3523 task = find_task_by_vpid(pid);
3529 get_task_struct(task);
3532 /* Find the mm_struct */
3533 mm = get_task_mm(task);
3534 put_task_struct(task);
3541 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3542 pid, vaddr_start, vaddr_end);
3546 * always increase addr by PAGE_SIZE, since we could have a PTE page
3547 * table filled with PTE-mapped THPs, each of which is distinct.
3549 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
3550 struct vm_area_struct *vma = vma_lookup(mm, addr);
3552 struct folio *folio;
3553 struct address_space *mapping;
3554 unsigned int target_order = new_order;
3559 /* skip special VMA and hugetlb VMA */
3560 if (vma_not_suitable_for_thp_split(vma)) {
3565 /* FOLL_DUMP to ignore special (like zero) pages */
3566 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
3568 if (IS_ERR_OR_NULL(page))
3571 folio = page_folio(page);
3572 if (!is_transparent_hugepage(folio))
3575 if (!folio_test_anon(folio)) {
3576 mapping = folio->mapping;
3577 target_order = max(new_order,
3578 mapping_min_folio_order(mapping));
3581 if (target_order >= folio_order(folio))
3586 * For folios with private, split_huge_page_to_list_to_order()
3587 * will try to drop it before split and then check if the folio
3588 * can be split or not. So skip the check here.
3590 if (!folio_test_private(folio) &&
3591 !can_split_folio(folio, NULL))
3594 if (!folio_trylock(folio))
3597 if (!folio_test_anon(folio) && folio->mapping != mapping)
3600 if (!split_folio_to_order(folio, target_order))
3605 folio_unlock(folio);
3610 mmap_read_unlock(mm);
3613 pr_debug("%lu of %lu THP split\n", split, total);
3619 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3620 pgoff_t off_end, unsigned int new_order)
3622 struct filename *file;
3623 struct file *candidate;
3624 struct address_space *mapping;
3628 unsigned long total = 0, split = 0;
3629 unsigned int min_order;
3630 unsigned int target_order;
3632 file = getname_kernel(file_path);
3636 candidate = file_open_name(file, O_RDONLY, 0);
3637 if (IS_ERR(candidate))
3640 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3641 file_path, off_start, off_end);
3643 mapping = candidate->f_mapping;
3644 min_order = mapping_min_folio_order(mapping);
3645 target_order = max(new_order, min_order);
3647 for (index = off_start; index < off_end; index += nr_pages) {
3648 struct folio *folio = filemap_get_folio(mapping, index);
3654 if (!folio_test_large(folio))
3658 nr_pages = folio_nr_pages(folio);
3660 if (target_order >= folio_order(folio))
3663 if (!folio_trylock(folio))
3666 if (folio->mapping != mapping)
3669 if (!split_folio_to_order(folio, target_order))
3673 folio_unlock(folio);
3679 filp_close(candidate, NULL);
3682 pr_debug("%lu of %lu file-backed THP split\n", split, total);
3688 #define MAX_INPUT_BUF_SZ 255
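/*
 * Example input accepted by the debugfs file below (illustrative values;
 * the file is typically /sys/kernel/debug/split_huge_pages):
 *
 *	echo 1 > split_huge_pages                            (split all THPs)
 *	echo "1234,0x7f0000000000,0x7f0000400000" > split_huge_pages
 *	echo "1234,0x7f0000000000,0x7f0000400000,4" > split_huge_pages
 *	echo "/mnt/testfile,0x0,0x200" > split_huge_pages
 *
 * i.e. "<pid>,<vaddr_start>,<vaddr_end>[,<new_order>]" for process memory or
 * "<path>,<off_start>,<off_end>[,<new_order>]" for a file, with new_order
 * defaulting to 0 when omitted.
 */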
3690 static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3691 size_t count, loff_t *ppops)
3693 static DEFINE_MUTEX(split_debug_mutex);
3696 * hold pid, start_vaddr, end_vaddr, new_order or
3697 * file_path, off_start, off_end, new_order
3699 char input_buf[MAX_INPUT_BUF_SZ];
3701 unsigned long vaddr_start, vaddr_end;
3702 unsigned int new_order = 0;
3704 ret = mutex_lock_interruptible(&split_debug_mutex);
3710 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3711 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3714 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
3716 if (input_buf[0] == '/') {
3718 char *buf = input_buf;
3719 char file_path[MAX_INPUT_BUF_SZ];
3720 pgoff_t off_start = 0, off_end = 0;
3721 size_t input_len = strlen(input_buf);
3723 tok = strsep(&buf, ",");
3725 strcpy(file_path, tok);
3731 ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order);
3732 if (ret != 2 && ret != 3) {
3736 ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order);
3743 ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order);
3744 if (ret == 1 && pid == 1) {
3745 split_huge_pages_all();
3746 ret = strlen(input_buf);
3748 } else if (ret != 3 && ret != 4) {
3753 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order);
3755 ret = strlen(input_buf);
3757 mutex_unlock(&split_debug_mutex);
3762 static const struct file_operations split_huge_pages_fops = {
3763 .owner = THIS_MODULE,
3764 .write = split_huge_pages_write,
3765 .llseek = no_llseek,
3768 static int __init split_huge_pages_debugfs(void)
3770 debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3771 &split_huge_pages_fops);
3774 late_initcall(split_huge_pages_debugfs);
3777 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
3778 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
3781 struct folio *folio = page_folio(page);
3782 struct vm_area_struct *vma = pvmw->vma;
3783 struct mm_struct *mm = vma->vm_mm;
3784 unsigned long address = pvmw->address;
3785 bool anon_exclusive;
3790 if (!(pvmw->pmd && !pvmw->pte))
3793 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
3794 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3796 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
3797 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
3798 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
3799 set_pmd_at(mm, address, pvmw->pmd, pmdval);
3803 if (pmd_dirty(pmdval))
3804 folio_mark_dirty(folio);
3805 if (pmd_write(pmdval))
3806 entry = make_writable_migration_entry(page_to_pfn(page));
3807 else if (anon_exclusive)
3808 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
3810 entry = make_readable_migration_entry(page_to_pfn(page));
3811 if (pmd_young(pmdval))
3812 entry = make_migration_entry_young(entry);
3813 if (pmd_dirty(pmdval))
3814 entry = make_migration_entry_dirty(entry);
3815 pmdswp = swp_entry_to_pmd(entry);
3816 if (pmd_soft_dirty(pmdval))
3817 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
3818 if (pmd_uffd_wp(pmdval))
3819 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
3820 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3821 folio_remove_rmap_pmd(folio, page, vma);
3823 trace_set_migration_pmd(address, pmd_val(pmdswp));
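/*
 * remove_migration_pmd() below is the inverse operation: it rebuilds a present
 * huge PMD from the migration entry, restoring the write, young, dirty,
 * soft-dirty and uffd-wp state that set_pmd_migration_entry() encoded,
 * re-establishing the anon or file rmap, and then updating the MMU cache for
 * the new mapping.
 */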
3828 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3830 struct folio *folio = page_folio(new);
3831 struct vm_area_struct *vma = pvmw->vma;
3832 struct mm_struct *mm = vma->vm_mm;
3833 unsigned long address = pvmw->address;
3834 unsigned long haddr = address & HPAGE_PMD_MASK;
3838 if (!(pvmw->pmd && !pvmw->pte))
3841 entry = pmd_to_swp_entry(*pvmw->pmd);
3843 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
3844 if (pmd_swp_soft_dirty(*pvmw->pmd))
3845 pmde = pmd_mksoft_dirty(pmde);
3846 if (is_writable_migration_entry(entry))
3847 pmde = pmd_mkwrite(pmde, vma);
3848 if (pmd_swp_uffd_wp(*pvmw->pmd))
3849 pmde = pmd_mkuffd_wp(pmde);
3850 if (!is_migration_entry_young(entry))
3851 pmde = pmd_mkold(pmde);
3852 /* NOTE: this may contain setting soft-dirty on some archs */
3853 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
3854 pmde = pmd_mkdirty(pmde);
3856 if (folio_test_anon(folio)) {
3857 rmap_t rmap_flags = RMAP_NONE;
3859 if (!is_readable_migration_entry(entry))
3860 rmap_flags |= RMAP_EXCLUSIVE;
3862 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
3864 folio_add_file_rmap_pmd(folio, new, vma);
3866 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
3867 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
3869 /* No need to invalidate - it was non-present before */
3870 update_mmu_cache_pmd(vma, address, pvmw->pmd);
3871 trace_remove_migration_pmd(address, pmd_val(pmde));