mm/huge_memory: page_add_anon_rmap() -> folio_add_anon_rmap_pmd()
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker *deferred_split_shrinker;
static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
					 struct shrink_control *sc);

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;

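/*
 * Returns the subset of @orders that this VMA can currently use, as a
 * bitmask of folio orders (e.g. BIT(PMD_ORDER) for PMD-sized THP).
 * A return value of 0 means no THP order is allowed for this VMA.
 */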
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags, bool smaps,
					 bool in_pf, bool enforce_sysfs,
					 unsigned long orders)
{
	/* Check the intersection of requested and supported orders. */
	orders &= vma_is_anonymous(vma) ?
			THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
	if (!orders)
		return 0;

	if (!vma->vm_mm)		/* vdso */
		return 0;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return 0;
	/*
	 * If the hardware/firmware marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
		return 0;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf ? orders : 0;

	/*
	 * khugepaged special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
		return 0;

	/*
	 * Check alignment for file vma and size for both file and anon vma by
	 * filtering out the unsuitable orders.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers.
	 */
	if (!in_pf) {
		int order = highest_order(orders);
		unsigned long addr;

		while (orders) {
			addr = vma->vm_end - (PAGE_SIZE << order);
			if (thp_vma_suitable_order(vma, addr, order))
				break;
			order = next_order(&orders, order);
		}

		if (!orders)
			return 0;
	}

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
				     !enforce_sysfs, vma->vm_mm, vm_flags)
			? orders : 0;

	if (!vma_is_anonymous(vma)) {
		/*
		 * Enforce sysfs THP requirements as necessary. Anonymous vmas
		 * were already handled in thp_vma_allowable_orders().
		 */
		if (enforce_sysfs &&
		    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
						    !hugepage_global_always())))
			return 0;

		/*
		 * Trust that ->huge_fault() handlers know what they are doing
		 * in fault path.
		 */
		if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
			return orders;
		/* Only regular file is valid in collapse path */
		if (((!in_pf || smaps)) && file_thp_enabled(vma))
			return orders;
		return 0;
	}

	if (vma_is_temporary_stack(vma))
		return 0;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf) ? orders : 0;

	return orders;
}

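/*
 * Huge zero page handling: the first user allocates the page and leaves the
 * refcount at 2, one reference for the caller and one that is only dropped
 * by the shrinker (see shrink_huge_zero_page_scan()).
 */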
static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker *huge_zero_page_shrinker;

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

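/*
 * "defrag" controls direct compaction at THP allocation time, e.g.:
 *	echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 * vma_thp_gfp_mask() translates the selected mode into GFP flags.
 */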
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
static void thpsize_release(struct kobject *kobj);
static DEFINE_SPINLOCK(huge_anon_orders_lock);
static LIST_HEAD(thpsize_list);

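/*
 * Each thpsize backs one per-order sysfs directory,
 * /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/, whose "enabled"
 * file selects always/inherit/madvise/never for that order.
 */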
struct thpsize {
	struct kobject kobj;
	struct list_head node;
	int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

static ssize_t thpsize_enabled_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int order = to_thpsize(kobj)->order;
	const char *output;

	if (test_bit(order, &huge_anon_orders_always))
		output = "[always] inherit madvise never";
	else if (test_bit(order, &huge_anon_orders_inherit))
		output = "always [inherit] madvise never";
	else if (test_bit(order, &huge_anon_orders_madvise))
		output = "always inherit [madvise] never";
	else
		output = "always inherit madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t thpsize_enabled_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int order = to_thpsize(kobj)->order;
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_always);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "inherit")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_inherit);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "madvise")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		set_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "never")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else
		ret = -EINVAL;

	return ret;
}

static struct kobj_attribute thpsize_enabled_attr =
	__ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);

static struct attribute *thpsize_attrs[] = {
	&thpsize_enabled_attr.attr,
	NULL,
};

static const struct attribute_group thpsize_attr_group = {
	.attrs = thpsize_attrs,
};

static const struct kobj_type thpsize_ktype = {
	.release = &thpsize_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct thpsize *thpsize_create(int order, struct kobject *parent)
{
	unsigned long size = (PAGE_SIZE << order) / SZ_1K;
	struct thpsize *thpsize;
	int ret;

	thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
	if (!thpsize)
		return ERR_PTR(-ENOMEM);

	ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
				   "hugepages-%lukB", size);
	if (ret) {
		kfree(thpsize);
		return ERR_PTR(ret);
	}

	ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
	if (ret) {
		kobject_put(&thpsize->kobj);
		return ERR_PTR(ret);
	}

	thpsize->order = order;
	return thpsize;
}

static void thpsize_release(struct kobject *kobj)
{
	kfree(to_thpsize(kobj));
}

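/*
 * Create /sys/kernel/mm/transparent_hugepage, register the main and
 * khugepaged attribute groups, and add one hugepages-<size>kB/
 * subdirectory per supported anonymous THP order.
 */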
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;
	struct thpsize *thpsize;
	unsigned long orders;
	int order;

	/*
	 * Default to setting PMD-sized THP to inherit the global setting and
	 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
	 * constant so we have to do this here.
	 */
	huge_anon_orders_inherit = BIT(PMD_ORDER);

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	orders = THP_ORDERS_ALL_ANON;
	order = highest_order(orders);
	while (orders) {
		thpsize = thpsize_create(order, *hugepage_kobj);
		if (IS_ERR(thpsize)) {
			pr_err("failed to create thpsize for order %d\n", order);
			err = PTR_ERR(thpsize);
			goto remove_all;
		}
		list_add(&thpsize->node, &thpsize_list);
		order = next_order(&orders, order);
	}

	return 0;

remove_all:
	hugepage_exit_sysfs(*hugepage_kobj);
	return err;
remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	struct thpsize *thpsize, *tmp;

	list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
		list_del(&thpsize->node);
		kobject_put(&thpsize->kobj);
	}

	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

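/*
 * Register the two THP shrinkers: one that frees the huge zero page when
 * only its cached reference remains, and one that walks the deferred split
 * queues under memory pressure.
 */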
static int __init thp_shrinker_init(void)
{
	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
	if (!huge_zero_page_shrinker)
		return -ENOMEM;

	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						 SHRINKER_MEMCG_AWARE |
						 SHRINKER_NONSLAB,
						 "thp-deferred_split");
	if (!deferred_split_shrinker) {
		shrinker_free(huge_zero_page_shrinker);
		return -ENOMEM;
	}

	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
	shrinker_register(huge_zero_page_shrinker);

	deferred_split_shrinker->count_objects = deferred_split_count;
	deferred_split_shrinker->scan_objects = deferred_split_scan;
	shrinker_register(deferred_split_shrinker);

	return 0;
}

static void __init thp_shrinker_exit(void)
{
	shrinker_free(huge_zero_page_shrinker);
	shrinker_free(deferred_split_shrinker);
}

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = thp_shrinker_init();
	if (err)
		goto err_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	thp_shrinker_exit();
err_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

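/*
 * Boot-time override, e.g. "transparent_hugepage=madvise" on the kernel
 * command line; accepts always, madvise or never.
 */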
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd, vma);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	return &pgdat->deferred_split_queue;
}
#endif

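/*
 * Initialise the parts of a freshly allocated large folio that rmap and the
 * deferred split queue rely on; _deferred_list lives in the second tail
 * page, hence the order >= 2 check.
 */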
void folio_prep_large_rmappable(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
	INIT_LIST_HEAD(&folio->_deferred_list);
	folio_set_large_rmappable(folio);
}

static inline bool is_transparent_hugepage(struct folio *folio)
{
	if (!folio_test_large(folio))
		return false;

	return is_huge_zero_page(&folio->page) ||
	       folio_test_large_rmappable(folio);
}

static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

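/*
 * Install a freshly allocated THP at a PMD-aligned address: charge it to
 * the memcg, clear it, and map it while holding the PMD lock, handing the
 * fault to userfaultfd instead if the range is registered for MISSING
 * events.
 */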
static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(page);
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
		folio_put(folio);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	folio_throttle_swaprate(folio, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			folio_put(folio);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		folio_add_new_anon_rmap(folio, vma, haddr);
		folio_add_lru_vma(folio, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	folio_put(folio);
	return ret;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

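/*
 * Anonymous PMD fault: read faults may be satisfied with the shared huge
 * zero page when permitted; otherwise allocate a THP, or return
 * VM_FAULT_FALLBACK so the fault is retried with small pages.
 */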
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t prot = vma->vm_page_prot;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}

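/*
 * Copy a huge PMD at fork time. If the source page may be pinned, the PMD
 * is split and -EAGAIN is returned so the copy is retried at PTE level;
 * otherwise both sides end up mapping the page read-only.
 */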
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (!is_readable_migration_entry(entry)) {
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			if (pmd_swp_uffd_wp(*src_pmd))
				pmd = pmd_swp_mkuffd_wp(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		if (!userfaultfd_wp(dst_vma))
			pmd = pmd_swp_clear_uffd_wp(pmd);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		mm_get_huge_zero_page(dst_mm);
		goto out_zero_page;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);

	get_page(src_page);
	if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
		/* Page maybe pinned: split and retry the fault on PTEs. */
		put_page(src_page);
		pte_free(dst_mm, pgtable);
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
		return -EAGAIN;
	}
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
out_zero_page:
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	if (!userfaultfd_wp(dst_vma))
		pmd = pmd_clear_uffd_wp(pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		      pud_t *pud, bool write)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (write)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				  pud, _pud, write))
		update_mmu_cache_pud(vma, addr, pud);
}

struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 *
	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);

	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}

int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * When page table lock is held, the huge zero pud should not be
	 * under splitting since we don't split the page itself, only pud to
	 * a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	/*
	 * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
	 * and split if duplicating fails.
	 */
	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void huge_pmd_set_accessed(struct vm_fault *vmf)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
		goto unlock;

	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);

unlock:
	spin_unlock(vmf->ptl);
}

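/*
 * Write (or unshare) fault on a PMD-mapped THP: reuse the page in place
 * when it is exclusively owned, otherwise split the PMD and let the fault
 * be retried with PTEs (VM_FAULT_FALLBACK).
 */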
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t orig_pmd = vmf->orig_pmd;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);

	if (is_huge_zero_pmd(orig_pmd))
		goto fallback;

	spin_lock(vmf->ptl);

	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	page = pmd_page(orig_pmd);
	folio = page_folio(page);
	VM_BUG_ON_PAGE(!PageHead(page), page);

	/* Early check when only holding the PT lock. */
	if (PageAnonExclusive(page))
		goto reuse;

	if (!folio_trylock(folio)) {
		folio_get(folio);
		spin_unlock(vmf->ptl);
		folio_lock(folio);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			spin_unlock(vmf->ptl);
			folio_unlock(folio);
			folio_put(folio);
			return 0;
		}
		folio_put(folio);
	}

	/* Recheck after temporarily dropping the PT lock. */
	if (PageAnonExclusive(page)) {
		folio_unlock(folio);
		goto reuse;
	}

	/*
	 * See do_wp_page(): we can only reuse the folio exclusively if
	 * there are no additional references. Note that we always drain
	 * the LRU cache immediately after adding a THP.
	 */
	if (folio_ref_count(folio) >
			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
		goto unlock_fallback;
	if (folio_test_swapcache(folio))
		folio_free_swap(folio);
	if (folio_ref_count(folio) == 1) {
		pmd_t entry;

		folio_move_anon_rmap(folio, vma);
		SetPageAnonExclusive(page);
		folio_unlock(folio);
reuse:
		if (unlikely(unshare)) {
			spin_unlock(vmf->ptl);
			return 0;
		}
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		spin_unlock(vmf->ptl);
		return 0;
	}

unlock_fallback:
	folio_unlock(folio);
	spin_unlock(vmf->ptl);
fallback:
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;
}

static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
					   unsigned long addr, pmd_t pmd)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable (NUMA hinting). */
	if (pmd_protnone(pmd))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_huge_pmd_wp(vma, pmd))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/* See can_change_pte_writable(). */
		page = vm_normal_page_pmd(vma, addr, pmd);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/* See can_change_pte_writable(). */
	return pmd_dirty(pmd);
}

/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pmd is writable, we can write to the page. */
	if (pmd_write(pmd))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}

b676b293 1654struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
71e3aac0
AA
1655 unsigned long addr,
1656 pmd_t *pmd,
1657 unsigned int flags)
1658{
b676b293 1659 struct mm_struct *mm = vma->vm_mm;
5535be30 1660 struct page *page;
0f089235 1661 int ret;
71e3aac0 1662
c4088ebd 1663 assert_spin_locked(pmd_lockptr(mm, pmd));
71e3aac0 1664
5535be30
DH
1665 page = pmd_page(*pmd);
1666 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1667
1668 if ((flags & FOLL_WRITE) &&
1669 !can_follow_write_pmd(*pmd, page, vma, flags))
1670 return NULL;
71e3aac0 1671
85facf25
KS
1672 /* Avoid dumping huge zero page */
1673 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1674 return ERR_PTR(-EFAULT);
1675
d74943a2 1676 if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
5535be30 1677 return NULL;
3faa52c0 1678
84209e87 1679 if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
a7f22660
DH
1680 return ERR_PTR(-EMLINK);
1681
b6a2619c
DH
1682 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
1683 !PageAnonExclusive(page), page);
1684
0f089235
LG
1685 ret = try_grab_page(page, flags);
1686 if (ret)
1687 return ERR_PTR(ret);
3faa52c0 1688
3565fce3 1689 if (flags & FOLL_TOUCH)
a69e4717 1690 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
3faa52c0 1691
71e3aac0 1692 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
ca120cf6 1693 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
71e3aac0 1694
71e3aac0
AA
1695 return page;
1696}
1697
d10e63f2 1698/* NUMA hinting page fault entry point for trans huge pmds */
5db4f15c 1699vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
d10e63f2 1700{
82b0f8c3 1701 struct vm_area_struct *vma = vmf->vma;
c5b5a3dd
YS
1702 pmd_t oldpmd = vmf->orig_pmd;
1703 pmd_t pmd;
667ffc31 1704 struct folio *folio;
82b0f8c3 1705 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
667ffc31 1706 int nid = NUMA_NO_NODE;
33024536 1707 int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
6a56ccbc 1708 bool migrated = false, writable = false;
6688cc05 1709 int flags = 0;
d10e63f2 1710
82b0f8c3 1711 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
c5b5a3dd 1712 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
82b0f8c3 1713 spin_unlock(vmf->ptl);
de466bd6
MG
1714 goto out;
1715 }
1716
c5b5a3dd 1717 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
6a56ccbc
DH
1718
1719 /*
1720 * Detect now whether the PMD could be writable; this information
1721 * is only valid while holding the PT lock.
1722 */
1723 writable = pmd_write(pmd);
1724 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1725 can_change_pmd_writable(vma, vmf->address, pmd))
1726 writable = true;
1727
667ffc31
KW
1728 folio = vm_normal_folio_pmd(vma, haddr, pmd);
1729 if (!folio)
c5b5a3dd
YS
1730 goto out_map;
1731
1732 /* See similar comment in do_numa_page for explanation */
6a56ccbc 1733 if (!writable)
c5b5a3dd
YS
1734 flags |= TNF_NO_GROUP;
1735
667ffc31 1736 nid = folio_nid(folio);
33024536
HY
1737 /*
1738 * For memory tiering mode, the cpupid of a slow memory page is used
1739 * to record the page's access time, so use the default value here.
1740 */
667ffc31 1741 if (node_is_toptier(nid))
c4a8d2fa 1742 last_cpupid = folio_last_cpupid(folio);
cda6d936 1743 target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
c5b5a3dd 1744 if (target_nid == NUMA_NO_NODE) {
667ffc31 1745 folio_put(folio);
c5b5a3dd
YS
1746 goto out_map;
1747 }
1748
82b0f8c3 1749 spin_unlock(vmf->ptl);
6a56ccbc 1750 writable = false;
8b1b436d 1751
667ffc31 1752 migrated = migrate_misplaced_folio(folio, vma, target_nid);
6688cc05
PZ
1753 if (migrated) {
1754 flags |= TNF_MIGRATED;
667ffc31 1755 nid = target_nid;
c5b5a3dd 1756 } else {
074c2381 1757 flags |= TNF_MIGRATE_FAIL;
c5b5a3dd
YS
1758 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1759 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1760 spin_unlock(vmf->ptl);
1761 goto out;
1762 }
1763 goto out_map;
1764 }
b8916634
MG
1765
1766out:
667ffc31
KW
1767 if (nid != NUMA_NO_NODE)
1768 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
8191acbd 1769
d10e63f2 1770 return 0;
c5b5a3dd
YS
1771
1772out_map:
1773 /* Restore the PMD */
1774 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1775 pmd = pmd_mkyoung(pmd);
6a56ccbc 1776 if (writable)
161e393c 1777 pmd = pmd_mkwrite(pmd, vma);
c5b5a3dd
YS
1778 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1779 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1780 spin_unlock(vmf->ptl);
1781 goto out;
d10e63f2
MG
1782}
1783
319904ad
HY
1784/*
1785 * Return true if we did MADV_FREE successfully on the entire pmd page;
1786 * otherwise, return false.
1787 */
1788bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
b8d3c4c3 1789 pmd_t *pmd, unsigned long addr, unsigned long next)
b8d3c4c3
MK
1790{
1791 spinlock_t *ptl;
1792 pmd_t orig_pmd;
fc986a38 1793 struct folio *folio;
b8d3c4c3 1794 struct mm_struct *mm = tlb->mm;
319904ad 1795 bool ret = false;
b8d3c4c3 1796
ed6a7935 1797 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
07e32661 1798
b6ec57f4
KS
1799 ptl = pmd_trans_huge_lock(pmd, vma);
1800 if (!ptl)
25eedabe 1801 goto out_unlocked;
b8d3c4c3
MK
1802
1803 orig_pmd = *pmd;
319904ad 1804 if (is_huge_zero_pmd(orig_pmd))
b8d3c4c3 1805 goto out;
b8d3c4c3 1806
84c3fc4e
ZY
1807 if (unlikely(!pmd_present(orig_pmd))) {
1808 VM_BUG_ON(thp_migration_supported() &&
1809 !is_pmd_migration_entry(orig_pmd));
1810 goto out;
1811 }
1812
fc986a38 1813 folio = pfn_folio(pmd_pfn(orig_pmd));
b8d3c4c3 1814 /*
fc986a38
KW
1815 * If other processes are mapping this folio, we can't discard
1816 * the folio unless they all do MADV_FREE, so let's skip the folio.
b8d3c4c3 1817 */
20b18aad 1818 if (folio_estimated_sharers(folio) != 1)
b8d3c4c3
MK
1819 goto out;
1820
fc986a38 1821 if (!folio_trylock(folio))
b8d3c4c3
MK
1822 goto out;
1823
1824 /*
1825 * If the user wants to discard only part of the THP's pages, split it
1826 * so that MADV_FREE will deactivate only those pages.
1827 */
1828 if (next - addr != HPAGE_PMD_SIZE) {
fc986a38 1829 folio_get(folio);
b8d3c4c3 1830 spin_unlock(ptl);
fc986a38
KW
1831 split_folio(folio);
1832 folio_unlock(folio);
1833 folio_put(folio);
b8d3c4c3
MK
1834 goto out_unlocked;
1835 }
1836
fc986a38
KW
1837 if (folio_test_dirty(folio))
1838 folio_clear_dirty(folio);
1839 folio_unlock(folio);
b8d3c4c3 1840
b8d3c4c3 1841 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
58ceeb6b 1842 pmdp_invalidate(vma, addr, pmd);
b8d3c4c3
MK
1843 orig_pmd = pmd_mkold(orig_pmd);
1844 orig_pmd = pmd_mkclean(orig_pmd);
1845
1846 set_pmd_at(mm, addr, pmd, orig_pmd);
1847 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1848 }
802a3a92 1849
6a6fe9eb 1850 folio_mark_lazyfree(folio);
319904ad 1851 ret = true;
b8d3c4c3
MK
1852out:
1853 spin_unlock(ptl);
1854out_unlocked:
1855 return ret;
1856}
1857
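/*
 * Illustrative userspace sketch (not part of this file): one way the
 * huge-pmd MADV_FREE path above gets exercised. Assumes THP is enabled
 * for the mapping and that the kernel happens to back it with a
 * PMD-sized page; the helper name is made up for the example.
 */
#include <string.h>
#include <sys/mman.h>

static void madv_free_thp_example(void)
{
	size_t len = 2UL << 20;		/* one PMD-sized (2MB) region */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return;
	madvise(p, len, MADV_HUGEPAGE);	/* ask for a THP if possible */
	memset(p, 0xaa, len);		/* fault the range in */
	/* Covers the whole pmd, so madvise_free_huge_pmd() need not split. */
	madvise(p, len, MADV_FREE);
	munmap(p, len);
}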
953c66c2
AK
1858static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1859{
1860 pgtable_t pgtable;
1861
1862 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1863 pte_free(mm, pgtable);
c4812909 1864 mm_dec_nr_ptes(mm);
953c66c2
AK
1865}
1866
71e3aac0 1867int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
f21760b1 1868 pmd_t *pmd, unsigned long addr)
71e3aac0 1869{
da146769 1870 pmd_t orig_pmd;
bf929152 1871 spinlock_t *ptl;
71e3aac0 1872
ed6a7935 1873 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
07e32661 1874
b6ec57f4
KS
1875 ptl = __pmd_trans_huge_lock(pmd, vma);
1876 if (!ptl)
da146769
KS
1877 return 0;
1878 /*
1879 * For architectures like ppc64 we look at deposited pgtable
1880 * when calling pmdp_huge_get_and_clear. So do the
1881 * pgtable_trans_huge_withdraw after finishing pmdp related
1882 * operations.
1883 */
93a98695
AK
1884 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1885 tlb->fullmm);
e5136e87 1886 arch_check_zapped_pmd(vma, orig_pmd);
da146769 1887 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2484ca9b 1888 if (vma_is_special_huge(vma)) {
3b6521f5
OH
1889 if (arch_needs_pgtable_deposit())
1890 zap_deposited_table(tlb->mm, pmd);
da146769 1891 spin_unlock(ptl);
da146769 1892 } else if (is_huge_zero_pmd(orig_pmd)) {
c14a6eb4 1893 zap_deposited_table(tlb->mm, pmd);
da146769 1894 spin_unlock(ptl);
da146769 1895 } else {
616b8371
ZY
1896 struct page *page = NULL;
1897 int flush_needed = 1;
1898
1899 if (pmd_present(orig_pmd)) {
1900 page = pmd_page(orig_pmd);
cea86fe2 1901 page_remove_rmap(page, vma, true);
616b8371
ZY
1902 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1903 VM_BUG_ON_PAGE(!PageHead(page), page);
1904 } else if (thp_migration_supported()) {
1905 swp_entry_t entry;
1906
1907 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1908 entry = pmd_to_swp_entry(orig_pmd);
af5cdaf8 1909 page = pfn_swap_entry_to_page(entry);
616b8371
ZY
1910 flush_needed = 0;
1911 } else
1912 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1913
b5072380 1914 if (PageAnon(page)) {
c14a6eb4 1915 zap_deposited_table(tlb->mm, pmd);
b5072380
KS
1916 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1917 } else {
953c66c2
AK
1918 if (arch_needs_pgtable_deposit())
1919 zap_deposited_table(tlb->mm, pmd);
fadae295 1920 add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
b5072380 1921 }
616b8371 1922
da146769 1923 spin_unlock(ptl);
616b8371
ZY
1924 if (flush_needed)
1925 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
025c5b24 1926 }
da146769 1927 return 1;
71e3aac0
AA
1928}
1929
1dd38b6c
AK
1930#ifndef pmd_move_must_withdraw
1931static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1932 spinlock_t *old_pmd_ptl,
1933 struct vm_area_struct *vma)
1934{
1935 /*
1936 * With the split pmd lock we also need to move the preallocated
1937 * PTE page table if new_pmd is on a different PMD page table.
1938 *
1939 * We also don't deposit and withdraw tables for file pages.
1940 */
1941 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1942}
1943#endif
1944
ab6e3d09
NH
1945static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1946{
1947#ifdef CONFIG_MEM_SOFT_DIRTY
1948 if (unlikely(is_pmd_migration_entry(pmd)))
1949 pmd = pmd_swp_mksoft_dirty(pmd);
1950 else if (pmd_present(pmd))
1951 pmd = pmd_mksoft_dirty(pmd);
1952#endif
1953 return pmd;
1954}
1955
bf8616d5 1956bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
b8aa9d9d 1957 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
37a1c49a 1958{
bf929152 1959 spinlock_t *old_ptl, *new_ptl;
37a1c49a 1960 pmd_t pmd;
37a1c49a 1961 struct mm_struct *mm = vma->vm_mm;
5d190420 1962 bool force_flush = false;
37a1c49a 1963
37a1c49a
AA
1964 /*
1965 * The destination pmd shouldn't be established, free_pgtables()
a5be621e
HD
1966 * should have released it; but move_page_tables() might have already
1967 * inserted a page table, if racing against shmem/file collapse.
37a1c49a 1968 */
a5be621e 1969 if (!pmd_none(*new_pmd)) {
37a1c49a 1970 VM_BUG_ON(pmd_trans_huge(*new_pmd));
4b471e88 1971 return false;
37a1c49a
AA
1972 }
1973
bf929152
KS
1974 /*
1975 * We don't have to worry about the ordering of src and dst
c1e8d7c6 1976 * ptlocks because exclusive mmap_lock prevents deadlock.
bf929152 1977 */
b6ec57f4
KS
1978 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1979 if (old_ptl) {
bf929152
KS
1980 new_ptl = pmd_lockptr(mm, new_pmd);
1981 if (new_ptl != old_ptl)
1982 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
8809aa2d 1983 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
eb66ae03 1984 if (pmd_present(pmd))
a2ce2666 1985 force_flush = true;
025c5b24 1986 VM_BUG_ON(!pmd_none(*new_pmd));
3592806c 1987
1dd38b6c 1988 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
b3084f4d 1989 pgtable_t pgtable;
3592806c
KS
1990 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1991 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
3592806c 1992 }
ab6e3d09
NH
1993 pmd = move_soft_dirty_pmd(pmd);
1994 set_pmd_at(mm, new_addr, new_pmd, pmd);
5d190420 1995 if (force_flush)
7c38f181 1996 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
eb66ae03
LT
1997 if (new_ptl != old_ptl)
1998 spin_unlock(new_ptl);
bf929152 1999 spin_unlock(old_ptl);
4b471e88 2000 return true;
37a1c49a 2001 }
4b471e88 2002 return false;
37a1c49a
AA
2003}
2004
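/*
 * Illustrative userspace sketch (not part of this file): move_huge_pmd()
 * above is reached when mremap() relocates a THP-backed anonymous range.
 * A minimal sketch, assuming old and new addresses are both PMD-aligned
 * so whole huge pmds can be moved; the helper name is made up.
 */
#define _GNU_SOURCE
#include <sys/mman.h>

static void *mremap_thp_example(void *old_addr, void *new_addr, size_t len)
{
	/* MREMAP_FIXED forces an actual move to new_addr instead of growing
	 * in place, which is what walks the move_huge_pmd() path. */
	return mremap(old_addr, len, len, MREMAP_MAYMOVE | MREMAP_FIXED,
		      new_addr);
}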
f123d74a
MG
2005/*
2006 * Returns
2007 * - 0 if PMD could not be locked
f0953a1b 2008 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
e346e668 2009 * or if prot_numa but THP migration is not supported
f0953a1b 2010 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
f123d74a 2011 */
4a18419f
NA
2012int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2013 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2014 unsigned long cp_flags)
cd7548ab
JW
2015{
2016 struct mm_struct *mm = vma->vm_mm;
bf929152 2017 spinlock_t *ptl;
c9fe6656 2018 pmd_t oldpmd, entry;
58705444 2019 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
292924b2
PX
2020 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2021 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6a56ccbc 2022 int ret = 1;
cd7548ab 2023
4a18419f
NA
2024 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2025
e346e668
YS
2026 if (prot_numa && !thp_migration_supported())
2027 return 1;
2028
b6ec57f4 2029 ptl = __pmd_trans_huge_lock(pmd, vma);
0a85e51d
KS
2030 if (!ptl)
2031 return 0;
e944fd67 2032
84c3fc4e
ZY
2033#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2034 if (is_swap_pmd(*pmd)) {
2035 swp_entry_t entry = pmd_to_swp_entry(*pmd);
d986ba2b 2036 struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
24bf08c4 2037 pmd_t newpmd;
84c3fc4e
ZY
2038
2039 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
4dd845b5 2040 if (is_writable_migration_entry(entry)) {
84c3fc4e
ZY
2041 /*
2042 * A protection check is difficult so
2043 * just be safe and disable write
2044 */
d986ba2b 2045 if (folio_test_anon(folio))
6c287605
DH
2046 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2047 else
2048 entry = make_readable_migration_entry(swp_offset(entry));
84c3fc4e 2049 newpmd = swp_entry_to_pmd(entry);
ab6e3d09
NH
2050 if (pmd_swp_soft_dirty(*pmd))
2051 newpmd = pmd_swp_mksoft_dirty(newpmd);
24bf08c4
DH
2052 } else {
2053 newpmd = *pmd;
84c3fc4e 2054 }
24bf08c4
DH
2055
2056 if (uffd_wp)
2057 newpmd = pmd_swp_mkuffd_wp(newpmd);
2058 else if (uffd_wp_resolve)
2059 newpmd = pmd_swp_clear_uffd_wp(newpmd);
2060 if (!pmd_same(*pmd, newpmd))
2061 set_pmd_at(mm, addr, pmd, newpmd);
84c3fc4e
ZY
2062 goto unlock;
2063 }
2064#endif
2065
a1a3a2fc 2066 if (prot_numa) {
d986ba2b 2067 struct folio *folio;
33024536 2068 bool toptier;
a1a3a2fc
HY
2069 /*
2070 * Avoid trapping faults against the zero page. The read-only
2071 * data is likely to be read-cached on the local CPU and
2072 * local/remote hits to the zero page are not interesting.
2073 */
2074 if (is_huge_zero_pmd(*pmd))
2075 goto unlock;
025c5b24 2076
a1a3a2fc
HY
2077 if (pmd_protnone(*pmd))
2078 goto unlock;
0a85e51d 2079
d986ba2b
KW
2080 folio = page_folio(pmd_page(*pmd));
2081 toptier = node_is_toptier(folio_nid(folio));
a1a3a2fc
HY
2082 /*
2083 * Skip scanning top tier node if normal numa
2084 * balancing is disabled
2085 */
2086 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
33024536 2087 toptier)
a1a3a2fc 2088 goto unlock;
33024536
HY
2089
2090 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
2091 !toptier)
d986ba2b
KW
2092 folio_xchg_access_time(folio,
2093 jiffies_to_msecs(jiffies));
a1a3a2fc 2094 }
ced10803 2095 /*
3e4e28c5 2096 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
ced10803 2097 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
3e4e28c5 2098 * which is also under mmap_read_lock(mm):
ced10803
KS
2099 *
2100 * CPU0:                         CPU1:
2101 *                               change_huge_pmd(prot_numa=1)
2102 *                                pmdp_huge_get_and_clear_notify()
2103 * madvise_dontneed()
2104 *  zap_pmd_range()
2105 *   pmd_trans_huge(*pmd) == 0 (without ptl)
2106 *   // skip the pmd
2107 *                                set_pmd_at();
2108 *                                // pmd is re-established
2109 *
2110 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
2111 * which may break userspace.
2112 *
4f831457 2113 * pmdp_invalidate_ad() is required to make sure we don't miss
ced10803
KS
2114 * dirty/young flags set by hardware.
2115 */
4f831457 2116 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
ced10803 2117
c9fe6656 2118 entry = pmd_modify(oldpmd, newprot);
f1eb1bac 2119 if (uffd_wp)
292924b2 2120 entry = pmd_mkuffd_wp(entry);
f1eb1bac 2121 else if (uffd_wp_resolve)
292924b2
PX
2122 /*
2123 * Leave the write bit to be handled by PF interrupt
2124 * handler, then things like COW could be properly
2125 * handled.
2126 */
2127 entry = pmd_clear_uffd_wp(entry);
c27f479e
DH
2128
2129 /* See change_pte_range(). */
2130 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2131 can_change_pmd_writable(vma, addr, entry))
161e393c 2132 entry = pmd_mkwrite(entry, vma);
c27f479e 2133
0a85e51d
KS
2134 ret = HPAGE_PMD_NR;
2135 set_pmd_at(mm, addr, pmd, entry);
4a18419f 2136
c9fe6656
NA
2137 if (huge_pmd_needs_flush(oldpmd, entry))
2138 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
0a85e51d
KS
2139unlock:
2140 spin_unlock(ptl);
025c5b24
NH
2141 return ret;
2142}
2143
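/*
 * Illustrative userspace sketch (not part of this file): change_huge_pmd()
 * above is one of the paths taken by mprotect() on a THP-backed range.
 * A minimal sketch, assuming "addr"/"len" describe a PMD-mapped THP
 * range; the helper name is made up.
 */
#include <sys/mman.h>

static int mprotect_thp_example(void *addr, size_t len)
{
	/* Each call changes the protection of the whole huge pmd in one
	 * go (the kernel side returns HPAGE_PMD_NR when a TLB flush was
	 * needed, as documented above change_huge_pmd()). */
	if (mprotect(addr, len, PROT_READ))
		return -1;
	return mprotect(addr, len, PROT_READ | PROT_WRITE);
}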
adef4406
AA
2144#ifdef CONFIG_USERFAULTFD
2145/*
2146 * The PT lock for src_pmd and the mmap_lock for reading are held by
2147 * the caller, but this function must release the page table lock before returning.
2148 * Just move the page from src_pmd to dst_pmd if possible.
2149 * Return zero if succeeded in moving the page, -EAGAIN if it needs to be
2150 * repeated by the caller, or other errors in case of failure.
2151 */
2152int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2153 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2154 unsigned long dst_addr, unsigned long src_addr)
2155{
2156 pmd_t _dst_pmd, src_pmdval;
2157 struct page *src_page;
2158 struct folio *src_folio;
2159 struct anon_vma *src_anon_vma;
2160 spinlock_t *src_ptl, *dst_ptl;
2161 pgtable_t src_pgtable;
2162 struct mmu_notifier_range range;
2163 int err = 0;
2164
2165 src_pmdval = *src_pmd;
2166 src_ptl = pmd_lockptr(mm, src_pmd);
2167
2168 lockdep_assert_held(src_ptl);
2169 mmap_assert_locked(mm);
2170
2171 /* Sanity checks before the operation */
2172 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2173 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2174 spin_unlock(src_ptl);
2175 return -EINVAL;
2176 }
2177
2178 if (!pmd_trans_huge(src_pmdval)) {
2179 spin_unlock(src_ptl);
2180 if (is_pmd_migration_entry(src_pmdval)) {
2181 pmd_migration_entry_wait(mm, &src_pmdval);
2182 return -EAGAIN;
2183 }
2184 return -ENOENT;
2185 }
2186
2187 src_page = pmd_page(src_pmdval);
2188 if (unlikely(!PageAnonExclusive(src_page))) {
2189 spin_unlock(src_ptl);
2190 return -EBUSY;
2191 }
2192
2193 src_folio = page_folio(src_page);
2194 folio_get(src_folio);
2195 spin_unlock(src_ptl);
2196
2197 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2198 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2199 src_addr + HPAGE_PMD_SIZE);
2200 mmu_notifier_invalidate_range_start(&range);
2201
2202 folio_lock(src_folio);
2203
2204 /*
2205 * split_huge_page walks the anon_vma chain without the page
2206 * lock. Serialize against it with the anon_vma lock; the page
2207 * lock is not enough.
2208 */
2209 src_anon_vma = folio_get_anon_vma(src_folio);
2210 if (!src_anon_vma) {
2211 err = -EAGAIN;
2212 goto unlock_folio;
2213 }
2214 anon_vma_lock_write(src_anon_vma);
2215
2216 dst_ptl = pmd_lockptr(mm, dst_pmd);
2217 double_pt_lock(src_ptl, dst_ptl);
2218 if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2219 !pmd_same(*dst_pmd, dst_pmdval))) {
2220 err = -EAGAIN;
2221 goto unlock_ptls;
2222 }
2223 if (folio_maybe_dma_pinned(src_folio) ||
2224 !PageAnonExclusive(&src_folio->page)) {
2225 err = -EBUSY;
2226 goto unlock_ptls;
2227 }
2228
2229 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2230 WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2231 err = -EBUSY;
2232 goto unlock_ptls;
2233 }
2234
2235 folio_move_anon_rmap(src_folio, dst_vma);
2236 WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
2237
2238 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2239 /* Folio got pinned from under us. Put it back and fail the move. */
2240 if (folio_maybe_dma_pinned(src_folio)) {
2241 set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2242 err = -EBUSY;
2243 goto unlock_ptls;
2244 }
2245
2246 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
2247 /* Follow mremap() behavior and treat the entry dirty after the move */
2248 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2249 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2250
2251 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2252 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2253unlock_ptls:
2254 double_pt_unlock(src_ptl, dst_ptl);
2255 anon_vma_unlock_write(src_anon_vma);
2256 put_anon_vma(src_anon_vma);
2257unlock_folio:
2258 /* unblock rmap walks */
2259 folio_unlock(src_folio);
2260 mmu_notifier_invalidate_range_end(&range);
2261 folio_put(src_folio);
2262 return err;
2263}
2264#endif /* CONFIG_USERFAULTFD */
2265
025c5b24 2266/*
8f19b0c0 2267 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
025c5b24 2268 *
8f19b0c0
HY
2269 * Note that if it returns the page table lock pointer, this routine returns without
2270 * unlocking the page table lock, so callers must unlock it.
025c5b24 2271 */
b6ec57f4 2272spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
025c5b24 2273{
b6ec57f4
KS
2274 spinlock_t *ptl;
2275 ptl = pmd_lock(vma->vm_mm, pmd);
84c3fc4e
ZY
2276 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
2277 pmd_devmap(*pmd)))
b6ec57f4
KS
2278 return ptl;
2279 spin_unlock(ptl);
2280 return NULL;
cd7548ab
JW
2281}
2282
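/*
 * Illustrative caller pattern (a sketch, mirroring madvise_free_huge_pmd()
 * and zap_huge_pmd() above; the helper name is made up): whoever gets a
 * non-NULL ptl back owns the page table lock and must drop it.
 */
static bool example_with_locked_huge_pmd(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl = __pmd_trans_huge_lock(pmd, vma);

	if (!ptl)
		return false;	/* not a huge (or swap/devmap) pmd */
	/* ... inspect or modify *pmd under the lock ... */
	spin_unlock(ptl);	/* callers must unlock, as noted above */
	return true;
}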
a00cc7d9 2283/*
d965e390 2284 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
a00cc7d9 2285 *
d965e390
ML
2286 * Note that if it returns the page table lock pointer, this routine returns without
2287 * unlocking the page table lock, so callers must unlock it.
a00cc7d9
MW
2288 */
2289spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2290{
2291 spinlock_t *ptl;
2292
2293 ptl = pud_lock(vma->vm_mm, pud);
2294 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2295 return ptl;
2296 spin_unlock(ptl);
2297 return NULL;
2298}
2299
2300#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2301int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2302 pud_t *pud, unsigned long addr)
2303{
a00cc7d9
MW
2304 spinlock_t *ptl;
2305
2306 ptl = __pud_trans_huge_lock(pud, vma);
2307 if (!ptl)
2308 return 0;
74929079 2309
f32928ab 2310 pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
a00cc7d9 2311 tlb_remove_pud_tlb_entry(tlb, pud, addr);
2484ca9b 2312 if (vma_is_special_huge(vma)) {
a00cc7d9
MW
2313 spin_unlock(ptl);
2314 /* No zero page support yet */
2315 } else {
2316 /* No support for anonymous PUD pages yet */
2317 BUG();
2318 }
2319 return 1;
2320}
2321
2322static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2323 unsigned long haddr)
2324{
2325 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2326 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2327 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2328 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2329
ce9311cf 2330 count_vm_event(THP_SPLIT_PUD);
a00cc7d9 2331
ec8832d0 2332 pudp_huge_clear_flush(vma, haddr, pud);
a00cc7d9
MW
2333}
2334
2335void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2336 unsigned long address)
2337{
2338 spinlock_t *ptl;
ac46d4f3 2339 struct mmu_notifier_range range;
a00cc7d9 2340
7d4a8be0 2341 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
6f4f13e8 2342 address & HPAGE_PUD_MASK,
ac46d4f3
JG
2343 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2344 mmu_notifier_invalidate_range_start(&range);
2345 ptl = pud_lock(vma->vm_mm, pud);
a00cc7d9
MW
2346 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2347 goto out;
ac46d4f3 2348 __split_huge_pud_locked(vma, pud, range.start);
a00cc7d9
MW
2349
2350out:
2351 spin_unlock(ptl);
ec8832d0 2352 mmu_notifier_invalidate_range_end(&range);
a00cc7d9
MW
2353}
2354#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2355
eef1b3ba
KS
2356static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2357 unsigned long haddr, pmd_t *pmd)
2358{
2359 struct mm_struct *mm = vma->vm_mm;
2360 pgtable_t pgtable;
42b2af2c 2361 pmd_t _pmd, old_pmd;
c9c1ee20
HD
2362 unsigned long addr;
2363 pte_t *pte;
eef1b3ba
KS
2364 int i;
2365
0f10851e
JG
2366 /*
2367 * Leave the pmd empty until the pte is filled. Note that it is fine to delay
2368 * notification until mmu_notifier_invalidate_range_end() as we are
2369 * replacing a zero pmd write protected page with a zero pte write
2370 * protected page.
2371 *
ee65728e 2372 * See Documentation/mm/mmu_notifier.rst
0f10851e 2373 */
42b2af2c 2374 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
eef1b3ba
KS
2375
2376 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2377 pmd_populate(mm, &_pmd, pgtable);
2378
c9c1ee20
HD
2379 pte = pte_offset_map(&_pmd, haddr);
2380 VM_BUG_ON(!pte);
2381 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2382 pte_t entry;
2383
2384 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
eef1b3ba 2385 entry = pte_mkspecial(entry);
42b2af2c
DH
2386 if (pmd_uffd_wp(old_pmd))
2387 entry = pte_mkuffd_wp(entry);
c33c7948 2388 VM_BUG_ON(!pte_none(ptep_get(pte)));
c9c1ee20
HD
2389 set_pte_at(mm, addr, pte, entry);
2390 pte++;
eef1b3ba 2391 }
c9c1ee20 2392 pte_unmap(pte - 1);
eef1b3ba
KS
2393 smp_wmb(); /* make pte visible before pmd */
2394 pmd_populate(mm, pmd, pgtable);
eef1b3ba
KS
2395}
2396
2397static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
ba988280 2398 unsigned long haddr, bool freeze)
eef1b3ba
KS
2399{
2400 struct mm_struct *mm = vma->vm_mm;
91b2978a 2401 struct folio *folio;
eef1b3ba
KS
2402 struct page *page;
2403 pgtable_t pgtable;
423ac9af 2404 pmd_t old_pmd, _pmd;
292924b2 2405 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
0ccf7f16 2406 bool anon_exclusive = false, dirty = false;
2ac015e2 2407 unsigned long addr;
c9c1ee20 2408 pte_t *pte;
eef1b3ba
KS
2409 int i;
2410
2411 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2412 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2413 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
84c3fc4e
ZY
2414 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2415 && !pmd_devmap(*pmd));
eef1b3ba
KS
2416
2417 count_vm_event(THP_SPLIT_PMD);
2418
d21b9e57 2419 if (!vma_is_anonymous(vma)) {
ec8832d0 2420 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
953c66c2
AK
2421 /*
2422 * We are going to unmap this huge page. So
2423 * just go ahead and zap it
2424 */
2425 if (arch_needs_pgtable_deposit())
2426 zap_deposited_table(mm, pmd);
2484ca9b 2427 if (vma_is_special_huge(vma))
d21b9e57 2428 return;
99fa8a48
HD
2429 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2430 swp_entry_t entry;
2431
2432 entry = pmd_to_swp_entry(old_pmd);
af5cdaf8 2433 page = pfn_swap_entry_to_page(entry);
99fa8a48
HD
2434 } else {
2435 page = pmd_page(old_pmd);
2436 if (!PageDirty(page) && pmd_dirty(old_pmd))
2437 set_page_dirty(page);
2438 if (!PageReferenced(page) && pmd_young(old_pmd))
2439 SetPageReferenced(page);
cea86fe2 2440 page_remove_rmap(page, vma, true);
99fa8a48
HD
2441 put_page(page);
2442 }
fadae295 2443 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
eef1b3ba 2444 return;
99fa8a48
HD
2445 }
2446
3b77e8c8 2447 if (is_huge_zero_pmd(*pmd)) {
4645b9fe
JG
2448 /*
2449 * FIXME: Do we want to invalidate secondary mmu by calling
1af5a810
AP
2450 * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below
2451 * inside __split_huge_pmd() ?
4645b9fe
JG
2452 *
2453 * We are going from a write-protected huge zero page to
2454 * write-protected small zero pages, so it does not seem useful
2455 * to invalidate the secondary mmu at this time.
2456 */
eef1b3ba
KS
2457 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2458 }
2459
423ac9af
AK
2460 /*
2461 * Up to this point the pmd is present and huge and userland has the
2462 * whole access to the hugepage during the split (which happens in
2463 * place). If we overwrite the pmd with the not-huge version pointing
2464 * to the pte here (which of course we could if all CPUs were bug
2465 * free), userland could trigger a small page size TLB miss on the
2466 * small sized TLB while the hugepage TLB entry is still established in
2467 * the huge TLB. Some CPUs don't like that.
42742d9b
AK
2468 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2469 * 383 on page 105. Intel should be safe but also warns that it's
423ac9af
AK
2470 * only safe if the permission and cache attributes of the two entries
2471 * loaded in the two TLBs are identical (which should be the case here).
2472 * But it is generally safer to never allow small and huge TLB entries
2473 * for the same virtual address to be loaded simultaneously. So instead
2474 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2475 * current pmd notpresent (atomically because here the pmd_trans_huge
2476 * must remain set at all times on the pmd until the split is complete
2477 * for this pmd), then we flush the SMP TLB and finally we write the
2478 * non-huge version of the pmd entry with pmd_populate.
2479 */
2480 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2481
423ac9af 2482 pmd_migration = is_pmd_migration_entry(old_pmd);
2e83ee1d 2483 if (unlikely(pmd_migration)) {
84c3fc4e
ZY
2484 swp_entry_t entry;
2485
423ac9af 2486 entry = pmd_to_swp_entry(old_pmd);
af5cdaf8 2487 page = pfn_swap_entry_to_page(entry);
4dd845b5 2488 write = is_writable_migration_entry(entry);
6c287605
DH
2489 if (PageAnon(page))
2490 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2e346877
PX
2491 young = is_migration_entry_young(entry);
2492 dirty = is_migration_entry_dirty(entry);
2e83ee1d 2493 soft_dirty = pmd_swp_soft_dirty(old_pmd);
f45ec5ff 2494 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2e83ee1d 2495 } else {
423ac9af 2496 page = pmd_page(old_pmd);
91b2978a 2497 folio = page_folio(page);
0ccf7f16
PX
2498 if (pmd_dirty(old_pmd)) {
2499 dirty = true;
91b2978a 2500 folio_set_dirty(folio);
0ccf7f16 2501 }
2e83ee1d
PX
2502 write = pmd_write(old_pmd);
2503 young = pmd_young(old_pmd);
2504 soft_dirty = pmd_soft_dirty(old_pmd);
292924b2 2505 uffd_wp = pmd_uffd_wp(old_pmd);
6c287605 2506
91b2978a
DH
2507 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2508 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
6c287605
DH
2509
2510 /*
2511 * Without "freeze", we'll simply split the PMD, propagating the
2512 * PageAnonExclusive() flag for each PTE by setting it for
2513 * each subpage -- no need to (temporarily) clear.
2514 *
2515 * With "freeze" we want to replace mapped pages by
2516 * migration entries right away. This is only possible if we
2517 * managed to clear PageAnonExclusive() -- see
2518 * set_pmd_migration_entry().
2519 *
2520 * In case we cannot clear PageAnonExclusive(), split the PMD
2521 * only and let try_to_migrate_one() fail later.
088b8aa5
DH
2522 *
2523 * See page_try_share_anon_rmap(): invalidate PMD first.
6c287605 2524 */
91b2978a 2525 anon_exclusive = PageAnonExclusive(page);
6c287605
DH
2526 if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
2527 freeze = false;
91b2978a
DH
2528 if (!freeze) {
2529 rmap_t rmap_flags = RMAP_NONE;
2530
2531 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2532 if (anon_exclusive)
2533 rmap_flags |= RMAP_EXCLUSIVE;
2534 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
2535 vma, haddr, rmap_flags);
2536 }
2e83ee1d 2537 }
eef1b3ba 2538
423ac9af
AK
2539 /*
2540 * Withdraw the table only after we mark the pmd entry invalid.
2541 * This is critical for some architectures (Power).
2542 */
eef1b3ba
KS
2543 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2544 pmd_populate(mm, &_pmd, pgtable);
2545
c9c1ee20
HD
2546 pte = pte_offset_map(&_pmd, haddr);
2547 VM_BUG_ON(!pte);
2ac015e2 2548 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
c9c1ee20 2549 pte_t entry;
eef1b3ba
KS
2550 /*
2551 * Note that NUMA hinting access restrictions are not
2552 * transferred to avoid any possibility of altering
2553 * permissions across VMAs.
2554 */
84c3fc4e 2555 if (freeze || pmd_migration) {
ba988280 2556 swp_entry_t swp_entry;
4dd845b5
AP
2557 if (write)
2558 swp_entry = make_writable_migration_entry(
2559 page_to_pfn(page + i));
6c287605
DH
2560 else if (anon_exclusive)
2561 swp_entry = make_readable_exclusive_migration_entry(
2562 page_to_pfn(page + i));
4dd845b5
AP
2563 else
2564 swp_entry = make_readable_migration_entry(
2565 page_to_pfn(page + i));
2e346877
PX
2566 if (young)
2567 swp_entry = make_migration_entry_young(swp_entry);
2568 if (dirty)
2569 swp_entry = make_migration_entry_dirty(swp_entry);
ba988280 2570 entry = swp_entry_to_pte(swp_entry);
804dd150
AA
2571 if (soft_dirty)
2572 entry = pte_swp_mksoft_dirty(entry);
f45ec5ff
PX
2573 if (uffd_wp)
2574 entry = pte_swp_mkuffd_wp(entry);
ba988280 2575 } else {
6d2329f8 2576 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
1462c52e 2577 if (write)
161e393c 2578 entry = pte_mkwrite(entry, vma);
ba988280
KS
2579 if (!young)
2580 entry = pte_mkold(entry);
e833bc50
PX
2581 /* NOTE: this may set soft-dirty too on some archs */
2582 if (dirty)
2583 entry = pte_mkdirty(entry);
804dd150
AA
2584 if (soft_dirty)
2585 entry = pte_mksoft_dirty(entry);
292924b2
PX
2586 if (uffd_wp)
2587 entry = pte_mkuffd_wp(entry);
ba988280 2588 }
c33c7948 2589 VM_BUG_ON(!pte_none(ptep_get(pte)));
2ac015e2 2590 set_pte_at(mm, addr, pte, entry);
c9c1ee20 2591 pte++;
eef1b3ba 2592 }
c9c1ee20 2593 pte_unmap(pte - 1);
eef1b3ba 2594
cb67f428
HD
2595 if (!pmd_migration)
2596 page_remove_rmap(page, vma, true);
96d82deb
HD
2597 if (freeze)
2598 put_page(page);
eef1b3ba
KS
2599
2600 smp_wmb(); /* make pte visible before pmd */
2601 pmd_populate(mm, pmd, pgtable);
2602}
2603
2604void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
af28a988 2605 unsigned long address, bool freeze, struct folio *folio)
eef1b3ba
KS
2606{
2607 spinlock_t *ptl;
ac46d4f3 2608 struct mmu_notifier_range range;
eef1b3ba 2609
7d4a8be0 2610 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
6f4f13e8 2611 address & HPAGE_PMD_MASK,
ac46d4f3
JG
2612 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2613 mmu_notifier_invalidate_range_start(&range);
2614 ptl = pmd_lock(vma->vm_mm, pmd);
33f4751e
NH
2615
2616 /*
af28a988
MWO
2617 * If the caller asks to set up a migration entry, we need a folio to check
2618 * the pmd against. Otherwise we can end up replacing the wrong folio.
33f4751e 2619 */
af28a988 2620 VM_BUG_ON(freeze && !folio);
83a8441f 2621 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
33f4751e 2622
7f760917 2623 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
83a8441f 2624 is_pmd_migration_entry(*pmd)) {
cea33328
ML
2625 /*
2626 * It's safe to call pmd_page when folio is set because it's
2627 * guaranteed that pmd is present.
2628 */
83a8441f
MWO
2629 if (folio && folio != page_folio(pmd_page(*pmd)))
2630 goto out;
7f760917 2631 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
83a8441f 2632 }
7f760917 2633
e90309c9 2634out:
eef1b3ba 2635 spin_unlock(ptl);
ec8832d0 2636 mmu_notifier_invalidate_range_end(&range);
eef1b3ba
KS
2637}
2638
fec89c10 2639void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
af28a988 2640 bool freeze, struct folio *folio)
94fcc585 2641{
50722804 2642 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
94fcc585 2643
50722804 2644 if (!pmd)
f72e7dcd
HD
2645 return;
2646
af28a988 2647 __split_huge_pmd(vma, pmd, address, freeze, folio);
94fcc585
AA
2648}
2649
71f9e58e
ML
2650static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2651{
2652 /*
2653 * If the new address isn't hpage aligned and it could previously
2654 * contain a hugepage: check if we need to split a huge pmd.
2655 */
2656 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2657 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2658 ALIGN(address, HPAGE_PMD_SIZE)))
2659 split_huge_pmd_address(vma, address, false, NULL);
2660}
2661
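/*
 * Worked example (a sketch, assuming the usual 2MB HPAGE_PMD_SIZE): for
 * address 0x201000, ALIGN_DOWN() gives 0x200000 and ALIGN() gives
 * 0x400000; if [0x200000, 0x400000) lies inside the vma, a huge pmd may
 * straddle the new boundary and is split. A PMD-aligned address such as
 * 0x400000 skips the split entirely.
 */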
e1b9996b 2662void vma_adjust_trans_huge(struct vm_area_struct *vma,
94fcc585
AA
2663 unsigned long start,
2664 unsigned long end,
2665 long adjust_next)
2666{
71f9e58e
ML
2667 /* Check if we need to split start first. */
2668 split_huge_pmd_if_needed(vma, start);
94fcc585 2669
71f9e58e
ML
2670 /* Check if we need to split end next. */
2671 split_huge_pmd_if_needed(vma, end);
94fcc585
AA
2672
2673 /*
68540502 2674 * If we're also updating the next vma vm_start,
71f9e58e 2675 * check if we need to split it.
94fcc585
AA
2676 */
2677 if (adjust_next > 0) {
68540502 2678 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
94fcc585 2679 unsigned long nstart = next->vm_start;
f9d86a60 2680 nstart += adjust_next;
71f9e58e 2681 split_huge_pmd_if_needed(next, nstart);
94fcc585
AA
2682 }
2683}
e9b61f19 2684
684555aa 2685static void unmap_folio(struct folio *folio)
e9b61f19 2686{
a98a2f0c 2687 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
3027c6f8 2688 TTU_SYNC | TTU_BATCH_FLUSH;
e9b61f19 2689
684555aa 2690 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
e9b61f19 2691
a98a2f0c
AP
2692 /*
2693 * Anon pages need migration entries to preserve them, but file
2694 * pages can simply be left unmapped, then faulted back on demand.
2695 * If that is ever changed (perhaps for mlock), update remap_page().
2696 */
4b8554c5
MWO
2697 if (folio_test_anon(folio))
2698 try_to_migrate(folio, ttu_flags);
a98a2f0c 2699 else
869f7ee6 2700 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
3027c6f8
BW
2701
2702 try_to_unmap_flush();
e9b61f19
KS
2703}
2704
4eecb8b9 2705static void remap_page(struct folio *folio, unsigned long nr)
e9b61f19 2706{
4eecb8b9 2707 int i = 0;
ab02c252 2708
684555aa 2709 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
4eecb8b9 2710 if (!folio_test_anon(folio))
ab02c252 2711 return;
4eecb8b9
MWO
2712 for (;;) {
2713 remove_migration_ptes(folio, folio, true);
2714 i += folio_nr_pages(folio);
2715 if (i >= nr)
2716 break;
2717 folio = folio_next(folio);
ace71a19 2718 }
e9b61f19
KS
2719}
2720
94866635 2721static void lru_add_page_tail(struct page *head, struct page *tail,
88dcb9a3
AS
2722 struct lruvec *lruvec, struct list_head *list)
2723{
94866635
AS
2724 VM_BUG_ON_PAGE(!PageHead(head), head);
2725 VM_BUG_ON_PAGE(PageCompound(tail), head);
2726 VM_BUG_ON_PAGE(PageLRU(tail), head);
6168d0da 2727 lockdep_assert_held(&lruvec->lru_lock);
88dcb9a3 2728
6dbb5741 2729 if (list) {
88dcb9a3 2730 /* page reclaim is reclaiming a huge page */
6dbb5741 2731 VM_WARN_ON(PageLRU(head));
94866635
AS
2732 get_page(tail);
2733 list_add_tail(&tail->lru, list);
88dcb9a3 2734 } else {
6dbb5741
AS
2735 /* head is still on lru (and we have it frozen) */
2736 VM_WARN_ON(!PageLRU(head));
07ca7606
HD
2737 if (PageUnevictable(tail))
2738 tail->mlock_count = 0;
2739 else
2740 list_add_tail(&tail->lru, &head->lru);
6dbb5741 2741 SetPageLRU(tail);
88dcb9a3
AS
2742 }
2743}
2744
07e09c48 2745static void __split_huge_page_tail(struct folio *folio, int tail,
e9b61f19
KS
2746 struct lruvec *lruvec, struct list_head *list)
2747{
07e09c48 2748 struct page *head = &folio->page;
e9b61f19 2749 struct page *page_tail = head + tail;
07e09c48
DH
2750 /*
2751 * Careful: new_folio is not a "real" folio before we cleared PageTail.
2752 * Don't pass it around before clear_compound_head().
2753 */
2754 struct folio *new_folio = (struct folio *)page_tail;
e9b61f19 2755
8df651c7 2756 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
e9b61f19
KS
2757
2758 /*
605ca5ed
KK
2759 * Clone page flags before unfreezing refcount.
2760 *
2761 * After successful get_page_unless_zero() might follow flags change,
8958b249 2762 * for example lock_page() which set PG_waiters.
6c287605
DH
2763 *
2764 * Note that for mapped sub-pages of an anonymous THP,
684555aa 2765 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
6c287605
DH
2766 * the migration entry instead, from which remap_page() will restore it.
2767 * We can still have PG_anon_exclusive set on effectively unmapped and
2768 * unreferenced sub-pages of an anonymous THP: we can simply drop
2769 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
e9b61f19 2770 */
e9b61f19
KS
2771 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2772 page_tail->flags |= (head->flags &
2773 ((1L << PG_referenced) |
2774 (1L << PG_swapbacked) |
38d8b4e6 2775 (1L << PG_swapcache) |
e9b61f19
KS
2776 (1L << PG_mlocked) |
2777 (1L << PG_uptodate) |
2778 (1L << PG_active) |
1899ad18 2779 (1L << PG_workingset) |
e9b61f19 2780 (1L << PG_locked) |
b8d3c4c3 2781 (1L << PG_unevictable) |
b0284cd2 2782#ifdef CONFIG_ARCH_USES_PG_ARCH_X
72e6afa0 2783 (1L << PG_arch_2) |
ef6458b1 2784 (1L << PG_arch_3) |
72e6afa0 2785#endif
ec1c86b2
YZ
2786 (1L << PG_dirty) |
2787 LRU_GEN_MASK | LRU_REFS_MASK));
e9b61f19 2788
cb67f428 2789 /* ->mapping in first and second tail page is replaced by other uses */
173d9d9f
HD
2790 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2791 page_tail);
2792 page_tail->mapping = head->mapping;
2793 page_tail->index = head->index + tail;
71e2d666
MG
2794
2795 /*
cfeed8ff
DH
2796 * page->private should not be set in tail pages. Fix up and warn once
2797 * if private is unexpectedly set.
71e2d666 2798 */
cfeed8ff
DH
2799 if (unlikely(page_tail->private)) {
2800 VM_WARN_ON_ONCE_PAGE(true, page_tail);
71e2d666
MG
2801 page_tail->private = 0;
2802 }
07e09c48
DH
2803 if (folio_test_swapcache(folio))
2804 new_folio->swap.val = folio->swap.val + tail;
173d9d9f 2805
605ca5ed 2806 /* Page flags must be visible before we make the page non-compound. */
e9b61f19
KS
2807 smp_wmb();
2808
605ca5ed
KK
2809 /*
2810 * Clear PageTail before unfreezing page refcount.
2811 *
2812 * After successful get_page_unless_zero() might follow put_page()
2813 * which needs correct compound_head().
2814 */
e9b61f19
KS
2815 clear_compound_head(page_tail);
2816
605ca5ed 2817 /* Finally unfreeze refcount. Additional reference from page cache. */
b7542769
KW
2818 page_ref_unfreeze(page_tail, 1 + (!folio_test_anon(folio) ||
2819 folio_test_swapcache(folio)));
605ca5ed 2820
b7542769
KW
2821 if (folio_test_young(folio))
2822 folio_set_young(new_folio);
2823 if (folio_test_idle(folio))
2824 folio_set_idle(new_folio);
e9b61f19 2825
c8253011 2826 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
94723aaf
MH
2827
2828 /*
2829 * always add to the tail because some iterators expect new
2830 * pages to show after the currently processed elements - e.g.
2831 * migrate_pages
2832 */
e9b61f19 2833 lru_add_page_tail(head, page_tail, lruvec, list);
e9b61f19
KS
2834}
2835
baa355fd 2836static void __split_huge_page(struct page *page, struct list_head *list,
b6769834 2837 pgoff_t end)
e9b61f19 2838{
e809c3fe
MWO
2839 struct folio *folio = page_folio(page);
2840 struct page *head = &folio->page;
e9b61f19 2841 struct lruvec *lruvec;
4101196b
MWO
2842 struct address_space *swap_cache = NULL;
2843 unsigned long offset = 0;
8cce5475 2844 unsigned int nr = thp_nr_pages(head);
509f0069 2845 int i, nr_dropped = 0;
e9b61f19 2846
e9b61f19 2847 /* complete memcg works before add pages to LRU */
be6c8982 2848 split_page_memcg(head, nr);
e9b61f19 2849
07e09c48
DH
2850 if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
2851 offset = swp_offset(folio->swap);
2852 swap_cache = swap_address_space(folio->swap);
4101196b
MWO
2853 xa_lock(&swap_cache->i_pages);
2854 }
2855
f0953a1b 2856 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
e809c3fe 2857 lruvec = folio_lruvec_lock(folio);
b6769834 2858
eac96c3e
YS
2859 ClearPageHasHWPoisoned(head);
2860
8cce5475 2861 for (i = nr - 1; i >= 1; i--) {
07e09c48 2862 __split_huge_page_tail(folio, i, lruvec, list);
d144bf62 2863 /* Some pages can be beyond EOF: drop them from page cache */
baa355fd 2864 if (head[i].index >= end) {
fb5c2029
MWO
2865 struct folio *tail = page_folio(head + i);
2866
d144bf62 2867 if (shmem_mapping(head->mapping))
509f0069 2868 nr_dropped++;
fb5c2029
MWO
2869 else if (folio_test_clear_dirty(tail))
2870 folio_account_cleaned(tail,
2871 inode_to_wb(folio->mapping->host));
2872 __filemap_remove_folio(tail, NULL);
2873 folio_put(tail);
4101196b
MWO
2874 } else if (!PageAnon(page)) {
2875 __xa_store(&head->mapping->i_pages, head[i].index,
2876 head + i, 0);
2877 } else if (swap_cache) {
2878 __xa_store(&swap_cache->i_pages, offset + i,
2879 head + i, 0);
baa355fd
KS
2880 }
2881 }
e9b61f19
KS
2882
2883 ClearPageCompound(head);
6168d0da 2884 unlock_page_lruvec(lruvec);
b6769834 2885 /* Caller disabled irqs, so they are still disabled here */
f7da677b 2886
8cce5475 2887 split_page_owner(head, nr);
f7da677b 2888
baa355fd
KS
2889 /* See comment in __split_huge_page_tail() */
2890 if (PageAnon(head)) {
aa5dc07f 2891 /* Additional pin to swap cache */
4101196b 2892 if (PageSwapCache(head)) {
38d8b4e6 2893 page_ref_add(head, 2);
4101196b
MWO
2894 xa_unlock(&swap_cache->i_pages);
2895 } else {
38d8b4e6 2896 page_ref_inc(head);
4101196b 2897 }
baa355fd 2898 } else {
aa5dc07f 2899 /* Additional pin to page cache */
baa355fd 2900 page_ref_add(head, 2);
b93b0163 2901 xa_unlock(&head->mapping->i_pages);
baa355fd 2902 }
b6769834 2903 local_irq_enable();
e9b61f19 2904
509f0069
HD
2905 if (nr_dropped)
2906 shmem_uncharge(head->mapping->host, nr_dropped);
4eecb8b9 2907 remap_page(folio, nr);
e9b61f19 2908
07e09c48
DH
2909 if (folio_test_swapcache(folio))
2910 split_swap_cluster(folio->swap);
c4f9c701 2911
8cce5475 2912 for (i = 0; i < nr; i++) {
e9b61f19
KS
2913 struct page *subpage = head + i;
2914 if (subpage == page)
2915 continue;
2916 unlock_page(subpage);
2917
2918 /*
2919 * Subpages may be freed if there was no mapping left,
2920 * e.g. if add_to_swap() is running on an LRU page that
2921 * had its mapping zapped. Freeing these pages requires
2922 * taking the lru_lock, so we do the put_page
2923 * of the tail pages after the split is complete.
2924 */
0b175468 2925 free_page_and_swap_cache(subpage);
e9b61f19
KS
2926 }
2927}
2928
b8f593cd 2929/* Racy check whether the huge page can be split */
d4b4084a 2930bool can_split_folio(struct folio *folio, int *pextra_pins)
b8f593cd
HY
2931{
2932 int extra_pins;
2933
aa5dc07f 2934 /* Additional pins from page cache */
d4b4084a
MWO
2935 if (folio_test_anon(folio))
2936 extra_pins = folio_test_swapcache(folio) ?
2937 folio_nr_pages(folio) : 0;
b8f593cd 2938 else
d4b4084a 2939 extra_pins = folio_nr_pages(folio);
b8f593cd
HY
2940 if (pextra_pins)
2941 *pextra_pins = extra_pins;
d4b4084a 2942 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
b8f593cd
HY
2943}
2944
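/*
 * Worked example for the racy check above (a sketch, for a PMD-sized
 * anonymous folio with HPAGE_PMD_NR == 512 that also sits in the swap
 * cache): extra_pins is 512, so the check only passes while
 *
 *	folio_ref_count() == folio_mapcount() + 512 + 1
 *
 * i.e. the caller's pin, the swap cache references and the mappings
 * account for every reference; any additional pin (e.g. GUP) makes
 * can_split_folio() return false.
 */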
e9b61f19
KS
2945/*
2946 * This function splits a huge page into normal pages. @page can point to any
2947 * subpage of the huge page to split. The split doesn't change the position of @page.
2948 *
2949 * The caller must hold a pin on the @page; otherwise the split fails with -EBUSY.
2950 * The huge page must be locked.
2951 *
2952 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2953 *
2954 * Both head page and tail pages will inherit mapping, flags, and so on from
2955 * the hugepage.
2956 *
2957 * The GUP pin and PG_locked are transferred to @page. The remaining subpages
2958 * can be freed if they are not mapped.
2959 *
2960 * Returns 0 if the hugepage is split successfully.
2961 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2962 * us.
2963 */
2964int split_huge_page_to_list(struct page *page, struct list_head *list)
2965{
4eecb8b9 2966 struct folio *folio = page_folio(page);
f8baa6be 2967 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3e9a13da 2968 XA_STATE(xas, &folio->mapping->i_pages, folio->index);
baa355fd
KS
2969 struct anon_vma *anon_vma = NULL;
2970 struct address_space *mapping = NULL;
504e070d 2971 int extra_pins, ret;
006d3ff2 2972 pgoff_t end;
478d134e 2973 bool is_hzp;
e9b61f19 2974
3e9a13da
MWO
2975 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2976 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
e9b61f19 2977
3e9a13da 2978 is_hzp = is_huge_zero_page(&folio->page);
4737edbb
NH
2979 if (is_hzp) {
2980 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
478d134e 2981 return -EBUSY;
4737edbb 2982 }
478d134e 2983
3e9a13da 2984 if (folio_test_writeback(folio))
59807685
HY
2985 return -EBUSY;
2986
3e9a13da 2987 if (folio_test_anon(folio)) {
baa355fd 2988 /*
c1e8d7c6 2989 * The caller does not necessarily hold an mmap_lock that would
baa355fd
KS
2990 * prevent the anon_vma from disappearing, so we first take a
2991 * reference to it and then lock the anon_vma for write. This
2f031c6f 2992 * is similar to folio_lock_anon_vma_read except the write lock
baa355fd
KS
2993 * is taken to serialise against parallel split or collapse
2994 * operations.
2995 */
29eea9b5 2996 anon_vma = folio_get_anon_vma(folio);
baa355fd
KS
2997 if (!anon_vma) {
2998 ret = -EBUSY;
2999 goto out;
3000 }
006d3ff2 3001 end = -1;
baa355fd
KS
3002 mapping = NULL;
3003 anon_vma_lock_write(anon_vma);
3004 } else {
6a3edd29
YF
3005 gfp_t gfp;
3006
3e9a13da 3007 mapping = folio->mapping;
baa355fd
KS
3008
3009 /* Truncated ? */
3010 if (!mapping) {
3011 ret = -EBUSY;
3012 goto out;
3013 }
3014
6a3edd29
YF
3015 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
3016 GFP_RECLAIM_MASK);
3017
0201ebf2 3018 if (!filemap_release_folio(folio, gfp)) {
6a3edd29
YF
3019 ret = -EBUSY;
3020 goto out;
3021 }
3022
3e9a13da 3023 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
6b24ca4a
MWO
3024 if (xas_error(&xas)) {
3025 ret = xas_error(&xas);
3026 goto out;
3027 }
3028
baa355fd
KS
3029 anon_vma = NULL;
3030 i_mmap_lock_read(mapping);
006d3ff2
HD
3031
3032 /*
3033 *__split_huge_page() may need to trim off pages beyond EOF:
3034 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
3035 * which cannot be nested inside the page tree lock. So note
3036 * end now: i_size itself may be changed at any moment, but
3e9a13da 3037 * folio lock is good enough to serialize the trimming.
006d3ff2
HD
3038 */
3039 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
d144bf62
HD
3040 if (shmem_mapping(mapping))
3041 end = shmem_fallocend(mapping->host, end);
e9b61f19 3042 }
e9b61f19
KS
3043
3044 /*
684555aa 3045 * Racy check if we can split the page, before unmap_folio() will
e9b61f19
KS
3046 * split PMDs
3047 */
d4b4084a 3048 if (!can_split_folio(folio, &extra_pins)) {
fd4a7ac3 3049 ret = -EAGAIN;
e9b61f19
KS
3050 goto out_unlock;
3051 }
3052
684555aa 3053 unmap_folio(folio);
e9b61f19 3054
b6769834
AS
3055 /* block interrupt reentry in xa_lock and spinlock */
3056 local_irq_disable();
baa355fd 3057 if (mapping) {
baa355fd 3058 /*
3e9a13da
MWO
3059 * Check if the folio is present in page cache.
3060 * We assume all tails are present too, if the folio is there.
baa355fd 3061 */
6b24ca4a
MWO
3062 xas_lock(&xas);
3063 xas_reset(&xas);
3e9a13da 3064 if (xas_load(&xas) != folio)
baa355fd
KS
3065 goto fail;
3066 }
3067
0139aa7b 3068 /* Prevent deferred_split_scan() touching ->_refcount */
364c1eeb 3069 spin_lock(&ds_queue->split_queue_lock);
3e9a13da 3070 if (folio_ref_freeze(folio, 1 + extra_pins)) {
4375a553 3071 if (!list_empty(&folio->_deferred_list)) {
364c1eeb 3072 ds_queue->split_queue_len--;
4375a553 3073 list_del(&folio->_deferred_list);
9a982250 3074 }
afb97172 3075 spin_unlock(&ds_queue->split_queue_lock);
06d3eff6 3076 if (mapping) {
3e9a13da 3077 int nr = folio_nr_pages(folio);
bf9ecead 3078
3e9a13da 3079 xas_split(&xas, folio, folio_order(folio));
a48d5bdc
SR
3080 if (folio_test_pmd_mappable(folio)) {
3081 if (folio_test_swapbacked(folio)) {
3082 __lruvec_stat_mod_folio(folio,
3083 NR_SHMEM_THPS, -nr);
3084 } else {
3085 __lruvec_stat_mod_folio(folio,
3086 NR_FILE_THPS, -nr);
3087 filemap_nr_thps_dec(mapping);
3088 }
1ca7554d 3089 }
06d3eff6
KS
3090 }
3091
b6769834 3092 __split_huge_page(page, list, end);
c4f9c701 3093 ret = 0;
e9b61f19 3094 } else {
364c1eeb 3095 spin_unlock(&ds_queue->split_queue_lock);
504e070d
YS
3096fail:
3097 if (mapping)
6b24ca4a 3098 xas_unlock(&xas);
b6769834 3099 local_irq_enable();
4eecb8b9 3100 remap_page(folio, folio_nr_pages(folio));
fd4a7ac3 3101 ret = -EAGAIN;
e9b61f19
KS
3102 }
3103
3104out_unlock:
baa355fd
KS
3105 if (anon_vma) {
3106 anon_vma_unlock_write(anon_vma);
3107 put_anon_vma(anon_vma);
3108 }
3109 if (mapping)
3110 i_mmap_unlock_read(mapping);
e9b61f19 3111out:
69a37a8b 3112 xas_destroy(&xas);
e9b61f19
KS
3113 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3114 return ret;
3115}
9a982250 3116
8dc4a8f1 3117void folio_undo_large_rmappable(struct folio *folio)
9a982250 3118{
8dc4a8f1 3119 struct deferred_split *ds_queue;
9a982250
KS
3120 unsigned long flags;
3121
deedad80
YF
3122 /*
3123 * At this point, there is no one trying to add the folio to
3124 * deferred_list. If folio is not in deferred_list, it's safe
3125 * to check without acquiring the split_queue_lock.
3126 */
8dc4a8f1
MWO
3127 if (data_race(list_empty(&folio->_deferred_list)))
3128 return;
3129
3130 ds_queue = get_deferred_split_queue(folio);
3131 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3132 if (!list_empty(&folio->_deferred_list)) {
3133 ds_queue->split_queue_len--;
3134 list_del(&folio->_deferred_list);
9a982250 3135 }
8dc4a8f1 3136 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250
KS
3137}
3138
f158ed61 3139void deferred_split_folio(struct folio *folio)
9a982250 3140{
f8baa6be 3141 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
87eaceb3 3142#ifdef CONFIG_MEMCG
8991de90 3143 struct mem_cgroup *memcg = folio_memcg(folio);
87eaceb3 3144#endif
9a982250
KS
3145 unsigned long flags;
3146
8991de90 3147 VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
9a982250 3148
87eaceb3
YS
3149 /*
3150 * The try_to_unmap() in the page reclaim path might reach here too;
3151 * this may cause a race condition that corrupts the deferred split queue.
8991de90 3152 * And, if page reclaim is already handling the same folio, it is
87eaceb3
YS
3153 * unnecessary to handle it again in shrinker.
3154 *
8991de90
MWO
3155 * Check the swapcache flag to determine if the folio is being
3156 * handled by page reclaim since THP swap would add the folio into
87eaceb3
YS
3157 * swap cache before calling try_to_unmap().
3158 */
8991de90 3159 if (folio_test_swapcache(folio))
87eaceb3
YS
3160 return;
3161
8991de90 3162 if (!list_empty(&folio->_deferred_list))
87eaceb3
YS
3163 return;
3164
364c1eeb 3165 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
8991de90 3166 if (list_empty(&folio->_deferred_list)) {
f9719a03 3167 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
8991de90 3168 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
364c1eeb 3169 ds_queue->split_queue_len++;
87eaceb3
YS
3170#ifdef CONFIG_MEMCG
3171 if (memcg)
8991de90 3172 set_shrinker_bit(memcg, folio_nid(folio),
54d91729 3173 deferred_split_shrinker->id);
87eaceb3 3174#endif
9a982250 3175 }
364c1eeb 3176 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250
KS
3177}
3178
static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif
	return READ_ONCE(ds_queue->split_queue_len);
}

static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
	unsigned long flags;
	LIST_HEAD(list);
	struct folio *folio, *next;
	int split = 0;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
							_deferred_list) {
		if (folio_try_get(folio)) {
			list_move(&folio->_deferred_list, &list);
		} else {
			/* We lost race with folio_put() */
			list_del_init(&folio->_deferred_list);
			ds_queue->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
		if (!folio_trylock(folio))
			goto next;
		/* split_folio() removes the folio from the list on success */
		if (!split_folio(folio))
			split++;
		folio_unlock(folio);
next:
		folio_put(folio);
	}

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	list_splice_tail(&list, &ds_queue->split_queue);
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	/*
	 * Stop the shrinker if we didn't split any folio and the queue is now
	 * empty.  This can happen if the folios were freed under us.
	 */
	if (!split && list_empty(&ds_queue->split_queue))
		return SHRINK_STOP;
	return split;
}
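
/*
 * How the two callbacks above are wired up (a minimal sketch, assuming the
 * shrinker_alloc()/shrinker_register() API and illustrative flags; the real
 * registration lives in the THP init code elsewhere in this file, and the
 * function name here is made up for illustration):
 */
static int __init deferred_split_shrinker_init_sketch(void)
{
	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						 SHRINKER_MEMCG_AWARE,
						 "thp-deferred_split");
	if (!deferred_split_shrinker)
		return -ENOMEM;

	/* Hook up the per-node/per-memcg queue length and scan callbacks. */
	deferred_split_shrinker->count_objects = deferred_split_count;
	deferred_split_shrinker->scan_objects = deferred_split_scan;
	shrinker_register(deferred_split_shrinker);
	return 0;
}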

#ifdef CONFIG_DEBUG_FS
static void split_huge_pages_all(void)
{
	struct zone *zone;
	struct page *page;
	struct folio *folio;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	pr_debug("Split all THPs\n");
	for_each_zone(zone) {
		if (!managed_zone(zone))
			continue;
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			int nr_pages;

			page = pfn_to_online_page(pfn);
			if (!page || PageTail(page))
				continue;
			folio = page_folio(page);
			if (!folio_try_get(folio))
				continue;

			if (unlikely(page_folio(page) != folio))
				goto next;

			if (zone != folio_zone(folio))
				goto next;

			if (!folio_test_large(folio)
				|| folio_test_hugetlb(folio)
				|| !folio_test_lru(folio))
				goto next;

			total++;
			folio_lock(folio);
			nr_pages = folio_nr_pages(folio);
			if (!split_folio(folio))
				split++;
			pfn += nr_pages - 1;
			folio_unlock(folio);
next:
			folio_put(folio);
			cond_resched();
		}
	}

	pr_debug("%lu of %lu THP split\n", split, total);
}

static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
{
	return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
		    is_vm_hugetlb_page(vma);
}

static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
				unsigned long vaddr_end)
{
	int ret = 0;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long total = 0, split = 0;
	unsigned long addr;

	vaddr_start &= PAGE_MASK;
	vaddr_end &= PAGE_MASK;

	/* Find the task_struct from pid */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		ret = -ESRCH;
		goto out;
	}
	get_task_struct(task);
	rcu_read_unlock();

	/* Find the mm_struct */
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		ret = -EINVAL;
		goto out;
	}

	pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
		 pid, vaddr_start, vaddr_end);

	mmap_read_lock(mm);
	/*
	 * always increase addr by PAGE_SIZE, since we could have a PTE page
	 * table filled with PTE-mapped THPs, each of which is distinct.
	 */
	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
		struct vm_area_struct *vma = vma_lookup(mm, addr);
		struct page *page;
		struct folio *folio;

		if (!vma)
			break;

		/* skip special VMA and hugetlb VMA */
		if (vma_not_suitable_for_thp_split(vma)) {
			addr = vma->vm_end;
			continue;
		}

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		if (IS_ERR_OR_NULL(page))
			continue;

		folio = page_folio(page);
		if (!is_transparent_hugepage(folio))
			goto next;

		total++;
		if (!can_split_folio(folio, NULL))
			goto next;

		if (!folio_trylock(folio))
			goto next;

		if (!split_folio(folio))
			split++;

		folio_unlock(folio);
next:
		folio_put(folio);
		cond_resched();
	}
	mmap_read_unlock(mm);
	mmput(mm);

	pr_debug("%lu of %lu THP split\n", split, total);

out:
	return ret;
}

static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
				pgoff_t off_end)
{
	struct filename *file;
	struct file *candidate;
	struct address_space *mapping;
	int ret = -EINVAL;
	pgoff_t index;
	int nr_pages = 1;
	unsigned long total = 0, split = 0;

	file = getname_kernel(file_path);
	if (IS_ERR(file))
		return ret;

	candidate = file_open_name(file, O_RDONLY, 0);
	if (IS_ERR(candidate))
		goto out;

	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
		 file_path, off_start, off_end);

	mapping = candidate->f_mapping;

	for (index = off_start; index < off_end; index += nr_pages) {
		struct folio *folio = filemap_get_folio(mapping, index);

		nr_pages = 1;
		if (IS_ERR(folio))
			continue;

		if (!folio_test_large(folio))
			goto next;

		total++;
		nr_pages = folio_nr_pages(folio);

		if (!folio_trylock(folio))
			goto next;

		if (!split_folio(folio))
			split++;

		folio_unlock(folio);
next:
		folio_put(folio);
		cond_resched();
	}

	filp_close(candidate, NULL);
	ret = 0;

	pr_debug("%lu of %lu file-backed THP split\n", split, total);
out:
	putname(file);
	return ret;
}

#define MAX_INPUT_BUF_SZ 255

static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppops)
{
	static DEFINE_MUTEX(split_debug_mutex);
	ssize_t ret;
	/* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
	char input_buf[MAX_INPUT_BUF_SZ];
	int pid;
	unsigned long vaddr_start, vaddr_end;

	ret = mutex_lock_interruptible(&split_debug_mutex);
	if (ret)
		return ret;

	ret = -EFAULT;

	memset(input_buf, 0, MAX_INPUT_BUF_SZ);
	if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
		goto out;

	input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';

	if (input_buf[0] == '/') {
		char *tok;
		char *buf = input_buf;
		char file_path[MAX_INPUT_BUF_SZ];
		pgoff_t off_start = 0, off_end = 0;
		size_t input_len = strlen(input_buf);

		tok = strsep(&buf, ",");
		if (tok) {
			strcpy(file_path, tok);
		} else {
			ret = -EINVAL;
			goto out;
		}

		ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
		if (ret != 2) {
			ret = -EINVAL;
			goto out;
		}
		ret = split_huge_pages_in_file(file_path, off_start, off_end);
		if (!ret)
			ret = input_len;

		goto out;
	}

	ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
	if (ret == 1 && pid == 1) {
		split_huge_pages_all();
		ret = strlen(input_buf);
		goto out;
	} else if (ret != 3) {
		ret = -EINVAL;
		goto out;
	}

	ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
	if (!ret)
		ret = strlen(input_buf);
out:
	mutex_unlock(&split_debug_mutex);
	return ret;
}

static const struct file_operations split_huge_pages_fops = {
	.owner	 = THIS_MODULE,
	.write	 = split_huge_pages_write,
	.llseek  = no_llseek,
};

static int __init split_huge_pages_debugfs(void)
{
	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			    &split_huge_pages_fops);
	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif
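
/*
 * Userspace sketch (not part of this file): one way to drive the
 * split_huge_pages debugfs interface defined above.  It assumes debugfs is
 * mounted at /sys/kernel/debug and that the kernel was built with
 * CONFIG_DEBUG_FS; the pid and address range below are made-up examples.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/split_huge_pages", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * "<pid>,0x<vaddr_start>,0x<vaddr_end>" splits the THPs mapped in
	 * that virtual address range of the given process.  Writing just "1"
	 * instead splits every THP in the system, and a string starting with
	 * '/' selects file mode: "<path>,0x<off_start>,0x<off_end>".
	 */
	dprintf(fd, "%d,0x%lx,0x%lx", 1234, 0x7f0000000000UL, 0x7f0000200000UL);
	close(fd);
	return 0;
}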

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
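/*
 * Replace the present PMD mapping of @page with a PMD migration entry,
 * preserving the dirty/young/soft-dirty/uffd-wp state in the swap PMD.
 * Reached from the migration rmap walk (see try_to_migrate_one()); returns
 * -EBUSY when page_try_share_anon_rmap() fails on an exclusive anon page.
 */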
int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	bool anon_exclusive;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return 0;

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);

	/* See page_try_share_anon_rmap(): invalidate PMD first. */
	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
	if (anon_exclusive && page_try_share_anon_rmap(page)) {
		set_pmd_at(mm, address, pvmw->pmd, pmdval);
		return -EBUSY;
	}

	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	if (pmd_write(pmdval))
		entry = make_writable_migration_entry(page_to_pfn(page));
	else if (anon_exclusive)
		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
	else
		entry = make_readable_migration_entry(page_to_pfn(page));
	if (pmd_young(pmdval))
		entry = make_migration_entry_young(entry);
	if (pmd_dirty(pmdval))
		entry = make_migration_entry_dirty(entry);
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	if (pmd_uffd_wp(pmdval))
		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, vma, true);
	put_page(page);
	trace_set_migration_pmd(address, pmd_val(pmdswp));

	return 0;
}

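/*
 * Restore a PMD migration entry to a present PMD pointing at @new once
 * migration has finished, re-establishing the rmap and the saved
 * dirty/young/soft-dirty/uffd-wp bits.  Reached from the rmap walk that
 * removes migration entries (see remove_migration_pte()).
 */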
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct folio *folio = page_folio(new);
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	folio_get(folio);
	pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_writable_migration_entry(entry))
		pmde = pmd_mkwrite(pmde, vma);
	if (pmd_swp_uffd_wp(*pvmw->pmd))
		pmde = pmd_mkuffd_wp(pmde);
	if (!is_migration_entry_young(entry))
		pmde = pmd_mkold(pmde);
	/* NOTE: this may contain setting soft-dirty on some archs */
	if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
		pmde = pmd_mkdirty(pmde);

	if (folio_test_anon(folio)) {
		rmap_t rmap_flags = RMAP_NONE;

		if (!is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
	} else {
		folio_add_file_rmap_pmd(folio, new, vma);
	}
	VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
	set_pmd_at(mm, haddr, pvmw->pmd, pmde);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
	trace_remove_migration_pmd(address, pmd_val(pmde));
}
#endif