// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>
#include <linux/compat.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker *deferred_split_shrinker;
static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
					 struct shrink_control *sc);

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;

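/*
 * Returns the subset of @orders that the given VMA is allowed to use,
 * taking the VMA type, madvise/prctl state and (optionally) the sysfs
 * policy into account. A return value of 0 means no THP order is
 * allowed for this VMA.
 */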
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags, bool smaps,
					 bool in_pf, bool enforce_sysfs,
					 unsigned long orders)
{
	/* Check the intersection of requested and supported orders. */
	orders &= vma_is_anonymous(vma) ?
			THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
	if (!orders)
		return 0;

	if (!vma->vm_mm)		/* vdso */
		return 0;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return 0;
	/*
	 * If the hardware/firmware marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
		return 0;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf ? orders : 0;

	/*
	 * khugepaged special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
		return 0;

	/*
	 * Check alignment for file vma and size for both file and anon vma by
	 * filtering out the unsuitable orders.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers.
	 */
	if (!in_pf) {
		int order = highest_order(orders);
		unsigned long addr;

		while (orders) {
			addr = vma->vm_end - (PAGE_SIZE << order);
			if (thp_vma_suitable_order(vma, addr, order))
				break;
			order = next_order(&orders, order);
		}

		if (!orders)
			return 0;
	}

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
				     !enforce_sysfs, vma->vm_mm, vm_flags)
			? orders : 0;

	if (!vma_is_anonymous(vma)) {
		/*
		 * Enforce sysfs THP requirements as necessary. Anonymous vmas
		 * were already handled in thp_vma_allowable_orders().
		 */
		if (enforce_sysfs &&
		    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
						    !hugepage_global_always())))
			return 0;

		/*
		 * Trust that ->huge_fault() handlers know what they are doing
		 * in fault path.
		 */
		if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
			return orders;
		/* Only regular file is valid in collapse path */
		if (((!in_pf || smaps)) && file_thp_enabled(vma))
			return orders;
		return 0;
	}

	if (vma_is_temporary_stack(vma))
		return 0;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf) ? orders : 0;

	return orders;
}

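/*
 * The huge zero page is a single, global PMD-sized page of zeroes that
 * read-only anonymous faults can map instead of allocating real memory.
 * It is allocated lazily, reference-counted per mm, and freed again by
 * the shrinker once only the cached reference remains.
 */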
static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker *huge_zero_page_shrinker;

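/*
 * sysfs interface: /sys/kernel/mm/transparent_hugepage/ exposes the
 * "enabled", "defrag", "use_zero_page" and "hpage_pmd_size" knobs
 * defined below.
 */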
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
static void thpsize_release(struct kobject *kobj);
static DEFINE_SPINLOCK(huge_anon_orders_lock);
static LIST_HEAD(thpsize_list);

struct thpsize {
	struct kobject kobj;
	struct list_head node;
	int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

static ssize_t thpsize_enabled_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int order = to_thpsize(kobj)->order;
	const char *output;

	if (test_bit(order, &huge_anon_orders_always))
		output = "[always] inherit madvise never";
	else if (test_bit(order, &huge_anon_orders_inherit))
		output = "always [inherit] madvise never";
	else if (test_bit(order, &huge_anon_orders_madvise))
		output = "always inherit [madvise] never";
	else
		output = "always inherit madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t thpsize_enabled_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int order = to_thpsize(kobj)->order;
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_always);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "inherit")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_inherit);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "madvise")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		set_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "never")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else
		ret = -EINVAL;

	return ret;
}

static struct kobj_attribute thpsize_enabled_attr =
	__ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);

static struct attribute *thpsize_attrs[] = {
	&thpsize_enabled_attr.attr,
	NULL,
};

static const struct attribute_group thpsize_attr_group = {
	.attrs = thpsize_attrs,
};

static const struct kobj_type thpsize_ktype = {
	.release = &thpsize_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct thpsize *thpsize_create(int order, struct kobject *parent)
{
	unsigned long size = (PAGE_SIZE << order) / SZ_1K;
	struct thpsize *thpsize;
	int ret;

	thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
	if (!thpsize)
		return ERR_PTR(-ENOMEM);

	ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
				   "hugepages-%lukB", size);
	if (ret) {
		kfree(thpsize);
		return ERR_PTR(ret);
	}

	ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
	if (ret) {
		kobject_put(&thpsize->kobj);
		return ERR_PTR(ret);
	}

	thpsize->order = order;
	return thpsize;
}

static void thpsize_release(struct kobject *kobj)
{
	kfree(to_thpsize(kobj));
}

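/*
 * Build the sysfs hierarchy: the top-level transparent_hugepage kobject
 * plus one hugepages-<size>kB subdirectory (with its own "enabled" file)
 * for each supported anonymous THP order.
 */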
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;
	struct thpsize *thpsize;
	unsigned long orders;
	int order;

	/*
	 * Default to setting PMD-sized THP to inherit the global setting and
	 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
	 * constant so we have to do this here.
	 */
	huge_anon_orders_inherit = BIT(PMD_ORDER);

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	orders = THP_ORDERS_ALL_ANON;
	order = highest_order(orders);
	while (orders) {
		thpsize = thpsize_create(order, *hugepage_kobj);
		if (IS_ERR(thpsize)) {
			pr_err("failed to create thpsize for order %d\n", order);
			err = PTR_ERR(thpsize);
			goto remove_all;
		}
		list_add(&thpsize->node, &thpsize_list);
		order = next_order(&orders, order);
	}

	return 0;

remove_all:
	hugepage_exit_sysfs(*hugepage_kobj);
	return err;
remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	struct thpsize *thpsize, *tmp;

	list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
		list_del(&thpsize->node);
		kobject_put(&thpsize->kobj);
	}

	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init thp_shrinker_init(void)
{
	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
	if (!huge_zero_page_shrinker)
		return -ENOMEM;

	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						 SHRINKER_MEMCG_AWARE |
						 SHRINKER_NONSLAB,
						 "thp-deferred_split");
	if (!deferred_split_shrinker) {
		shrinker_free(huge_zero_page_shrinker);
		return -ENOMEM;
	}

	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
	shrinker_register(huge_zero_page_shrinker);

	deferred_split_shrinker->count_objects = deferred_split_count;
	deferred_split_shrinker->scan_objects = deferred_split_scan;
	shrinker_register(deferred_split_shrinker);

	return 0;
}

static void __init thp_shrinker_exit(void)
{
	shrinker_free(huge_zero_page_shrinker);
	shrinker_free(deferred_split_shrinker);
}

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = thp_shrinker_init();
	if (err)
		goto err_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	thp_shrinker_exit();
err_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

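/*
 * Note: the transparent_hugepage= boot parameter above accepts the same
 * "always", "madvise" and "never" values as the sysfs "enabled" file.
 * The remainder of this file implements the runtime paths: huge page
 * fault handling, huge zero page insertion, and the copy/follow paths
 * for huge PMD and PUD mappings.
 */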
f55e1014 762pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
71e3aac0 763{
f55e1014 764 if (likely(vma->vm_flags & VM_WRITE))
161e393c 765 pmd = pmd_mkwrite(pmd, vma);
71e3aac0
AA
766 return pmd;
767}
768
87eaceb3 769#ifdef CONFIG_MEMCG
f8baa6be
MWO
770static inline
771struct deferred_split *get_deferred_split_queue(struct folio *folio)
9a982250 772{
f8baa6be
MWO
773 struct mem_cgroup *memcg = folio_memcg(folio);
774 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
87eaceb3
YS
775
776 if (memcg)
777 return &memcg->deferred_split_queue;
778 else
779 return &pgdat->deferred_split_queue;
9a982250 780}
87eaceb3 781#else
f8baa6be
MWO
782static inline
783struct deferred_split *get_deferred_split_queue(struct folio *folio)
87eaceb3 784{
f8baa6be 785 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
87eaceb3
YS
786
787 return &pgdat->deferred_split_queue;
788}
789#endif
9a982250 790
da6e7bf3 791void folio_prep_large_rmappable(struct folio *folio)
9a982250 792{
8991de90
MWO
793 VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
794 INIT_LIST_HEAD(&folio->_deferred_list);
de53c05f 795 folio_set_large_rmappable(folio);
9a982250
KS
796}
797
a644b0ab 798static inline bool is_transparent_hugepage(struct folio *folio)
005ba37c 799{
a644b0ab 800 if (!folio_test_large(folio))
fa1f68cc 801 return false;
005ba37c 802
f04029f3 803 return is_huge_zero_page(&folio->page) ||
de53c05f 804 folio_test_large_rmappable(folio);
005ba37c 805}
005ba37c 806
97d3d0f9
KS
807static unsigned long __thp_get_unmapped_area(struct file *filp,
808 unsigned long addr, unsigned long len,
74d2fad1
TK
809 loff_t off, unsigned long flags, unsigned long size)
810{
74d2fad1
TK
811 loff_t off_end = off + len;
812 loff_t off_align = round_up(off, size);
96204e15 813 unsigned long len_pad, ret, off_sub;
74d2fad1 814
4ef9ad19
YS
815 if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
816 return 0;
817
74d2fad1
TK
818 if (off_end <= off_align || (off_end - off_align) < size)
819 return 0;
820
821 len_pad = len + size;
822 if (len_pad < len || (off + len_pad) < off)
823 return 0;
824
97d3d0f9 825 ret = current->mm->get_unmapped_area(filp, addr, len_pad,
74d2fad1 826 off >> PAGE_SHIFT, flags);
97d3d0f9
KS
827
828 /*
829 * The failure might be due to length padding. The caller will retry
830 * without the padding.
831 */
832 if (IS_ERR_VALUE(ret))
74d2fad1
TK
833 return 0;
834
97d3d0f9
KS
835 /*
836 * Do not try to align to THP boundary if allocation at the address
837 * hint succeeds.
838 */
839 if (ret == addr)
840 return addr;
841
96204e15
RR
842 off_sub = (off - ret) & (size - 1);
843
844 if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown &&
845 !off_sub)
846 return ret + size;
847
848 ret += off_sub;
97d3d0f9 849 return ret;
74d2fad1
TK
850}
851
852unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
853 unsigned long len, unsigned long pgoff, unsigned long flags)
854{
97d3d0f9 855 unsigned long ret;
74d2fad1
TK
856 loff_t off = (loff_t)pgoff << PAGE_SHIFT;
857
97d3d0f9
KS
858 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
859 if (ret)
860 return ret;
1854bc6e 861
74d2fad1
TK
862 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
863}
864EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
865
2b740303
SJ
866static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
867 struct page *page, gfp_t gfp)
71e3aac0 868{
82b0f8c3 869 struct vm_area_struct *vma = vmf->vma;
cfe3236d 870 struct folio *folio = page_folio(page);
71e3aac0 871 pgtable_t pgtable;
82b0f8c3 872 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
2b740303 873 vm_fault_t ret = 0;
71e3aac0 874
cfe3236d 875 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
00501b53 876
cfe3236d
KW
877 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
878 folio_put(folio);
6b251fc9 879 count_vm_event(THP_FAULT_FALLBACK);
85b9f46e 880 count_vm_event(THP_FAULT_FALLBACK_CHARGE);
6b251fc9
AA
881 return VM_FAULT_FALLBACK;
882 }
cfe3236d 883 folio_throttle_swaprate(folio, gfp);
00501b53 884
4cf58924 885 pgtable = pte_alloc_one(vma->vm_mm);
00501b53 886 if (unlikely(!pgtable)) {
6b31d595
MH
887 ret = VM_FAULT_OOM;
888 goto release;
00501b53 889 }
71e3aac0 890
c79b57e4 891 clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
52f37629 892 /*
cfe3236d 893 * The memory barrier inside __folio_mark_uptodate makes sure that
52f37629
MK
894 * clear_huge_page writes become visible before the set_pmd_at()
895 * write.
896 */
cfe3236d 897 __folio_mark_uptodate(folio);
71e3aac0 898
82b0f8c3
JK
899 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
900 if (unlikely(!pmd_none(*vmf->pmd))) {
6b31d595 901 goto unlock_release;
71e3aac0
AA
902 } else {
903 pmd_t entry;
6b251fc9 904
6b31d595
MH
905 ret = check_stable_address_space(vma->vm_mm);
906 if (ret)
907 goto unlock_release;
908
6b251fc9
AA
909 /* Deliver the page fault to userland */
910 if (userfaultfd_missing(vma)) {
82b0f8c3 911 spin_unlock(vmf->ptl);
cfe3236d 912 folio_put(folio);
bae473a4 913 pte_free(vma->vm_mm, pgtable);
8fd5eda4
ML
914 ret = handle_userfault(vmf, VM_UFFD_MISSING);
915 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
916 return ret;
6b251fc9
AA
917 }
918
3122359a 919 entry = mk_huge_pmd(page, vma->vm_page_prot);
f55e1014 920 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
cfe3236d
KW
921 folio_add_new_anon_rmap(folio, vma, haddr);
922 folio_add_lru_vma(folio, vma);
82b0f8c3
JK
923 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
924 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
fca40573 925 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
bae473a4 926 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
c4812909 927 mm_inc_nr_ptes(vma->vm_mm);
82b0f8c3 928 spin_unlock(vmf->ptl);
6b251fc9 929 count_vm_event(THP_FAULT_ALLOC);
9d82c694 930 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
71e3aac0
AA
931 }
932
aa2e878e 933 return 0;
6b31d595
MH
934unlock_release:
935 spin_unlock(vmf->ptl);
936release:
937 if (pgtable)
938 pte_free(vma->vm_mm, pgtable);
cfe3236d 939 folio_put(folio);
6b31d595
MH
940 return ret;
941
71e3aac0
AA
942}
943
444eb2a4 944/*
21440d7e
DR
945 * always: directly stall for all thp allocations
946 * defer: wake kswapd and fail if not immediately available
947 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
948 * fail if not immediately available
949 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
950 * available
951 * never: never stall for any thp allocation
444eb2a4 952 */
164cc4fe 953gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
444eb2a4 954{
164cc4fe 955 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
2f0799a0 956
ac79f78d 957 /* Always do synchronous compaction */
a8282608
AA
958 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
959 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
ac79f78d
DR
960
961 /* Kick kcompactd and fail quickly */
21440d7e 962 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
19deb769 963 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
ac79f78d
DR
964
965 /* Synchronous compaction if madvised, otherwise kick kcompactd */
21440d7e 966 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
19deb769
DR
967 return GFP_TRANSHUGE_LIGHT |
968 (vma_madvised ? __GFP_DIRECT_RECLAIM :
969 __GFP_KSWAPD_RECLAIM);
ac79f78d
DR
970
971 /* Only do synchronous compaction if madvised */
21440d7e 972 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
19deb769
DR
973 return GFP_TRANSHUGE_LIGHT |
974 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
ac79f78d 975
19deb769 976 return GFP_TRANSHUGE_LIGHT;
444eb2a4
MG
977}
978
c4088ebd 979/* Caller must hold page table lock. */
2efeb8da 980static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
97ae1749 981 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
5918d10a 982 struct page *zero_page)
fc9fe822
KS
983{
984 pmd_t entry;
7c414164 985 if (!pmd_none(*pmd))
2efeb8da 986 return;
5918d10a 987 entry = mk_pmd(zero_page, vma->vm_page_prot);
fc9fe822 988 entry = pmd_mkhuge(entry);
c8bb4163 989 pgtable_trans_huge_deposit(mm, pmd, pgtable);
fc9fe822 990 set_pmd_at(mm, haddr, pmd, entry);
c4812909 991 mm_inc_nr_ptes(mm);
fc9fe822
KS
992}
993
2b740303 994vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
71e3aac0 995{
82b0f8c3 996 struct vm_area_struct *vma = vmf->vma;
077fcf11 997 gfp_t gfp;
cb196ee1 998 struct folio *folio;
82b0f8c3 999 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
71e3aac0 1000
3485b883 1001 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
c0292554 1002 return VM_FAULT_FALLBACK;
128ec037
KS
1003 if (unlikely(anon_vma_prepare(vma)))
1004 return VM_FAULT_OOM;
4fa6893f 1005 khugepaged_enter_vma(vma, vma->vm_flags);
d2081b2b 1006
82b0f8c3 1007 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
bae473a4 1008 !mm_forbids_zeropage(vma->vm_mm) &&
128ec037
KS
1009 transparent_hugepage_use_zero_page()) {
1010 pgtable_t pgtable;
1011 struct page *zero_page;
2b740303 1012 vm_fault_t ret;
4cf58924 1013 pgtable = pte_alloc_one(vma->vm_mm);
128ec037 1014 if (unlikely(!pgtable))
ba76149f 1015 return VM_FAULT_OOM;
6fcb52a5 1016 zero_page = mm_get_huge_zero_page(vma->vm_mm);
128ec037 1017 if (unlikely(!zero_page)) {
bae473a4 1018 pte_free(vma->vm_mm, pgtable);
81ab4201 1019 count_vm_event(THP_FAULT_FALLBACK);
c0292554 1020 return VM_FAULT_FALLBACK;
b9bbfbe3 1021 }
82b0f8c3 1022 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
6b251fc9 1023 ret = 0;
82b0f8c3 1024 if (pmd_none(*vmf->pmd)) {
6b31d595
MH
1025 ret = check_stable_address_space(vma->vm_mm);
1026 if (ret) {
1027 spin_unlock(vmf->ptl);
bfe8cc1d 1028 pte_free(vma->vm_mm, pgtable);
6b31d595 1029 } else if (userfaultfd_missing(vma)) {
82b0f8c3 1030 spin_unlock(vmf->ptl);
bfe8cc1d 1031 pte_free(vma->vm_mm, pgtable);
82b0f8c3 1032 ret = handle_userfault(vmf, VM_UFFD_MISSING);
6b251fc9
AA
1033 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
1034 } else {
bae473a4 1035 set_huge_zero_page(pgtable, vma->vm_mm, vma,
82b0f8c3 1036 haddr, vmf->pmd, zero_page);
fca40573 1037 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
82b0f8c3 1038 spin_unlock(vmf->ptl);
6b251fc9 1039 }
bfe8cc1d 1040 } else {
82b0f8c3 1041 spin_unlock(vmf->ptl);
bae473a4 1042 pte_free(vma->vm_mm, pgtable);
bfe8cc1d 1043 }
6b251fc9 1044 return ret;
71e3aac0 1045 }
164cc4fe 1046 gfp = vma_thp_gfp_mask(vma);
cb196ee1
MWO
1047 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
1048 if (unlikely(!folio)) {
128ec037 1049 count_vm_event(THP_FAULT_FALLBACK);
c0292554 1050 return VM_FAULT_FALLBACK;
128ec037 1051 }
cb196ee1 1052 return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
71e3aac0
AA
1053}
1054
ae18d6dc 1055static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
3b6521f5
OH
1056 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
1057 pgtable_t pgtable)
5cad465d
MW
1058{
1059 struct mm_struct *mm = vma->vm_mm;
1060 pmd_t entry;
1061 spinlock_t *ptl;
1062
1063 ptl = pmd_lock(mm, pmd);
c6f3c5ee
AK
1064 if (!pmd_none(*pmd)) {
1065 if (write) {
1066 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
1067 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
1068 goto out_unlock;
1069 }
1070 entry = pmd_mkyoung(*pmd);
1071 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1072 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
1073 update_mmu_cache_pmd(vma, addr, pmd);
1074 }
1075
1076 goto out_unlock;
1077 }
1078
f25748e3
DW
1079 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
1080 if (pfn_t_devmap(pfn))
1081 entry = pmd_mkdevmap(entry);
01871e59 1082 if (write) {
f55e1014
LT
1083 entry = pmd_mkyoung(pmd_mkdirty(entry));
1084 entry = maybe_pmd_mkwrite(entry, vma);
5cad465d 1085 }
3b6521f5
OH
1086
1087 if (pgtable) {
1088 pgtable_trans_huge_deposit(mm, pmd, pgtable);
c4812909 1089 mm_inc_nr_ptes(mm);
c6f3c5ee 1090 pgtable = NULL;
3b6521f5
OH
1091 }
1092
01871e59
RZ
1093 set_pmd_at(mm, addr, pmd, entry);
1094 update_mmu_cache_pmd(vma, addr, pmd);
c6f3c5ee
AK
1095
1096out_unlock:
5cad465d 1097 spin_unlock(ptl);
c6f3c5ee
AK
1098 if (pgtable)
1099 pte_free(mm, pgtable);
5cad465d
MW
1100}
1101
9a9731b1 1102/**
7b806d22 1103 * vmf_insert_pfn_pmd - insert a pmd size pfn
9a9731b1
THV
1104 * @vmf: Structure describing the fault
1105 * @pfn: pfn to insert
9a9731b1
THV
1106 * @write: whether it's a write fault
1107 *
7b806d22 1108 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
9a9731b1
THV
1109 *
1110 * Return: vm_fault_t value.
1111 */
7b806d22 1112vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
5cad465d 1113{
fce86ff5
DW
1114 unsigned long addr = vmf->address & PMD_MASK;
1115 struct vm_area_struct *vma = vmf->vma;
7b806d22 1116 pgprot_t pgprot = vma->vm_page_prot;
3b6521f5 1117 pgtable_t pgtable = NULL;
fce86ff5 1118
5cad465d
MW
1119 /*
1120 * If we had pmd_special, we could avoid all these restrictions,
1121 * but we need to be consistent with PTEs and architectures that
1122 * can't support a 'special' bit.
1123 */
e1fb4a08
DJ
1124 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1125 !pfn_t_devmap(pfn));
5cad465d
MW
1126 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1127 (VM_PFNMAP|VM_MIXEDMAP));
1128 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
5cad465d
MW
1129
1130 if (addr < vma->vm_start || addr >= vma->vm_end)
1131 return VM_FAULT_SIGBUS;
308a047c 1132
3b6521f5 1133 if (arch_needs_pgtable_deposit()) {
4cf58924 1134 pgtable = pte_alloc_one(vma->vm_mm);
3b6521f5
OH
1135 if (!pgtable)
1136 return VM_FAULT_OOM;
1137 }
1138
308a047c
BP
1139 track_pfn_insert(vma, &pgprot, pfn);
1140
fce86ff5 1141 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
ae18d6dc 1142 return VM_FAULT_NOPAGE;
5cad465d 1143}
7b806d22 1144EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
5cad465d 1145
a00cc7d9 1146#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
f55e1014 1147static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
a00cc7d9 1148{
f55e1014 1149 if (likely(vma->vm_flags & VM_WRITE))
a00cc7d9
MW
1150 pud = pud_mkwrite(pud);
1151 return pud;
1152}
1153
1154static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
7b806d22 1155 pud_t *pud, pfn_t pfn, bool write)
a00cc7d9
MW
1156{
1157 struct mm_struct *mm = vma->vm_mm;
7b806d22 1158 pgprot_t prot = vma->vm_page_prot;
a00cc7d9
MW
1159 pud_t entry;
1160 spinlock_t *ptl;
1161
1162 ptl = pud_lock(mm, pud);
c6f3c5ee
AK
1163 if (!pud_none(*pud)) {
1164 if (write) {
1165 if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
1166 WARN_ON_ONCE(!is_huge_zero_pud(*pud));
1167 goto out_unlock;
1168 }
1169 entry = pud_mkyoung(*pud);
1170 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
1171 if (pudp_set_access_flags(vma, addr, pud, entry, 1))
1172 update_mmu_cache_pud(vma, addr, pud);
1173 }
1174 goto out_unlock;
1175 }
1176
a00cc7d9
MW
1177 entry = pud_mkhuge(pfn_t_pud(pfn, prot));
1178 if (pfn_t_devmap(pfn))
1179 entry = pud_mkdevmap(entry);
1180 if (write) {
f55e1014
LT
1181 entry = pud_mkyoung(pud_mkdirty(entry));
1182 entry = maybe_pud_mkwrite(entry, vma);
a00cc7d9
MW
1183 }
1184 set_pud_at(mm, addr, pud, entry);
1185 update_mmu_cache_pud(vma, addr, pud);
c6f3c5ee
AK
1186
1187out_unlock:
a00cc7d9
MW
1188 spin_unlock(ptl);
1189}
1190
9a9731b1 1191/**
7b806d22 1192 * vmf_insert_pfn_pud - insert a pud size pfn
9a9731b1
THV
1193 * @vmf: Structure describing the fault
1194 * @pfn: pfn to insert
9a9731b1
THV
1195 * @write: whether it's a write fault
1196 *
7b806d22 1197 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
9a9731b1
THV
1198 *
1199 * Return: vm_fault_t value.
1200 */
7b806d22 1201vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
a00cc7d9 1202{
fce86ff5
DW
1203 unsigned long addr = vmf->address & PUD_MASK;
1204 struct vm_area_struct *vma = vmf->vma;
7b806d22 1205 pgprot_t pgprot = vma->vm_page_prot;
fce86ff5 1206
a00cc7d9
MW
1207 /*
1208 * If we had pud_special, we could avoid all these restrictions,
1209 * but we need to be consistent with PTEs and architectures that
1210 * can't support a 'special' bit.
1211 */
62ec0d8c
DJ
1212 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1213 !pfn_t_devmap(pfn));
a00cc7d9
MW
1214 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1215 (VM_PFNMAP|VM_MIXEDMAP));
1216 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
a00cc7d9
MW
1217
1218 if (addr < vma->vm_start || addr >= vma->vm_end)
1219 return VM_FAULT_SIGBUS;
1220
1221 track_pfn_insert(vma, &pgprot, pfn);
1222
7b806d22 1223 insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
a00cc7d9
MW
1224 return VM_FAULT_NOPAGE;
1225}
7b806d22 1226EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
a00cc7d9
MW
1227#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1228
3565fce3 1229static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
a69e4717 1230 pmd_t *pmd, bool write)
3565fce3
DW
1231{
1232 pmd_t _pmd;
1233
a8f97366 1234 _pmd = pmd_mkyoung(*pmd);
a69e4717 1235 if (write)
a8f97366 1236 _pmd = pmd_mkdirty(_pmd);
3565fce3 1237 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
a69e4717 1238 pmd, _pmd, write))
3565fce3
DW
1239 update_mmu_cache_pmd(vma, addr, pmd);
1240}
1241
1242struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
df06b37f 1243 pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
3565fce3
DW
1244{
1245 unsigned long pfn = pmd_pfn(*pmd);
1246 struct mm_struct *mm = vma->vm_mm;
3565fce3 1247 struct page *page;
0f089235 1248 int ret;
3565fce3
DW
1249
1250 assert_spin_locked(pmd_lockptr(mm, pmd));
1251
f6f37321 1252 if (flags & FOLL_WRITE && !pmd_write(*pmd))
3565fce3
DW
1253 return NULL;
1254
1255 if (pmd_present(*pmd) && pmd_devmap(*pmd))
1256 /* pass */;
1257 else
1258 return NULL;
1259
1260 if (flags & FOLL_TOUCH)
a69e4717 1261 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
3565fce3
DW
1262
1263 /*
1264 * device mapped pages can only be returned if the
1265 * caller will manage the page reference count.
1266 */
3faa52c0 1267 if (!(flags & (FOLL_GET | FOLL_PIN)))
3565fce3
DW
1268 return ERR_PTR(-EEXIST);
1269
1270 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
df06b37f
KB
1271 *pgmap = get_dev_pagemap(pfn, *pgmap);
1272 if (!*pgmap)
3565fce3
DW
1273 return ERR_PTR(-EFAULT);
1274 page = pfn_to_page(pfn);
0f089235
LG
1275 ret = try_grab_page(page, flags);
1276 if (ret)
1277 page = ERR_PTR(ret);
3565fce3
DW
1278
1279 return page;
1280}
1281
71e3aac0
AA
1282int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1283 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
8f34f1ea 1284 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
71e3aac0 1285{
c4088ebd 1286 spinlock_t *dst_ptl, *src_ptl;
71e3aac0 1287 struct page *src_page;
96c772c2 1288 struct folio *src_folio;
71e3aac0 1289 pmd_t pmd;
12c9d70b 1290 pgtable_t pgtable = NULL;
628d47ce 1291 int ret = -ENOMEM;
71e3aac0 1292
628d47ce 1293 /* Skip if can be re-fill on fault */
8f34f1ea 1294 if (!vma_is_anonymous(dst_vma))
628d47ce
KS
1295 return 0;
1296
4cf58924 1297 pgtable = pte_alloc_one(dst_mm);
628d47ce
KS
1298 if (unlikely(!pgtable))
1299 goto out;
71e3aac0 1300
c4088ebd
KS
1301 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1302 src_ptl = pmd_lockptr(src_mm, src_pmd);
1303 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
71e3aac0
AA
1304
1305 ret = -EAGAIN;
1306 pmd = *src_pmd;
84c3fc4e
ZY
1307
1308#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1309 if (unlikely(is_swap_pmd(pmd))) {
1310 swp_entry_t entry = pmd_to_swp_entry(pmd);
1311
1312 VM_BUG_ON(!is_pmd_migration_entry(pmd));
6c287605 1313 if (!is_readable_migration_entry(entry)) {
4dd845b5
AP
1314 entry = make_readable_migration_entry(
1315 swp_offset(entry));
84c3fc4e 1316 pmd = swp_entry_to_pmd(entry);
ab6e3d09
NH
1317 if (pmd_swp_soft_dirty(*src_pmd))
1318 pmd = pmd_swp_mksoft_dirty(pmd);
8f34f1ea
PX
1319 if (pmd_swp_uffd_wp(*src_pmd))
1320 pmd = pmd_swp_mkuffd_wp(pmd);
84c3fc4e
ZY
1321 set_pmd_at(src_mm, addr, src_pmd, pmd);
1322 }
dd8a67f9 1323 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
af5b0f6a 1324 mm_inc_nr_ptes(dst_mm);
dd8a67f9 1325 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
8f34f1ea
PX
1326 if (!userfaultfd_wp(dst_vma))
1327 pmd = pmd_swp_clear_uffd_wp(pmd);
84c3fc4e
ZY
1328 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1329 ret = 0;
1330 goto out_unlock;
1331 }
1332#endif
1333
628d47ce 1334 if (unlikely(!pmd_trans_huge(pmd))) {
71e3aac0
AA
1335 pte_free(dst_mm, pgtable);
1336 goto out_unlock;
1337 }
fc9fe822 1338 /*
c4088ebd 1339 * When page table lock is held, the huge zero pmd should not be
fc9fe822
KS
1340 * under splitting since we don't split the page itself, only pmd to
1341 * a page table.
1342 */
1343 if (is_huge_zero_pmd(pmd)) {
97ae1749
KS
1344 /*
1345 * get_huge_zero_page() will never allocate a new page here,
1346 * since we already have a zero page to copy. It just takes a
1347 * reference.
1348 */
5fc7a5f6
PX
1349 mm_get_huge_zero_page(dst_mm);
1350 goto out_zero_page;
fc9fe822 1351 }
de466bd6 1352
628d47ce
KS
1353 src_page = pmd_page(pmd);
1354 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
96c772c2 1355 src_folio = page_folio(src_page);
d042035e 1356
96c772c2
DH
1357 folio_get(src_folio);
1358 if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
fb3d824d 1359 /* Page maybe pinned: split and retry the fault on PTEs. */
96c772c2 1360 folio_put(src_folio);
d042035e
PX
1361 pte_free(dst_mm, pgtable);
1362 spin_unlock(src_ptl);
1363 spin_unlock(dst_ptl);
8f34f1ea 1364 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
d042035e
PX
1365 return -EAGAIN;
1366 }
628d47ce 1367 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
5fc7a5f6 1368out_zero_page:
c4812909 1369 mm_inc_nr_ptes(dst_mm);
628d47ce 1370 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
71e3aac0 1371 pmdp_set_wrprotect(src_mm, addr, src_pmd);
8f34f1ea
PX
1372 if (!userfaultfd_wp(dst_vma))
1373 pmd = pmd_clear_uffd_wp(pmd);
71e3aac0
AA
1374 pmd = pmd_mkold(pmd_wrprotect(pmd));
1375 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
71e3aac0
AA
1376
1377 ret = 0;
1378out_unlock:
c4088ebd
KS
1379 spin_unlock(src_ptl);
1380 spin_unlock(dst_ptl);
71e3aac0
AA
1381out:
1382 return ret;
1383}
1384
a00cc7d9
MW
1385#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1386static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
5fe653e9 1387 pud_t *pud, bool write)
a00cc7d9
MW
1388{
1389 pud_t _pud;
1390
a8f97366 1391 _pud = pud_mkyoung(*pud);
5fe653e9 1392 if (write)
a8f97366 1393 _pud = pud_mkdirty(_pud);
a00cc7d9 1394 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
5fe653e9 1395 pud, _pud, write))
a00cc7d9
MW
1396 update_mmu_cache_pud(vma, addr, pud);
1397}
1398
1399struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
df06b37f 1400 pud_t *pud, int flags, struct dev_pagemap **pgmap)
a00cc7d9
MW
1401{
1402 unsigned long pfn = pud_pfn(*pud);
1403 struct mm_struct *mm = vma->vm_mm;
a00cc7d9 1404 struct page *page;
0f089235 1405 int ret;
a00cc7d9
MW
1406
1407 assert_spin_locked(pud_lockptr(mm, pud));
1408
f6f37321 1409 if (flags & FOLL_WRITE && !pud_write(*pud))
a00cc7d9
MW
1410 return NULL;
1411
1412 if (pud_present(*pud) && pud_devmap(*pud))
1413 /* pass */;
1414 else
1415 return NULL;
1416
1417 if (flags & FOLL_TOUCH)
5fe653e9 1418 touch_pud(vma, addr, pud, flags & FOLL_WRITE);
a00cc7d9
MW
1419
1420 /*
1421 * device mapped pages can only be returned if the
1422 * caller will manage the page reference count.
3faa52c0
JH
1423 *
1424 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
a00cc7d9 1425 */
3faa52c0 1426 if (!(flags & (FOLL_GET | FOLL_PIN)))
a00cc7d9
MW
1427 return ERR_PTR(-EEXIST);
1428
1429 pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
df06b37f
KB
1430 *pgmap = get_dev_pagemap(pfn, *pgmap);
1431 if (!*pgmap)
a00cc7d9
MW
1432 return ERR_PTR(-EFAULT);
1433 page = pfn_to_page(pfn);
0f089235
LG
1434
1435 ret = try_grab_page(page, flags);
1436 if (ret)
1437 page = ERR_PTR(ret);
a00cc7d9
MW
1438
1439 return page;
1440}
1441
1442int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1443 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1444 struct vm_area_struct *vma)
1445{
1446 spinlock_t *dst_ptl, *src_ptl;
1447 pud_t pud;
1448 int ret;
1449
1450 dst_ptl = pud_lock(dst_mm, dst_pud);
1451 src_ptl = pud_lockptr(src_mm, src_pud);
1452 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1453
1454 ret = -EAGAIN;
1455 pud = *src_pud;
1456 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1457 goto out_unlock;
1458
1459 /*
1460 * When page table lock is held, the huge zero pud should not be
1461 * under splitting since we don't split the page itself, only pud to
1462 * a page table.
1463 */
1464 if (is_huge_zero_pud(pud)) {
1465 /* No huge zero pud yet */
1466 }
1467
fb3d824d 1468 /*
96c772c2
DH
1469 * TODO: once we support anonymous pages, use
1470 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
fb3d824d 1471 */
a00cc7d9
MW
1472 pudp_set_wrprotect(src_mm, addr, src_pud);
1473 pud = pud_mkold(pud_wrprotect(pud));
1474 set_pud_at(dst_mm, addr, dst_pud, pud);
1475
1476 ret = 0;
1477out_unlock:
1478 spin_unlock(src_ptl);
1479 spin_unlock(dst_ptl);
1480 return ret;
1481}
1482
1483void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1484{
a00cc7d9
MW
1485 bool write = vmf->flags & FAULT_FLAG_WRITE;
1486
1487 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1488 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1489 goto unlock;
1490
5fe653e9 1491 touch_pud(vmf->vma, vmf->address, vmf->pud, write);
a00cc7d9
MW
1492unlock:
1493 spin_unlock(vmf->ptl);
1494}
1495#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1496
5db4f15c 1497void huge_pmd_set_accessed(struct vm_fault *vmf)
a1dd450b 1498{
20f664aa 1499 bool write = vmf->flags & FAULT_FLAG_WRITE;
a1dd450b 1500
82b0f8c3 1501 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
a69e4717 1502 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
a1dd450b
WD
1503 goto unlock;
1504
a69e4717 1505 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
a1dd450b
WD
1506
1507unlock:
82b0f8c3 1508 spin_unlock(vmf->ptl);
a1dd450b
WD
1509}
1510
5db4f15c 1511vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
71e3aac0 1512{
c89357e2 1513 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
82b0f8c3 1514 struct vm_area_struct *vma = vmf->vma;
2fad3d14 1515 struct folio *folio;
3917c802 1516 struct page *page;
82b0f8c3 1517 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
5db4f15c 1518 pmd_t orig_pmd = vmf->orig_pmd;
71e3aac0 1519
82b0f8c3 1520 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
81d1b09c 1521 VM_BUG_ON_VMA(!vma->anon_vma, vma);
3917c802 1522
93b4796d 1523 if (is_huge_zero_pmd(orig_pmd))
3917c802
KS
1524 goto fallback;
1525
82b0f8c3 1526 spin_lock(vmf->ptl);
3917c802
KS
1527
1528 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1529 spin_unlock(vmf->ptl);
1530 return 0;
1531 }
71e3aac0
AA
1532
1533 page = pmd_page(orig_pmd);
2fad3d14 1534 folio = page_folio(page);
f6004e73 1535 VM_BUG_ON_PAGE(!PageHead(page), page);
3917c802 1536
6c287605
DH
1537 /* Early check when only holding the PT lock. */
1538 if (PageAnonExclusive(page))
1539 goto reuse;
1540
2fad3d14
MWO
1541 if (!folio_trylock(folio)) {
1542 folio_get(folio);
ba3c4ce6 1543 spin_unlock(vmf->ptl);
2fad3d14 1544 folio_lock(folio);
ba3c4ce6
HY
1545 spin_lock(vmf->ptl);
1546 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
3917c802 1547 spin_unlock(vmf->ptl);
2fad3d14
MWO
1548 folio_unlock(folio);
1549 folio_put(folio);
3917c802 1550 return 0;
ba3c4ce6 1551 }
2fad3d14 1552 folio_put(folio);
ba3c4ce6 1553 }
3917c802 1554
6c287605
DH
1555 /* Recheck after temporarily dropping the PT lock. */
1556 if (PageAnonExclusive(page)) {
2fad3d14 1557 folio_unlock(folio);
6c287605
DH
1558 goto reuse;
1559 }
1560
3917c802 1561 /*
2fad3d14
MWO
1562 * See do_wp_page(): we can only reuse the folio exclusively if
1563 * there are no additional references. Note that we always drain
1fec6890 1564 * the LRU cache immediately after adding a THP.
3917c802 1565 */
2fad3d14
MWO
1566 if (folio_ref_count(folio) >
1567 1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
3bff7e3f 1568 goto unlock_fallback;
2fad3d14
MWO
1569 if (folio_test_swapcache(folio))
1570 folio_free_swap(folio);
1571 if (folio_ref_count(folio) == 1) {
71e3aac0 1572 pmd_t entry;
6c54dc6c 1573
06968625 1574 folio_move_anon_rmap(folio, vma);
5ca43289 1575 SetPageAnonExclusive(page);
2fad3d14 1576 folio_unlock(folio);
6c287605 1577reuse:
c89357e2
DH
1578 if (unlikely(unshare)) {
1579 spin_unlock(vmf->ptl);
1580 return 0;
1581 }
71e3aac0 1582 entry = pmd_mkyoung(orig_pmd);
f55e1014 1583 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
3917c802 1584 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
82b0f8c3 1585 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
82b0f8c3 1586 spin_unlock(vmf->ptl);
cb8d8633 1587 return 0;
71e3aac0 1588 }
3917c802 1589
3bff7e3f 1590unlock_fallback:
2fad3d14 1591 folio_unlock(folio);
82b0f8c3 1592 spin_unlock(vmf->ptl);
3917c802
KS
1593fallback:
1594 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1595 return VM_FAULT_FALLBACK;
71e3aac0
AA
1596}
1597
c27f479e
DH
1598static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1599 unsigned long addr, pmd_t pmd)
1600{
1601 struct page *page;
1602
1603 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1604 return false;
1605
1606 /* Don't touch entries that are not even readable (NUMA hinting). */
1607 if (pmd_protnone(pmd))
1608 return false;
1609
1610 /* Do we need write faults for softdirty tracking? */
1611 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1612 return false;
1613
1614 /* Do we need write faults for uffd-wp tracking? */
1615 if (userfaultfd_huge_pmd_wp(vma, pmd))
1616 return false;
1617
1618 if (!(vma->vm_flags & VM_SHARED)) {
1619 /* See can_change_pte_writable(). */
1620 page = vm_normal_page_pmd(vma, addr, pmd);
1621 return page && PageAnon(page) && PageAnonExclusive(page);
1622 }
1623
1624 /* See can_change_pte_writable(). */
1625 return pmd_dirty(pmd);
1626}
1627
5535be30
DH
1628/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
1629static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
1630 struct vm_area_struct *vma,
1631 unsigned int flags)
8310d48b 1632{
5535be30
DH
1633 /* If the pmd is writable, we can write to the page. */
1634 if (pmd_write(pmd))
1635 return true;
1636
1637 /* Maybe FOLL_FORCE is set to override it? */
1638 if (!(flags & FOLL_FORCE))
1639 return false;
1640
1641 /* But FOLL_FORCE has no effect on shared mappings */
1642 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
1643 return false;
1644
1645 /* ... or read-only private ones */
1646 if (!(vma->vm_flags & VM_MAYWRITE))
1647 return false;
1648
1649 /* ... or already writable ones that just need to take a write fault */
1650 if (vma->vm_flags & VM_WRITE)
1651 return false;
1652
1653 /*
1654 * See can_change_pte_writable(): we broke COW and could map the page
1655 * writable if we have an exclusive anonymous page ...
1656 */
1657 if (!page || !PageAnon(page) || !PageAnonExclusive(page))
1658 return false;
1659
1660 /* ... and a write-fault isn't required for other reasons. */
1661 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1662 return false;
1663 return !userfaultfd_huge_pmd_wp(vma, pmd);
8310d48b
KF
1664}
1665
b676b293 1666struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
71e3aac0
AA
1667 unsigned long addr,
1668 pmd_t *pmd,
1669 unsigned int flags)
1670{
b676b293 1671 struct mm_struct *mm = vma->vm_mm;
5535be30 1672 struct page *page;
0f089235 1673 int ret;
71e3aac0 1674
c4088ebd 1675 assert_spin_locked(pmd_lockptr(mm, pmd));
71e3aac0 1676
5535be30
DH
1677 page = pmd_page(*pmd);
1678 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1679
1680 if ((flags & FOLL_WRITE) &&
1681 !can_follow_write_pmd(*pmd, page, vma, flags))
1682 return NULL;
71e3aac0 1683
85facf25
KS
1684 /* Avoid dumping huge zero page */
1685 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1686 return ERR_PTR(-EFAULT);
1687
d74943a2 1688 if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
5535be30 1689 return NULL;
3faa52c0 1690
84209e87 1691 if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
a7f22660
DH
1692 return ERR_PTR(-EMLINK);
1693
b6a2619c
DH
1694 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
1695 !PageAnonExclusive(page), page);
1696
0f089235
LG
1697 ret = try_grab_page(page, flags);
1698 if (ret)
1699 return ERR_PTR(ret);
3faa52c0 1700
3565fce3 1701 if (flags & FOLL_TOUCH)
a69e4717 1702 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
3faa52c0 1703
71e3aac0 1704 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
ca120cf6 1705 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
71e3aac0 1706
71e3aac0
AA
1707 return page;
1708}
1709
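/*
 * Illustrative sketch (editorial, not from this file): callers such as the
 * follow_page_mask() path in mm/gup.c are expected to hold the pmd lock
 * that assert_spin_locked() above checks for, roughly:
 *
 *	ptl = pmd_lock(mm, pmd);
 *	page = follow_trans_huge_pmd(vma, address, pmd, flags);
 *	spin_unlock(ptl);
 *
 * The result may be NULL or an ERR_PTR() value, which the caller
 * propagates as usual.
 */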
d10e63f2 1710/* NUMA hinting page fault entry point for trans huge pmds */
5db4f15c 1711vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
d10e63f2 1712{
82b0f8c3 1713 struct vm_area_struct *vma = vmf->vma;
c5b5a3dd
YS
1714 pmd_t oldpmd = vmf->orig_pmd;
1715 pmd_t pmd;
667ffc31 1716 struct folio *folio;
82b0f8c3 1717 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
667ffc31 1718 int nid = NUMA_NO_NODE;
33024536 1719 int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
6a56ccbc 1720 bool migrated = false, writable = false;
6688cc05 1721 int flags = 0;
d10e63f2 1722
82b0f8c3 1723 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
c5b5a3dd 1724 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
82b0f8c3 1725 spin_unlock(vmf->ptl);
de466bd6
MG
1726 goto out;
1727 }
1728
c5b5a3dd 1729 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
6a56ccbc
DH
1730
1731 /*
1732 * Detect now whether the PMD could be writable; this information
1733 * is only valid while holding the PT lock.
1734 */
1735 writable = pmd_write(pmd);
1736 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1737 can_change_pmd_writable(vma, vmf->address, pmd))
1738 writable = true;
1739
667ffc31
KW
1740 folio = vm_normal_folio_pmd(vma, haddr, pmd);
1741 if (!folio)
c5b5a3dd
YS
1742 goto out_map;
1743
1744 /* See similar comment in do_numa_page for explanation */
6a56ccbc 1745 if (!writable)
c5b5a3dd
YS
1746 flags |= TNF_NO_GROUP;
1747
667ffc31 1748 nid = folio_nid(folio);
33024536
HY
1749 /*
1750	 * For memory tiering mode, the cpupid of a slow memory page is used
1751	 * to record the page access time, so use the default value.
1752 */
667ffc31 1753 if (node_is_toptier(nid))
c4a8d2fa 1754 last_cpupid = folio_last_cpupid(folio);
cda6d936 1755 target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
c5b5a3dd 1756 if (target_nid == NUMA_NO_NODE) {
667ffc31 1757 folio_put(folio);
c5b5a3dd
YS
1758 goto out_map;
1759 }
1760
82b0f8c3 1761 spin_unlock(vmf->ptl);
6a56ccbc 1762 writable = false;
8b1b436d 1763
667ffc31 1764 migrated = migrate_misplaced_folio(folio, vma, target_nid);
6688cc05
PZ
1765 if (migrated) {
1766 flags |= TNF_MIGRATED;
667ffc31 1767 nid = target_nid;
c5b5a3dd 1768 } else {
074c2381 1769 flags |= TNF_MIGRATE_FAIL;
c5b5a3dd
YS
1770 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1771 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1772 spin_unlock(vmf->ptl);
1773 goto out;
1774 }
1775 goto out_map;
1776 }
b8916634
MG
1777
1778out:
667ffc31
KW
1779 if (nid != NUMA_NO_NODE)
1780 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
8191acbd 1781
d10e63f2 1782 return 0;
c5b5a3dd
YS
1783
1784out_map:
1785 /* Restore the PMD */
1786 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1787 pmd = pmd_mkyoung(pmd);
6a56ccbc 1788 if (writable)
161e393c 1789 pmd = pmd_mkwrite(pmd, vma);
c5b5a3dd
YS
1790 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1791 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1792 spin_unlock(vmf->ptl);
1793 goto out;
d10e63f2
MG
1794}
1795
319904ad
HY
1796/*
1797 * Return true if we do MADV_FREE successfully on entire pmd page.
1798 * Otherwise, return false.
1799 */
1800bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
b8d3c4c3 1801 pmd_t *pmd, unsigned long addr, unsigned long next)
b8d3c4c3
MK
1802{
1803 spinlock_t *ptl;
1804 pmd_t orig_pmd;
fc986a38 1805 struct folio *folio;
b8d3c4c3 1806 struct mm_struct *mm = tlb->mm;
319904ad 1807 bool ret = false;
b8d3c4c3 1808
ed6a7935 1809 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
07e32661 1810
b6ec57f4
KS
1811 ptl = pmd_trans_huge_lock(pmd, vma);
1812 if (!ptl)
25eedabe 1813 goto out_unlocked;
b8d3c4c3
MK
1814
1815 orig_pmd = *pmd;
319904ad 1816 if (is_huge_zero_pmd(orig_pmd))
b8d3c4c3 1817 goto out;
b8d3c4c3 1818
84c3fc4e
ZY
1819 if (unlikely(!pmd_present(orig_pmd))) {
1820 VM_BUG_ON(thp_migration_supported() &&
1821 !is_pmd_migration_entry(orig_pmd));
1822 goto out;
1823 }
1824
fc986a38 1825 folio = pfn_folio(pmd_pfn(orig_pmd));
b8d3c4c3 1826 /*
fc986a38
KW
1827	 * If other processes are mapping this folio, we cannot discard
1828	 * the folio unless they all do MADV_FREE, so let's skip the folio.
b8d3c4c3 1829 */
20b18aad 1830 if (folio_estimated_sharers(folio) != 1)
b8d3c4c3
MK
1831 goto out;
1832
fc986a38 1833 if (!folio_trylock(folio))
b8d3c4c3
MK
1834 goto out;
1835
1836 /*
1837	 * If the user wants to discard part of the THP's pages, split it so
1838	 * MADV_FREE will deactivate only those pages.
1839 */
1840 if (next - addr != HPAGE_PMD_SIZE) {
fc986a38 1841 folio_get(folio);
b8d3c4c3 1842 spin_unlock(ptl);
fc986a38
KW
1843 split_folio(folio);
1844 folio_unlock(folio);
1845 folio_put(folio);
b8d3c4c3
MK
1846 goto out_unlocked;
1847 }
1848
fc986a38
KW
1849 if (folio_test_dirty(folio))
1850 folio_clear_dirty(folio);
1851 folio_unlock(folio);
b8d3c4c3 1852
b8d3c4c3 1853 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
58ceeb6b 1854 pmdp_invalidate(vma, addr, pmd);
b8d3c4c3
MK
1855 orig_pmd = pmd_mkold(orig_pmd);
1856 orig_pmd = pmd_mkclean(orig_pmd);
1857
1858 set_pmd_at(mm, addr, pmd, orig_pmd);
1859 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1860 }
802a3a92 1861
6a6fe9eb 1862 folio_mark_lazyfree(folio);
319904ad 1863 ret = true;
b8d3c4c3
MK
1864out:
1865 spin_unlock(ptl);
1866out_unlocked:
1867 return ret;
1868}
1869
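/*
 * Worked example for the partial-range handling above (editorial sketch,
 * assuming 2M PMD-sized THPs): MADV_FREE over [0x200000, 0x400000) covers
 * the whole PMD, so next - addr == HPAGE_PMD_SIZE and the mapping is
 * marked lazyfree in place; MADV_FREE over [0x200000, 0x300000) covers
 * only half of it, so the folio is split here and the pte-level madvise
 * path then deactivates just the covered subpages.
 */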
953c66c2
AK
1870static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1871{
1872 pgtable_t pgtable;
1873
1874 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1875 pte_free(mm, pgtable);
c4812909 1876 mm_dec_nr_ptes(mm);
953c66c2
AK
1877}
1878
71e3aac0 1879int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
f21760b1 1880 pmd_t *pmd, unsigned long addr)
71e3aac0 1881{
da146769 1882 pmd_t orig_pmd;
bf929152 1883 spinlock_t *ptl;
71e3aac0 1884
ed6a7935 1885 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
07e32661 1886
b6ec57f4
KS
1887 ptl = __pmd_trans_huge_lock(pmd, vma);
1888 if (!ptl)
da146769
KS
1889 return 0;
1890 /*
1891 * For architectures like ppc64 we look at deposited pgtable
1892 * when calling pmdp_huge_get_and_clear. So do the
1893 * pgtable_trans_huge_withdraw after finishing pmdp related
1894 * operations.
1895 */
93a98695
AK
1896 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1897 tlb->fullmm);
e5136e87 1898 arch_check_zapped_pmd(vma, orig_pmd);
da146769 1899 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2484ca9b 1900 if (vma_is_special_huge(vma)) {
3b6521f5
OH
1901 if (arch_needs_pgtable_deposit())
1902 zap_deposited_table(tlb->mm, pmd);
da146769 1903 spin_unlock(ptl);
da146769 1904 } else if (is_huge_zero_pmd(orig_pmd)) {
c14a6eb4 1905 zap_deposited_table(tlb->mm, pmd);
da146769 1906 spin_unlock(ptl);
da146769 1907 } else {
616b8371
ZY
1908 struct page *page = NULL;
1909 int flush_needed = 1;
1910
1911 if (pmd_present(orig_pmd)) {
1912 page = pmd_page(orig_pmd);
a8e61d58 1913 folio_remove_rmap_pmd(page_folio(page), page, vma);
616b8371
ZY
1914 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1915 VM_BUG_ON_PAGE(!PageHead(page), page);
1916 } else if (thp_migration_supported()) {
1917 swp_entry_t entry;
1918
1919 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1920 entry = pmd_to_swp_entry(orig_pmd);
af5cdaf8 1921 page = pfn_swap_entry_to_page(entry);
616b8371
ZY
1922 flush_needed = 0;
1923 } else
1924 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1925
b5072380 1926 if (PageAnon(page)) {
c14a6eb4 1927 zap_deposited_table(tlb->mm, pmd);
b5072380
KS
1928 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1929 } else {
953c66c2
AK
1930 if (arch_needs_pgtable_deposit())
1931 zap_deposited_table(tlb->mm, pmd);
fadae295 1932 add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
b5072380 1933 }
616b8371 1934
da146769 1935 spin_unlock(ptl);
616b8371
ZY
1936 if (flush_needed)
1937 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
025c5b24 1938 }
da146769 1939 return 1;
71e3aac0
AA
1940}
1941
1dd38b6c
AK
1942#ifndef pmd_move_must_withdraw
1943static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1944 spinlock_t *old_pmd_ptl,
1945 struct vm_area_struct *vma)
1946{
1947 /*
1948 * With split pmd lock we also need to move preallocated
1949 * PTE page table if new_pmd is on different PMD page table.
1950 *
1951 * We also don't deposit and withdraw tables for file pages.
1952 */
1953 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1954}
1955#endif
1956
ab6e3d09
NH
1957static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1958{
1959#ifdef CONFIG_MEM_SOFT_DIRTY
1960 if (unlikely(is_pmd_migration_entry(pmd)))
1961 pmd = pmd_swp_mksoft_dirty(pmd);
1962 else if (pmd_present(pmd))
1963 pmd = pmd_mksoft_dirty(pmd);
1964#endif
1965 return pmd;
1966}
1967
bf8616d5 1968bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
b8aa9d9d 1969 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
37a1c49a 1970{
bf929152 1971 spinlock_t *old_ptl, *new_ptl;
37a1c49a 1972 pmd_t pmd;
37a1c49a 1973 struct mm_struct *mm = vma->vm_mm;
5d190420 1974 bool force_flush = false;
37a1c49a 1975
37a1c49a
AA
1976 /*
1977 * The destination pmd shouldn't be established, free_pgtables()
a5be621e
HD
1978 * should have released it; but move_page_tables() might have already
1979 * inserted a page table, if racing against shmem/file collapse.
37a1c49a 1980 */
a5be621e 1981 if (!pmd_none(*new_pmd)) {
37a1c49a 1982 VM_BUG_ON(pmd_trans_huge(*new_pmd));
4b471e88 1983 return false;
37a1c49a
AA
1984 }
1985
bf929152
KS
1986 /*
1987 * We don't have to worry about the ordering of src and dst
c1e8d7c6 1988 * ptlocks because exclusive mmap_lock prevents deadlock.
bf929152 1989 */
b6ec57f4
KS
1990 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1991 if (old_ptl) {
bf929152
KS
1992 new_ptl = pmd_lockptr(mm, new_pmd);
1993 if (new_ptl != old_ptl)
1994 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
8809aa2d 1995 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
eb66ae03 1996 if (pmd_present(pmd))
a2ce2666 1997 force_flush = true;
025c5b24 1998 VM_BUG_ON(!pmd_none(*new_pmd));
3592806c 1999
1dd38b6c 2000 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
b3084f4d 2001 pgtable_t pgtable;
3592806c
KS
2002 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
2003 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
3592806c 2004 }
ab6e3d09
NH
2005 pmd = move_soft_dirty_pmd(pmd);
2006 set_pmd_at(mm, new_addr, new_pmd, pmd);
5d190420 2007 if (force_flush)
7c38f181 2008 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
eb66ae03
LT
2009 if (new_ptl != old_ptl)
2010 spin_unlock(new_ptl);
bf929152 2011 spin_unlock(old_ptl);
4b471e88 2012 return true;
37a1c49a 2013 }
4b471e88 2014 return false;
37a1c49a
AA
2015}
2016
f123d74a
MG
2017/*
2018 * Returns
2019 * - 0 if PMD could not be locked
f0953a1b 2020 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
e346e668 2021 * or if prot_numa but THP migration is not supported
f0953a1b 2022 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
f123d74a 2023 */
4a18419f
NA
2024int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2025 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2026 unsigned long cp_flags)
cd7548ab
JW
2027{
2028 struct mm_struct *mm = vma->vm_mm;
bf929152 2029 spinlock_t *ptl;
c9fe6656 2030 pmd_t oldpmd, entry;
58705444 2031 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
292924b2
PX
2032 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2033 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6a56ccbc 2034 int ret = 1;
cd7548ab 2035
4a18419f
NA
2036 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2037
e346e668
YS
2038 if (prot_numa && !thp_migration_supported())
2039 return 1;
2040
b6ec57f4 2041 ptl = __pmd_trans_huge_lock(pmd, vma);
0a85e51d
KS
2042 if (!ptl)
2043 return 0;
e944fd67 2044
84c3fc4e
ZY
2045#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2046 if (is_swap_pmd(*pmd)) {
2047 swp_entry_t entry = pmd_to_swp_entry(*pmd);
d986ba2b 2048 struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
24bf08c4 2049 pmd_t newpmd;
84c3fc4e
ZY
2050
2051 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
4dd845b5 2052 if (is_writable_migration_entry(entry)) {
84c3fc4e
ZY
2053 /*
2054 * A protection check is difficult so
2055 * just be safe and disable write
2056 */
d986ba2b 2057 if (folio_test_anon(folio))
6c287605
DH
2058 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2059 else
2060 entry = make_readable_migration_entry(swp_offset(entry));
84c3fc4e 2061 newpmd = swp_entry_to_pmd(entry);
ab6e3d09
NH
2062 if (pmd_swp_soft_dirty(*pmd))
2063 newpmd = pmd_swp_mksoft_dirty(newpmd);
24bf08c4
DH
2064 } else {
2065 newpmd = *pmd;
84c3fc4e 2066 }
24bf08c4
DH
2067
2068 if (uffd_wp)
2069 newpmd = pmd_swp_mkuffd_wp(newpmd);
2070 else if (uffd_wp_resolve)
2071 newpmd = pmd_swp_clear_uffd_wp(newpmd);
2072 if (!pmd_same(*pmd, newpmd))
2073 set_pmd_at(mm, addr, pmd, newpmd);
84c3fc4e
ZY
2074 goto unlock;
2075 }
2076#endif
2077
a1a3a2fc 2078 if (prot_numa) {
d986ba2b 2079 struct folio *folio;
33024536 2080 bool toptier;
a1a3a2fc
HY
2081 /*
2082 * Avoid trapping faults against the zero page. The read-only
2083 * data is likely to be read-cached on the local CPU and
2084 * local/remote hits to the zero page are not interesting.
2085 */
2086 if (is_huge_zero_pmd(*pmd))
2087 goto unlock;
025c5b24 2088
a1a3a2fc
HY
2089 if (pmd_protnone(*pmd))
2090 goto unlock;
0a85e51d 2091
d986ba2b
KW
2092 folio = page_folio(pmd_page(*pmd));
2093 toptier = node_is_toptier(folio_nid(folio));
a1a3a2fc
HY
2094 /*
2095 * Skip scanning top tier node if normal numa
2096 * balancing is disabled
2097 */
2098 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
33024536 2099 toptier)
a1a3a2fc 2100 goto unlock;
33024536
HY
2101
2102 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
2103 !toptier)
d986ba2b
KW
2104 folio_xchg_access_time(folio,
2105 jiffies_to_msecs(jiffies));
a1a3a2fc 2106 }
ced10803 2107 /*
3e4e28c5 2108 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
ced10803 2109 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
3e4e28c5 2110 * which is also under mmap_read_lock(mm):
ced10803
KS
2111 *
2112 * CPU0: CPU1:
2113 * change_huge_pmd(prot_numa=1)
2114 * pmdp_huge_get_and_clear_notify()
2115 * madvise_dontneed()
2116 * zap_pmd_range()
2117 * pmd_trans_huge(*pmd) == 0 (without ptl)
2118 * // skip the pmd
2119 * set_pmd_at();
2120 * // pmd is re-established
2121 *
2122	 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
2123	 * which may break userspace.
2124 *
4f831457 2125 * pmdp_invalidate_ad() is required to make sure we don't miss
ced10803
KS
2126 * dirty/young flags set by hardware.
2127 */
4f831457 2128 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
ced10803 2129
c9fe6656 2130 entry = pmd_modify(oldpmd, newprot);
f1eb1bac 2131 if (uffd_wp)
292924b2 2132 entry = pmd_mkuffd_wp(entry);
f1eb1bac 2133 else if (uffd_wp_resolve)
292924b2
PX
2134 /*
2135 * Leave the write bit to be handled by PF interrupt
2136 * handler, then things like COW could be properly
2137 * handled.
2138 */
2139 entry = pmd_clear_uffd_wp(entry);
c27f479e
DH
2140
2141 /* See change_pte_range(). */
2142 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2143 can_change_pmd_writable(vma, addr, entry))
161e393c 2144 entry = pmd_mkwrite(entry, vma);
c27f479e 2145
0a85e51d
KS
2146 ret = HPAGE_PMD_NR;
2147 set_pmd_at(mm, addr, pmd, entry);
4a18419f 2148
c9fe6656
NA
2149 if (huge_pmd_needs_flush(oldpmd, entry))
2150 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
0a85e51d
KS
2151unlock:
2152 spin_unlock(ptl);
025c5b24
NH
2153 return ret;
2154}
2155
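/*
 * Illustrative caller sketch (editorial; loosely based on the pmd walk in
 * mm/mprotect.c): the three return values documented above are used to
 * decide whether the pte level still needs to be visited, roughly:
 *
 *	ret = change_huge_pmd(tlb, vma, pmd, addr, newprot, cp_flags);
 *	if (ret) {
 *		if (ret == HPAGE_PMD_NR)
 *			pages += HPAGE_PMD_NR;	// protections changed
 *		continue;			// huge pmd fully handled
 *	}
 *	// ret == 0: pmd could not be locked, fall through to the pte level
 */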
adef4406
AA
2156#ifdef CONFIG_USERFAULTFD
2157/*
2158 * The PT lock for src_pmd and the mmap_lock for reading are held by
2159 * the caller, but it must return after releasing the page_table_lock.
2160 * Just move the page from src_pmd to dst_pmd if possible.
2161 * Return zero if the page was moved, -EAGAIN if the operation needs to be
2162 * repeated by the caller, or another error code in case of failure.
2163 */
2164int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2165 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2166 unsigned long dst_addr, unsigned long src_addr)
2167{
2168 pmd_t _dst_pmd, src_pmdval;
2169 struct page *src_page;
2170 struct folio *src_folio;
2171 struct anon_vma *src_anon_vma;
2172 spinlock_t *src_ptl, *dst_ptl;
2173 pgtable_t src_pgtable;
2174 struct mmu_notifier_range range;
2175 int err = 0;
2176
2177 src_pmdval = *src_pmd;
2178 src_ptl = pmd_lockptr(mm, src_pmd);
2179
2180 lockdep_assert_held(src_ptl);
2181 mmap_assert_locked(mm);
2182
2183 /* Sanity checks before the operation */
2184 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2185 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2186 spin_unlock(src_ptl);
2187 return -EINVAL;
2188 }
2189
2190 if (!pmd_trans_huge(src_pmdval)) {
2191 spin_unlock(src_ptl);
2192 if (is_pmd_migration_entry(src_pmdval)) {
2193 pmd_migration_entry_wait(mm, &src_pmdval);
2194 return -EAGAIN;
2195 }
2196 return -ENOENT;
2197 }
2198
2199 src_page = pmd_page(src_pmdval);
2200 if (unlikely(!PageAnonExclusive(src_page))) {
2201 spin_unlock(src_ptl);
2202 return -EBUSY;
2203 }
2204
2205 src_folio = page_folio(src_page);
2206 folio_get(src_folio);
2207 spin_unlock(src_ptl);
2208
2209 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2210 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2211 src_addr + HPAGE_PMD_SIZE);
2212 mmu_notifier_invalidate_range_start(&range);
2213
2214 folio_lock(src_folio);
2215
2216 /*
2217 * split_huge_page walks the anon_vma chain without the page
2218	 * lock. Serialize against it with the anon_vma lock; the page
2219 * lock is not enough.
2220 */
2221 src_anon_vma = folio_get_anon_vma(src_folio);
2222 if (!src_anon_vma) {
2223 err = -EAGAIN;
2224 goto unlock_folio;
2225 }
2226 anon_vma_lock_write(src_anon_vma);
2227
2228 dst_ptl = pmd_lockptr(mm, dst_pmd);
2229 double_pt_lock(src_ptl, dst_ptl);
2230 if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2231 !pmd_same(*dst_pmd, dst_pmdval))) {
2232 err = -EAGAIN;
2233 goto unlock_ptls;
2234 }
2235 if (folio_maybe_dma_pinned(src_folio) ||
2236 !PageAnonExclusive(&src_folio->page)) {
2237 err = -EBUSY;
2238 goto unlock_ptls;
2239 }
2240
2241 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2242 WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2243 err = -EBUSY;
2244 goto unlock_ptls;
2245 }
2246
2247 folio_move_anon_rmap(src_folio, dst_vma);
2248 WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
2249
2250 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2251 /* Folio got pinned from under us. Put it back and fail the move. */
2252 if (folio_maybe_dma_pinned(src_folio)) {
2253 set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2254 err = -EBUSY;
2255 goto unlock_ptls;
2256 }
2257
2258 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
2259 /* Follow mremap() behavior and treat the entry dirty after the move */
2260 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2261 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2262
2263 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2264 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2265unlock_ptls:
2266 double_pt_unlock(src_ptl, dst_ptl);
2267 anon_vma_unlock_write(src_anon_vma);
2268 put_anon_vma(src_anon_vma);
2269unlock_folio:
2270 /* unblock rmap walks */
2271 folio_unlock(src_folio);
2272 mmu_notifier_invalidate_range_end(&range);
2273 folio_put(src_folio);
2274 return err;
2275}
2276#endif /* CONFIG_USERFAULTFD */
2277
025c5b24 2278/*
8f19b0c0 2279 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
025c5b24 2280 *
8f19b0c0
HY
2281 * Note that if it returns page table lock pointer, this routine returns without
2282 * unlocking page table lock. So callers must unlock it.
025c5b24 2283 */
b6ec57f4 2284spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
025c5b24 2285{
b6ec57f4
KS
2286 spinlock_t *ptl;
2287 ptl = pmd_lock(vma->vm_mm, pmd);
84c3fc4e
ZY
2288 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
2289 pmd_devmap(*pmd)))
b6ec57f4
KS
2290 return ptl;
2291 spin_unlock(ptl);
2292 return NULL;
cd7548ab
JW
2293}
2294
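/*
 * Typical usage, as seen in zap_huge_pmd() and change_huge_pmd() above
 * (sketch only):
 *
 *	ptl = __pmd_trans_huge_lock(pmd, vma);
 *	if (!ptl)
 *		return 0;	// not a huge (or swap/devmap) pmd
 *	... operate on *pmd with the lock held ...
 *	spin_unlock(ptl);	// the caller must unlock, as noted above
 */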
a00cc7d9 2295/*
d965e390 2296 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
a00cc7d9 2297 *
d965e390
ML
2298 * Note that if it returns page table lock pointer, this routine returns without
2299 * unlocking page table lock. So callers must unlock it.
a00cc7d9
MW
2300 */
2301spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2302{
2303 spinlock_t *ptl;
2304
2305 ptl = pud_lock(vma->vm_mm, pud);
2306 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2307 return ptl;
2308 spin_unlock(ptl);
2309 return NULL;
2310}
2311
2312#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2313int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2314 pud_t *pud, unsigned long addr)
2315{
a00cc7d9
MW
2316 spinlock_t *ptl;
2317
2318 ptl = __pud_trans_huge_lock(pud, vma);
2319 if (!ptl)
2320 return 0;
74929079 2321
f32928ab 2322 pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
a00cc7d9 2323 tlb_remove_pud_tlb_entry(tlb, pud, addr);
2484ca9b 2324 if (vma_is_special_huge(vma)) {
a00cc7d9
MW
2325 spin_unlock(ptl);
2326 /* No zero page support yet */
2327 } else {
2328 /* No support for anonymous PUD pages yet */
2329 BUG();
2330 }
2331 return 1;
2332}
2333
2334static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2335 unsigned long haddr)
2336{
2337 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2338 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2339 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2340 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2341
ce9311cf 2342 count_vm_event(THP_SPLIT_PUD);
a00cc7d9 2343
ec8832d0 2344 pudp_huge_clear_flush(vma, haddr, pud);
a00cc7d9
MW
2345}
2346
2347void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2348 unsigned long address)
2349{
2350 spinlock_t *ptl;
ac46d4f3 2351 struct mmu_notifier_range range;
a00cc7d9 2352
7d4a8be0 2353 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
6f4f13e8 2354 address & HPAGE_PUD_MASK,
ac46d4f3
JG
2355 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2356 mmu_notifier_invalidate_range_start(&range);
2357 ptl = pud_lock(vma->vm_mm, pud);
a00cc7d9
MW
2358 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2359 goto out;
ac46d4f3 2360 __split_huge_pud_locked(vma, pud, range.start);
a00cc7d9
MW
2361
2362out:
2363 spin_unlock(ptl);
ec8832d0 2364 mmu_notifier_invalidate_range_end(&range);
a00cc7d9
MW
2365}
2366#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2367
eef1b3ba
KS
2368static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2369 unsigned long haddr, pmd_t *pmd)
2370{
2371 struct mm_struct *mm = vma->vm_mm;
2372 pgtable_t pgtable;
42b2af2c 2373 pmd_t _pmd, old_pmd;
c9c1ee20
HD
2374 unsigned long addr;
2375 pte_t *pte;
eef1b3ba
KS
2376 int i;
2377
0f10851e
JG
2378 /*
2379	 * Leave the pmd empty until the ptes are filled. Note that it is fine to
2380	 * delay notification until mmu_notifier_invalidate_range_end() as we are
2381 * replacing a zero pmd write protected page with a zero pte write
2382 * protected page.
2383 *
ee65728e 2384 * See Documentation/mm/mmu_notifier.rst
0f10851e 2385 */
42b2af2c 2386 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
eef1b3ba
KS
2387
2388 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2389 pmd_populate(mm, &_pmd, pgtable);
2390
c9c1ee20
HD
2391 pte = pte_offset_map(&_pmd, haddr);
2392 VM_BUG_ON(!pte);
2393 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2394 pte_t entry;
2395
2396 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
eef1b3ba 2397 entry = pte_mkspecial(entry);
42b2af2c
DH
2398 if (pmd_uffd_wp(old_pmd))
2399 entry = pte_mkuffd_wp(entry);
c33c7948 2400 VM_BUG_ON(!pte_none(ptep_get(pte)));
c9c1ee20
HD
2401 set_pte_at(mm, addr, pte, entry);
2402 pte++;
eef1b3ba 2403 }
c9c1ee20 2404 pte_unmap(pte - 1);
eef1b3ba
KS
2405 smp_wmb(); /* make pte visible before pmd */
2406 pmd_populate(mm, pmd, pgtable);
eef1b3ba
KS
2407}
2408
2409static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
ba988280 2410 unsigned long haddr, bool freeze)
eef1b3ba
KS
2411{
2412 struct mm_struct *mm = vma->vm_mm;
91b2978a 2413 struct folio *folio;
eef1b3ba
KS
2414 struct page *page;
2415 pgtable_t pgtable;
423ac9af 2416 pmd_t old_pmd, _pmd;
292924b2 2417 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
0ccf7f16 2418 bool anon_exclusive = false, dirty = false;
2ac015e2 2419 unsigned long addr;
c9c1ee20 2420 pte_t *pte;
eef1b3ba
KS
2421 int i;
2422
2423 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2424 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2425 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
84c3fc4e
ZY
2426 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2427 && !pmd_devmap(*pmd));
eef1b3ba
KS
2428
2429 count_vm_event(THP_SPLIT_PMD);
2430
d21b9e57 2431 if (!vma_is_anonymous(vma)) {
ec8832d0 2432 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
953c66c2
AK
2433 /*
2434 * We are going to unmap this huge page. So
2435 * just go ahead and zap it
2436 */
2437 if (arch_needs_pgtable_deposit())
2438 zap_deposited_table(mm, pmd);
2484ca9b 2439 if (vma_is_special_huge(vma))
d21b9e57 2440 return;
99fa8a48
HD
2441 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2442 swp_entry_t entry;
2443
2444 entry = pmd_to_swp_entry(old_pmd);
af5cdaf8 2445 page = pfn_swap_entry_to_page(entry);
99fa8a48
HD
2446 } else {
2447 page = pmd_page(old_pmd);
a8e61d58
DH
2448 folio = page_folio(page);
2449 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
db44c658 2450 folio_mark_dirty(folio);
a8e61d58
DH
2451 if (!folio_test_referenced(folio) && pmd_young(old_pmd))
2452 folio_set_referenced(folio);
2453 folio_remove_rmap_pmd(folio, page, vma);
2454 folio_put(folio);
99fa8a48 2455 }
fadae295 2456 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
eef1b3ba 2457 return;
99fa8a48
HD
2458 }
2459
3b77e8c8 2460 if (is_huge_zero_pmd(*pmd)) {
4645b9fe
JG
2461 /*
2462 * FIXME: Do we want to invalidate secondary mmu by calling
1af5a810
AP
2463 * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below
2464 * inside __split_huge_pmd() ?
4645b9fe
JG
2465 *
2466 * We are going from a zero huge page write protected to zero
2467 * small page also write protected so it does not seems useful
2468 * to invalidate secondary mmu at this time.
2469 */
eef1b3ba
KS
2470 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2471 }
2472
423ac9af
AK
2473 /*
2474 * Up to this point the pmd is present and huge and userland has the
2475 * whole access to the hugepage during the split (which happens in
2476 * place). If we overwrite the pmd with the not-huge version pointing
2477 * to the pte here (which of course we could if all CPUs were bug
2478 * free), userland could trigger a small page size TLB miss on the
2479 * small sized TLB while the hugepage TLB entry is still established in
2480	 * the huge TLB. Some CPUs don't like that.
42742d9b
AK
2481 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2482	 * 383 on page 105. Intel should be safe but also warns that it's
423ac9af
AK
2483	 * only safe if the permission and cache attributes of the two entries
2484	 * loaded in the two TLBs are identical (which should be the case here).
2485 * But it is generally safer to never allow small and huge TLB entries
2486 * for the same virtual address to be loaded simultaneously. So instead
2487 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2488 * current pmd notpresent (atomically because here the pmd_trans_huge
2489 * must remain set at all times on the pmd until the split is complete
2490 * for this pmd), then we flush the SMP TLB and finally we write the
2491 * non-huge version of the pmd entry with pmd_populate.
2492 */
2493 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2494
423ac9af 2495 pmd_migration = is_pmd_migration_entry(old_pmd);
2e83ee1d 2496 if (unlikely(pmd_migration)) {
84c3fc4e
ZY
2497 swp_entry_t entry;
2498
423ac9af 2499 entry = pmd_to_swp_entry(old_pmd);
af5cdaf8 2500 page = pfn_swap_entry_to_page(entry);
4dd845b5 2501 write = is_writable_migration_entry(entry);
6c287605
DH
2502 if (PageAnon(page))
2503 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2e346877
PX
2504 young = is_migration_entry_young(entry);
2505 dirty = is_migration_entry_dirty(entry);
2e83ee1d 2506 soft_dirty = pmd_swp_soft_dirty(old_pmd);
f45ec5ff 2507 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2e83ee1d 2508 } else {
423ac9af 2509 page = pmd_page(old_pmd);
91b2978a 2510 folio = page_folio(page);
0ccf7f16
PX
2511 if (pmd_dirty(old_pmd)) {
2512 dirty = true;
91b2978a 2513 folio_set_dirty(folio);
0ccf7f16 2514 }
2e83ee1d
PX
2515 write = pmd_write(old_pmd);
2516 young = pmd_young(old_pmd);
2517 soft_dirty = pmd_soft_dirty(old_pmd);
292924b2 2518 uffd_wp = pmd_uffd_wp(old_pmd);
6c287605 2519
91b2978a
DH
2520 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2521 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
6c287605
DH
2522
2523 /*
2524 * Without "freeze", we'll simply split the PMD, propagating the
2525 * PageAnonExclusive() flag for each PTE by setting it for
2526 * each subpage -- no need to (temporarily) clear.
2527 *
2528 * With "freeze" we want to replace mapped pages by
2529 * migration entries right away. This is only possible if we
2530 * managed to clear PageAnonExclusive() -- see
2531 * set_pmd_migration_entry().
2532 *
2533 * In case we cannot clear PageAnonExclusive(), split the PMD
2534 * only and let try_to_migrate_one() fail later.
088b8aa5 2535 *
e3b4b137 2536 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
6c287605 2537 */
91b2978a 2538 anon_exclusive = PageAnonExclusive(page);
e3b4b137
DH
2539 if (freeze && anon_exclusive &&
2540 folio_try_share_anon_rmap_pmd(folio, page))
6c287605 2541 freeze = false;
91b2978a
DH
2542 if (!freeze) {
2543 rmap_t rmap_flags = RMAP_NONE;
2544
2545 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2546 if (anon_exclusive)
2547 rmap_flags |= RMAP_EXCLUSIVE;
2548 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
2549 vma, haddr, rmap_flags);
2550 }
2e83ee1d 2551 }
eef1b3ba 2552
423ac9af
AK
2553 /*
2554 * Withdraw the table only after we mark the pmd entry invalid.
2555 * This's critical for some architectures (Power).
2556 */
eef1b3ba
KS
2557 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2558 pmd_populate(mm, &_pmd, pgtable);
2559
c9c1ee20
HD
2560 pte = pte_offset_map(&_pmd, haddr);
2561 VM_BUG_ON(!pte);
2ac015e2 2562 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
c9c1ee20 2563 pte_t entry;
eef1b3ba
KS
2564 /*
2565 * Note that NUMA hinting access restrictions are not
2566 * transferred to avoid any possibility of altering
2567 * permissions across VMAs.
2568 */
84c3fc4e 2569 if (freeze || pmd_migration) {
ba988280 2570 swp_entry_t swp_entry;
4dd845b5
AP
2571 if (write)
2572 swp_entry = make_writable_migration_entry(
2573 page_to_pfn(page + i));
6c287605
DH
2574 else if (anon_exclusive)
2575 swp_entry = make_readable_exclusive_migration_entry(
2576 page_to_pfn(page + i));
4dd845b5
AP
2577 else
2578 swp_entry = make_readable_migration_entry(
2579 page_to_pfn(page + i));
2e346877
PX
2580 if (young)
2581 swp_entry = make_migration_entry_young(swp_entry);
2582 if (dirty)
2583 swp_entry = make_migration_entry_dirty(swp_entry);
ba988280 2584 entry = swp_entry_to_pte(swp_entry);
804dd150
AA
2585 if (soft_dirty)
2586 entry = pte_swp_mksoft_dirty(entry);
f45ec5ff
PX
2587 if (uffd_wp)
2588 entry = pte_swp_mkuffd_wp(entry);
ba988280 2589 } else {
6d2329f8 2590 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
1462c52e 2591 if (write)
161e393c 2592 entry = pte_mkwrite(entry, vma);
ba988280
KS
2593 if (!young)
2594 entry = pte_mkold(entry);
e833bc50
PX
2595 /* NOTE: this may set soft-dirty too on some archs */
2596 if (dirty)
2597 entry = pte_mkdirty(entry);
804dd150
AA
2598 if (soft_dirty)
2599 entry = pte_mksoft_dirty(entry);
292924b2
PX
2600 if (uffd_wp)
2601 entry = pte_mkuffd_wp(entry);
ba988280 2602 }
c33c7948 2603 VM_BUG_ON(!pte_none(ptep_get(pte)));
2ac015e2 2604 set_pte_at(mm, addr, pte, entry);
c9c1ee20 2605 pte++;
eef1b3ba 2606 }
c9c1ee20 2607 pte_unmap(pte - 1);
eef1b3ba 2608
cb67f428 2609 if (!pmd_migration)
a8e61d58 2610 folio_remove_rmap_pmd(folio, page, vma);
96d82deb
HD
2611 if (freeze)
2612 put_page(page);
eef1b3ba
KS
2613
2614 smp_wmb(); /* make pte visible before pmd */
2615 pmd_populate(mm, pmd, pgtable);
2616}
2617
2618void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
af28a988 2619 unsigned long address, bool freeze, struct folio *folio)
eef1b3ba
KS
2620{
2621 spinlock_t *ptl;
ac46d4f3 2622 struct mmu_notifier_range range;
eef1b3ba 2623
7d4a8be0 2624 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
6f4f13e8 2625 address & HPAGE_PMD_MASK,
ac46d4f3
JG
2626 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2627 mmu_notifier_invalidate_range_start(&range);
2628 ptl = pmd_lock(vma->vm_mm, pmd);
33f4751e
NH
2629
2630 /*
af28a988
MWO
2631 * If caller asks to setup a migration entry, we need a folio to check
2632 * pmd against. Otherwise we can end up replacing wrong folio.
33f4751e 2633 */
af28a988 2634 VM_BUG_ON(freeze && !folio);
83a8441f 2635 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
33f4751e 2636
7f760917 2637 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
83a8441f 2638 is_pmd_migration_entry(*pmd)) {
cea33328
ML
2639 /*
2640 * It's safe to call pmd_page when folio is set because it's
2641 * guaranteed that pmd is present.
2642 */
83a8441f
MWO
2643 if (folio && folio != page_folio(pmd_page(*pmd)))
2644 goto out;
7f760917 2645 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
83a8441f 2646 }
7f760917 2647
e90309c9 2648out:
eef1b3ba 2649 spin_unlock(ptl);
ec8832d0 2650 mmu_notifier_invalidate_range_end(&range);
eef1b3ba
KS
2651}
2652
fec89c10 2653void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
af28a988 2654 bool freeze, struct folio *folio)
94fcc585 2655{
50722804 2656 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
94fcc585 2657
50722804 2658 if (!pmd)
f72e7dcd
HD
2659 return;
2660
af28a988 2661 __split_huge_pmd(vma, pmd, address, freeze, folio);
94fcc585
AA
2662}
2663
71f9e58e
ML
2664static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2665{
2666 /*
2667 * If the new address isn't hpage aligned and it could previously
2668	 * contain a hugepage: check if we need to split a huge pmd.
2669 */
2670 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2671 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2672 ALIGN(address, HPAGE_PMD_SIZE)))
2673 split_huge_pmd_address(vma, address, false, NULL);
2674}
2675
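/*
 * Worked example for the alignment check above (editorial sketch, assuming
 * 2M PMDs): for address 0x210000, ALIGN_DOWN() gives 0x200000 and ALIGN()
 * gives 0x400000; if the vma covers that whole [0x200000, 0x400000) range,
 * the address may sit in the middle of a huge pmd, so the pmd covering it
 * is split if it is huge. A PMD-aligned address such as 0x400000 is
 * skipped outright.
 */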
e1b9996b 2676void vma_adjust_trans_huge(struct vm_area_struct *vma,
94fcc585
AA
2677 unsigned long start,
2678 unsigned long end,
2679 long adjust_next)
2680{
71f9e58e
ML
2681 /* Check if we need to split start first. */
2682 split_huge_pmd_if_needed(vma, start);
94fcc585 2683
71f9e58e
ML
2684 /* Check if we need to split end next. */
2685 split_huge_pmd_if_needed(vma, end);
94fcc585
AA
2686
2687 /*
68540502 2688 * If we're also updating the next vma vm_start,
71f9e58e 2689 * check if we need to split it.
94fcc585
AA
2690 */
2691 if (adjust_next > 0) {
68540502 2692 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
94fcc585 2693 unsigned long nstart = next->vm_start;
f9d86a60 2694 nstart += adjust_next;
71f9e58e 2695 split_huge_pmd_if_needed(next, nstart);
94fcc585
AA
2696 }
2697}
e9b61f19 2698
684555aa 2699static void unmap_folio(struct folio *folio)
e9b61f19 2700{
a98a2f0c 2701 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
3027c6f8 2702 TTU_SYNC | TTU_BATCH_FLUSH;
e9b61f19 2703
684555aa 2704 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
e9b61f19 2705
a98a2f0c
AP
2706 /*
2707 * Anon pages need migration entries to preserve them, but file
2708 * pages can simply be left unmapped, then faulted back on demand.
2709 * If that is ever changed (perhaps for mlock), update remap_page().
2710 */
4b8554c5
MWO
2711 if (folio_test_anon(folio))
2712 try_to_migrate(folio, ttu_flags);
a98a2f0c 2713 else
869f7ee6 2714 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
3027c6f8
BW
2715
2716 try_to_unmap_flush();
e9b61f19
KS
2717}
2718
4eecb8b9 2719static void remap_page(struct folio *folio, unsigned long nr)
e9b61f19 2720{
4eecb8b9 2721 int i = 0;
ab02c252 2722
684555aa 2723 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
4eecb8b9 2724 if (!folio_test_anon(folio))
ab02c252 2725 return;
4eecb8b9
MWO
2726 for (;;) {
2727 remove_migration_ptes(folio, folio, true);
2728 i += folio_nr_pages(folio);
2729 if (i >= nr)
2730 break;
2731 folio = folio_next(folio);
ace71a19 2732 }
e9b61f19
KS
2733}
2734
94866635 2735static void lru_add_page_tail(struct page *head, struct page *tail,
88dcb9a3
AS
2736 struct lruvec *lruvec, struct list_head *list)
2737{
94866635
AS
2738 VM_BUG_ON_PAGE(!PageHead(head), head);
2739 VM_BUG_ON_PAGE(PageCompound(tail), head);
2740 VM_BUG_ON_PAGE(PageLRU(tail), head);
6168d0da 2741 lockdep_assert_held(&lruvec->lru_lock);
88dcb9a3 2742
6dbb5741 2743 if (list) {
88dcb9a3 2744 /* page reclaim is reclaiming a huge page */
6dbb5741 2745 VM_WARN_ON(PageLRU(head));
94866635
AS
2746 get_page(tail);
2747 list_add_tail(&tail->lru, list);
88dcb9a3 2748 } else {
6dbb5741
AS
2749 /* head is still on lru (and we have it frozen) */
2750 VM_WARN_ON(!PageLRU(head));
07ca7606
HD
2751 if (PageUnevictable(tail))
2752 tail->mlock_count = 0;
2753 else
2754 list_add_tail(&tail->lru, &head->lru);
6dbb5741 2755 SetPageLRU(tail);
88dcb9a3
AS
2756 }
2757}
2758
07e09c48 2759static void __split_huge_page_tail(struct folio *folio, int tail,
e9b61f19
KS
2760 struct lruvec *lruvec, struct list_head *list)
2761{
07e09c48 2762 struct page *head = &folio->page;
e9b61f19 2763 struct page *page_tail = head + tail;
07e09c48
DH
2764 /*
2765 * Careful: new_folio is not a "real" folio before we cleared PageTail.
2766 * Don't pass it around before clear_compound_head().
2767 */
2768 struct folio *new_folio = (struct folio *)page_tail;
e9b61f19 2769
8df651c7 2770 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
e9b61f19
KS
2771
2772 /*
605ca5ed
KK
2773 * Clone page flags before unfreezing refcount.
2774 *
2775	 * After a successful get_page_unless_zero(), a flags change might follow,
8958b249 2776	 * for example lock_page(), which sets PG_waiters.
6c287605
DH
2777 *
2778 * Note that for mapped sub-pages of an anonymous THP,
684555aa 2779 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
6c287605
DH
2780 * the migration entry instead from where remap_page() will restore it.
2781 * We can still have PG_anon_exclusive set on effectively unmapped and
2782 * unreferenced sub-pages of an anonymous THP: we can simply drop
2783 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
e9b61f19 2784 */
e9b61f19
KS
2785 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2786 page_tail->flags |= (head->flags &
2787 ((1L << PG_referenced) |
2788 (1L << PG_swapbacked) |
38d8b4e6 2789 (1L << PG_swapcache) |
e9b61f19
KS
2790 (1L << PG_mlocked) |
2791 (1L << PG_uptodate) |
2792 (1L << PG_active) |
1899ad18 2793 (1L << PG_workingset) |
e9b61f19 2794 (1L << PG_locked) |
b8d3c4c3 2795 (1L << PG_unevictable) |
b0284cd2 2796#ifdef CONFIG_ARCH_USES_PG_ARCH_X
72e6afa0 2797 (1L << PG_arch_2) |
ef6458b1 2798 (1L << PG_arch_3) |
72e6afa0 2799#endif
ec1c86b2
YZ
2800 (1L << PG_dirty) |
2801 LRU_GEN_MASK | LRU_REFS_MASK));
e9b61f19 2802
cb67f428 2803 /* ->mapping in first and second tail page is replaced by other uses */
173d9d9f
HD
2804 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2805 page_tail);
2806 page_tail->mapping = head->mapping;
2807 page_tail->index = head->index + tail;
71e2d666
MG
2808
2809 /*
cfeed8ff
DH
2810 * page->private should not be set in tail pages. Fix up and warn once
2811 * if private is unexpectedly set.
71e2d666 2812 */
cfeed8ff
DH
2813 if (unlikely(page_tail->private)) {
2814 VM_WARN_ON_ONCE_PAGE(true, page_tail);
71e2d666
MG
2815 page_tail->private = 0;
2816 }
07e09c48
DH
2817 if (folio_test_swapcache(folio))
2818 new_folio->swap.val = folio->swap.val + tail;
173d9d9f 2819
605ca5ed 2820 /* Page flags must be visible before we make the page non-compound. */
e9b61f19
KS
2821 smp_wmb();
2822
605ca5ed
KK
2823 /*
2824 * Clear PageTail before unfreezing page refcount.
2825 *
2826	 * After a successful get_page_unless_zero(), a put_page() might follow,
2827	 * which needs a correct compound_head().
2828 */
e9b61f19
KS
2829 clear_compound_head(page_tail);
2830
605ca5ed 2831 /* Finally unfreeze refcount. Additional reference from page cache. */
b7542769
KW
2832 page_ref_unfreeze(page_tail, 1 + (!folio_test_anon(folio) ||
2833 folio_test_swapcache(folio)));
605ca5ed 2834
b7542769
KW
2835 if (folio_test_young(folio))
2836 folio_set_young(new_folio);
2837 if (folio_test_idle(folio))
2838 folio_set_idle(new_folio);
e9b61f19 2839
c8253011 2840 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
94723aaf
MH
2841
2842 /*
2843 * always add to the tail because some iterators expect new
2844 * pages to show after the currently processed elements - e.g.
2845 * migrate_pages
2846 */
e9b61f19 2847 lru_add_page_tail(head, page_tail, lruvec, list);
e9b61f19
KS
2848}
2849
baa355fd 2850static void __split_huge_page(struct page *page, struct list_head *list,
b6769834 2851 pgoff_t end)
e9b61f19 2852{
e809c3fe
MWO
2853 struct folio *folio = page_folio(page);
2854 struct page *head = &folio->page;
e9b61f19 2855 struct lruvec *lruvec;
4101196b
MWO
2856 struct address_space *swap_cache = NULL;
2857 unsigned long offset = 0;
8cce5475 2858 unsigned int nr = thp_nr_pages(head);
509f0069 2859 int i, nr_dropped = 0;
e9b61f19 2860
e9b61f19 2861 /* complete memcg works before add pages to LRU */
be6c8982 2862 split_page_memcg(head, nr);
e9b61f19 2863
07e09c48
DH
2864 if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
2865 offset = swp_offset(folio->swap);
2866 swap_cache = swap_address_space(folio->swap);
4101196b
MWO
2867 xa_lock(&swap_cache->i_pages);
2868 }
2869
f0953a1b 2870 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
e809c3fe 2871 lruvec = folio_lruvec_lock(folio);
b6769834 2872
eac96c3e
YS
2873 ClearPageHasHWPoisoned(head);
2874
8cce5475 2875 for (i = nr - 1; i >= 1; i--) {
07e09c48 2876 __split_huge_page_tail(folio, i, lruvec, list);
d144bf62 2877 /* Some pages can be beyond EOF: drop them from page cache */
baa355fd 2878 if (head[i].index >= end) {
fb5c2029
MWO
2879 struct folio *tail = page_folio(head + i);
2880
d144bf62 2881 if (shmem_mapping(head->mapping))
509f0069 2882 nr_dropped++;
fb5c2029
MWO
2883 else if (folio_test_clear_dirty(tail))
2884 folio_account_cleaned(tail,
2885 inode_to_wb(folio->mapping->host));
2886 __filemap_remove_folio(tail, NULL);
2887 folio_put(tail);
4101196b
MWO
2888 } else if (!PageAnon(page)) {
2889 __xa_store(&head->mapping->i_pages, head[i].index,
2890 head + i, 0);
2891 } else if (swap_cache) {
2892 __xa_store(&swap_cache->i_pages, offset + i,
2893 head + i, 0);
baa355fd
KS
2894 }
2895 }
e9b61f19
KS
2896
2897 ClearPageCompound(head);
6168d0da 2898 unlock_page_lruvec(lruvec);
b6769834 2899 /* Caller disabled irqs, so they are still disabled here */
f7da677b 2900
8cce5475 2901 split_page_owner(head, nr);
f7da677b 2902
baa355fd
KS
2903 /* See comment in __split_huge_page_tail() */
2904 if (PageAnon(head)) {
aa5dc07f 2905 /* Additional pin to swap cache */
4101196b 2906 if (PageSwapCache(head)) {
38d8b4e6 2907 page_ref_add(head, 2);
4101196b
MWO
2908 xa_unlock(&swap_cache->i_pages);
2909 } else {
38d8b4e6 2910 page_ref_inc(head);
4101196b 2911 }
baa355fd 2912 } else {
aa5dc07f 2913 /* Additional pin to page cache */
baa355fd 2914 page_ref_add(head, 2);
b93b0163 2915 xa_unlock(&head->mapping->i_pages);
baa355fd 2916 }
b6769834 2917 local_irq_enable();
e9b61f19 2918
509f0069
HD
2919 if (nr_dropped)
2920 shmem_uncharge(head->mapping->host, nr_dropped);
4eecb8b9 2921 remap_page(folio, nr);
e9b61f19 2922
07e09c48
DH
2923 if (folio_test_swapcache(folio))
2924 split_swap_cluster(folio->swap);
c4f9c701 2925
8cce5475 2926 for (i = 0; i < nr; i++) {
e9b61f19
KS
2927 struct page *subpage = head + i;
2928 if (subpage == page)
2929 continue;
2930 unlock_page(subpage);
2931
2932 /*
2933		 * Subpages may be freed if there wasn't any mapping,
2934		 * e.g. if add_to_swap() is running on an LRU page that
2935		 * had its mapping zapped. Freeing these pages
2936		 * requires taking the lru_lock, so we do the put_page
2937		 * of the tail pages after the split is complete.
2938 */
0b175468 2939 free_page_and_swap_cache(subpage);
e9b61f19
KS
2940 }
2941}
2942
b8f593cd 2943/* Racy check whether the huge page can be split */
d4b4084a 2944bool can_split_folio(struct folio *folio, int *pextra_pins)
b8f593cd
HY
2945{
2946 int extra_pins;
2947
aa5dc07f 2948 /* Additional pins from page cache */
d4b4084a
MWO
2949 if (folio_test_anon(folio))
2950 extra_pins = folio_test_swapcache(folio) ?
2951 folio_nr_pages(folio) : 0;
b8f593cd 2952 else
d4b4084a 2953 extra_pins = folio_nr_pages(folio);
b8f593cd
HY
2954 if (pextra_pins)
2955 *pextra_pins = extra_pins;
d4b4084a 2956 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
b8f593cd
HY
2957}
2958
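/*
 * Worked example for the pin accounting above (editorial sketch): for a
 * fully PMD-mapped anonymous THP that is not in the swap cache, extra_pins
 * is 0, so the check passes only when folio_ref_count() equals
 * folio_mapcount() + 1 -- one reference per mapping plus the single pin
 * the caller of split_huge_page_to_list() is required to hold. Any
 * additional reference (e.g. a GUP pin) makes the split bail out.
 */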
e9b61f19
KS
2959/*
2960 * This function splits a huge page into normal pages. @page can point to any
2961 * subpage of the huge page to split. The split doesn't change the position of @page.
2962 *
2963 * The caller must hold a pin on the @page, otherwise the split fails with -EBUSY.
2964 * The huge page must be locked.
2965 *
2966 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2967 *
2968 * Both head page and tail pages will inherit mapping, flags, and so on from
2969 * the hugepage.
2970 *
2971 * The GUP pin and PG_locked are transferred to @page. The remaining subpages
2972 * can be freed if they are not mapped.
2973 *
2974 * Returns 0 if the hugepage is split successfully.
2975 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2976 * us.
2977 */
2978int split_huge_page_to_list(struct page *page, struct list_head *list)
2979{
4eecb8b9 2980 struct folio *folio = page_folio(page);
f8baa6be 2981 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3e9a13da 2982 XA_STATE(xas, &folio->mapping->i_pages, folio->index);
baa355fd
KS
2983 struct anon_vma *anon_vma = NULL;
2984 struct address_space *mapping = NULL;
504e070d 2985 int extra_pins, ret;
006d3ff2 2986 pgoff_t end;
478d134e 2987 bool is_hzp;
e9b61f19 2988
3e9a13da
MWO
2989 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2990 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
e9b61f19 2991
3e9a13da 2992 is_hzp = is_huge_zero_page(&folio->page);
4737edbb
NH
2993 if (is_hzp) {
2994 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
478d134e 2995 return -EBUSY;
4737edbb 2996 }
478d134e 2997
3e9a13da 2998 if (folio_test_writeback(folio))
59807685
HY
2999 return -EBUSY;
3000
3e9a13da 3001 if (folio_test_anon(folio)) {
baa355fd 3002 /*
c1e8d7c6 3003 * The caller does not necessarily hold an mmap_lock that would
baa355fd
KS
3004		 * prevent the anon_vma disappearing, so first we take a
3005 * reference to it and then lock the anon_vma for write. This
2f031c6f 3006 * is similar to folio_lock_anon_vma_read except the write lock
baa355fd
KS
3007 * is taken to serialise against parallel split or collapse
3008 * operations.
3009 */
29eea9b5 3010 anon_vma = folio_get_anon_vma(folio);
baa355fd
KS
3011 if (!anon_vma) {
3012 ret = -EBUSY;
3013 goto out;
3014 }
006d3ff2 3015 end = -1;
baa355fd
KS
3016 mapping = NULL;
3017 anon_vma_lock_write(anon_vma);
3018 } else {
6a3edd29
YF
3019 gfp_t gfp;
3020
3e9a13da 3021 mapping = folio->mapping;
baa355fd
KS
3022
3023 /* Truncated ? */
3024 if (!mapping) {
3025 ret = -EBUSY;
3026 goto out;
3027 }
3028
6a3edd29
YF
3029 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
3030 GFP_RECLAIM_MASK);
3031
0201ebf2 3032 if (!filemap_release_folio(folio, gfp)) {
6a3edd29
YF
3033 ret = -EBUSY;
3034 goto out;
3035 }
3036
3e9a13da 3037 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
6b24ca4a
MWO
3038 if (xas_error(&xas)) {
3039 ret = xas_error(&xas);
3040 goto out;
3041 }
3042
baa355fd
KS
3043 anon_vma = NULL;
3044 i_mmap_lock_read(mapping);
006d3ff2
HD
3045
3046 /*
3047		 * __split_huge_page() may need to trim off pages beyond EOF:
3048 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
3049 * which cannot be nested inside the page tree lock. So note
3050 * end now: i_size itself may be changed at any moment, but
3e9a13da 3051 * folio lock is good enough to serialize the trimming.
006d3ff2
HD
3052 */
3053 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
d144bf62
HD
3054 if (shmem_mapping(mapping))
3055 end = shmem_fallocend(mapping->host, end);
e9b61f19 3056 }
e9b61f19
KS
3057
3058 /*
684555aa 3059 * Racy check if we can split the page, before unmap_folio() will
e9b61f19
KS
3060 * split PMDs
3061 */
d4b4084a 3062 if (!can_split_folio(folio, &extra_pins)) {
fd4a7ac3 3063 ret = -EAGAIN;
e9b61f19
KS
3064 goto out_unlock;
3065 }
3066
684555aa 3067 unmap_folio(folio);
e9b61f19 3068
b6769834
AS
3069 /* block interrupt reentry in xa_lock and spinlock */
3070 local_irq_disable();
baa355fd 3071 if (mapping) {
baa355fd 3072 /*
3e9a13da
MWO
3073 * Check if the folio is present in page cache.
3074 * We assume all tail are present too, if folio is there.
baa355fd 3075 */
6b24ca4a
MWO
3076 xas_lock(&xas);
3077 xas_reset(&xas);
3e9a13da 3078 if (xas_load(&xas) != folio)
baa355fd
KS
3079 goto fail;
3080 }
3081
0139aa7b 3082 /* Prevent deferred_split_scan() touching ->_refcount */
364c1eeb 3083 spin_lock(&ds_queue->split_queue_lock);
3e9a13da 3084 if (folio_ref_freeze(folio, 1 + extra_pins)) {
4375a553 3085 if (!list_empty(&folio->_deferred_list)) {
364c1eeb 3086 ds_queue->split_queue_len--;
4375a553 3087 list_del(&folio->_deferred_list);
9a982250 3088 }
afb97172 3089 spin_unlock(&ds_queue->split_queue_lock);
06d3eff6 3090 if (mapping) {
3e9a13da 3091 int nr = folio_nr_pages(folio);
bf9ecead 3092
3e9a13da 3093 xas_split(&xas, folio, folio_order(folio));
a48d5bdc
SR
3094 if (folio_test_pmd_mappable(folio)) {
3095 if (folio_test_swapbacked(folio)) {
3096 __lruvec_stat_mod_folio(folio,
3097 NR_SHMEM_THPS, -nr);
3098 } else {
3099 __lruvec_stat_mod_folio(folio,
3100 NR_FILE_THPS, -nr);
3101 filemap_nr_thps_dec(mapping);
3102 }
1ca7554d 3103 }
06d3eff6
KS
3104 }
3105
b6769834 3106 __split_huge_page(page, list, end);
c4f9c701 3107 ret = 0;
e9b61f19 3108 } else {
364c1eeb 3109 spin_unlock(&ds_queue->split_queue_lock);
504e070d
YS
3110fail:
3111 if (mapping)
6b24ca4a 3112 xas_unlock(&xas);
b6769834 3113 local_irq_enable();
4eecb8b9 3114 remap_page(folio, folio_nr_pages(folio));
fd4a7ac3 3115 ret = -EAGAIN;
e9b61f19
KS
3116 }
3117
3118out_unlock:
baa355fd
KS
3119 if (anon_vma) {
3120 anon_vma_unlock_write(anon_vma);
3121 put_anon_vma(anon_vma);
3122 }
3123 if (mapping)
3124 i_mmap_unlock_read(mapping);
e9b61f19 3125out:
69a37a8b 3126 xas_destroy(&xas);
e9b61f19
KS
3127 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3128 return ret;
3129}
9a982250 3130
8dc4a8f1 3131void folio_undo_large_rmappable(struct folio *folio)
9a982250 3132{
8dc4a8f1 3133 struct deferred_split *ds_queue;
9a982250
KS
3134 unsigned long flags;
3135
deedad80
YF
3136 /*
3137 * At this point, there is no one trying to add the folio to
3138 * deferred_list. If folio is not in deferred_list, it's safe
3139 * to check without acquiring the split_queue_lock.
3140 */
8dc4a8f1
MWO
3141 if (data_race(list_empty(&folio->_deferred_list)))
3142 return;
3143
3144 ds_queue = get_deferred_split_queue(folio);
3145 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3146 if (!list_empty(&folio->_deferred_list)) {
3147 ds_queue->split_queue_len--;
9bcef597 3148 list_del_init(&folio->_deferred_list);
9a982250 3149 }
8dc4a8f1 3150 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250
KS
3151}
3152
f158ed61 3153void deferred_split_folio(struct folio *folio)
9a982250 3154{
f8baa6be 3155 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
87eaceb3 3156#ifdef CONFIG_MEMCG
8991de90 3157 struct mem_cgroup *memcg = folio_memcg(folio);
87eaceb3 3158#endif
9a982250
KS
3159 unsigned long flags;
3160
8991de90 3161 VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
9a982250 3162
87eaceb3
YS
3163 /*
3164	 * The try_to_unmap() in the page reclaim path might reach here too;
3165	 * this may cause a race condition that corrupts the deferred split queue.
8991de90 3166 * And, if page reclaim is already handling the same folio, it is
87eaceb3
YS
3167 * unnecessary to handle it again in shrinker.
3168 *
8991de90
MWO
3169 * Check the swapcache flag to determine if the folio is being
3170 * handled by page reclaim since THP swap would add the folio into
87eaceb3
YS
3171 * swap cache before calling try_to_unmap().
3172 */
8991de90 3173 if (folio_test_swapcache(folio))
87eaceb3
YS
3174 return;
3175
8991de90 3176 if (!list_empty(&folio->_deferred_list))
87eaceb3
YS
3177 return;
3178
364c1eeb 3179 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
8991de90 3180 if (list_empty(&folio->_deferred_list)) {
f9719a03 3181 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
8991de90 3182 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
364c1eeb 3183 ds_queue->split_queue_len++;
87eaceb3
YS
3184#ifdef CONFIG_MEMCG
3185 if (memcg)
8991de90 3186 set_shrinker_bit(memcg, folio_nid(folio),
54d91729 3187 deferred_split_shrinker->id);
87eaceb3 3188#endif
9a982250 3189 }
364c1eeb 3190 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250
KS
3191}
3192
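/*
 * Editorial note (assumption based on callers outside this file):
 * deferred_split_folio() is typically reached from the rmap removal path
 * when a PMD mapping of a large folio goes away while other references
 * remain, so the folio is queued here and split later by
 * deferred_split_scan() below when the shrinker runs under memory
 * pressure.
 */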
3193static unsigned long deferred_split_count(struct shrinker *shrink,
3194 struct shrink_control *sc)
3195{
a3d0a918 3196 struct pglist_data *pgdata = NODE_DATA(sc->nid);
364c1eeb 3197 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
87eaceb3
YS
3198
3199#ifdef CONFIG_MEMCG
3200 if (sc->memcg)
3201 ds_queue = &sc->memcg->deferred_split_queue;
3202#endif
364c1eeb 3203 return READ_ONCE(ds_queue->split_queue_len);
9a982250
KS
3204}
3205
3206static unsigned long deferred_split_scan(struct shrinker *shrink,
3207 struct shrink_control *sc)
3208{
a3d0a918 3209 struct pglist_data *pgdata = NODE_DATA(sc->nid);
364c1eeb 3210 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
9a982250 3211 unsigned long flags;
4375a553 3212 	LIST_HEAD(list);
3213 struct folio *folio, *next;
9a982250 3214 	int split = 0;
3215
87eaceb3 3216#ifdef CONFIG_MEMCG
3217 if (sc->memcg)
3218 ds_queue = &sc->memcg->deferred_split_queue;
3219#endif
3220
364c1eeb 3221 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
9a982250 3222 /* Take pin on all head pages to avoid freeing them under us */
4375a553 3223 	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
3224 _deferred_list) {
3225 if (folio_try_get(folio)) {
3226 list_move(&folio->_deferred_list, &list);
e3ae1953 3227 } else {
4375a553 3228 			/* We lost race with folio_put() */
3229 list_del_init(&folio->_deferred_list);
364c1eeb 3230 ds_queue->split_queue_len--;
9a982250 3231 }
e3ae1953 3232 		if (!--sc->nr_to_scan)
3233 break;
9a982250 3234 }
364c1eeb 3235 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250 3236
4375a553 3237 	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
3238 if (!folio_trylock(folio))
fa41b900 3239 goto next;
9a982250 3240 /* split_huge_page() removes page from list on success */
4375a553 3241 if (!split_folio(folio))
9a982250 3242 split++;
4375a553 3243 folio_unlock(folio);
fa41b900 3244next:
4375a553 3245 folio_put(folio);
9a982250 3246 	}
3247
364c1eeb 3248 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3249 list_splice_tail(&list, &ds_queue->split_queue);
3250 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250 3251
cb8d68ec 3252 	/*
3253 * Stop shrinker if we didn't split any page, but the queue is empty.
3254 * This can happen if pages were freed under us.
3255 */
364c1eeb 3256 if (!split && list_empty(&ds_queue->split_queue))
cb8d68ec 3257 		return SHRINK_STOP;
3258 return split;
9a982250 3259}
3260
49071d43 3261#ifdef CONFIG_DEBUG_FS
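/*
 * Walk every online pfn of every managed zone and try to split each large
 * LRU folio found.  Only reachable through the debugfs interface below.
 */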
fa6c0231 3262static void split_huge_pages_all(void)
49071d43 3263{
3264 struct zone *zone;
3265 struct page *page;
630e7c5e 3266 struct folio *folio;
49071d43 3267 	unsigned long pfn, max_zone_pfn;
3268 unsigned long total = 0, split = 0;
3269
fa6c0231 3270 pr_debug("Split all THPs\n");
a17206da 3271 	for_each_zone(zone) {
3272 if (!managed_zone(zone))
3273 continue;
49071d43 3274 		max_zone_pfn = zone_end_pfn(zone);
3275 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
a17206da 3276 int nr_pages;
49071d43 3277
2b7aa91b 3278 page = pfn_to_online_page(pfn);
630e7c5e 3279 			if (!page || PageTail(page))
3280 continue;
3281 folio = page_folio(page);
3282 if (!folio_try_get(folio))
49071d43 3283 				continue;
3284
630e7c5e 3285 if (unlikely(page_folio(page) != folio))
49071d43 3286 				goto next;
3287
630e7c5e 3288 if (zone != folio_zone(folio))
49071d43 3289 				goto next;
3290
630e7c5e 3291 			if (!folio_test_large(folio)
3292 || folio_test_hugetlb(folio)
3293 || !folio_test_lru(folio))
49071d43 3294 				goto next;
3295
3296 total++;
630e7c5e 3297 			folio_lock(folio);
3298 nr_pages = folio_nr_pages(folio);
3299 if (!split_folio(folio))
49071d43 3300 split++;
a17206da 3301 pfn += nr_pages - 1;
630e7c5e 3302 folio_unlock(folio);
49071d43 3303next:
630e7c5e 3304 folio_put(folio);
fa6c0231 3305 cond_resched();
49071d43 3306 		}
3307 }
3308
fa6c0231 3309 	pr_debug("%lu of %lu THP split\n", split, total);
3310}
49071d43 3311
fa6c0231 3312static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
3313{
3314 return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
3315 is_vm_hugetlb_page(vma);
3316}
3317
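/*
 * Split THPs mapped by the given process in the virtual address range
 * [vaddr_start, vaddr_end).  The walk advances one page at a time so that
 * distinct PTE-mapped THPs sharing a page table are all visited.
 */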
3318static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
3319 unsigned long vaddr_end)
3320{
3321 int ret = 0;
3322 struct task_struct *task;
3323 struct mm_struct *mm;
3324 unsigned long total = 0, split = 0;
3325 unsigned long addr;
3326
3327 vaddr_start &= PAGE_MASK;
3328 vaddr_end &= PAGE_MASK;
3329
3330 /* Find the task_struct from pid */
3331 rcu_read_lock();
3332 task = find_task_by_vpid(pid);
3333 if (!task) {
3334 rcu_read_unlock();
3335 ret = -ESRCH;
3336 goto out;
3337 }
3338 get_task_struct(task);
3339 rcu_read_unlock();
3340
3341 /* Find the mm_struct */
3342 mm = get_task_mm(task);
3343 put_task_struct(task);
3344
3345 if (!mm) {
3346 ret = -EINVAL;
3347 goto out;
3348 }
3349
3350 pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
3351 pid, vaddr_start, vaddr_end);
3352
3353 mmap_read_lock(mm);
3354 /*
3355 * always increase addr by PAGE_SIZE, since we could have a PTE page
3356 * table filled with PTE-mapped THPs, each of which is distinct.
3357 */
3358 for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
74ba2b38 3359 struct vm_area_struct *vma = vma_lookup(mm, addr);
fa6c0231 3360 struct page *page;
a644b0ab 3361 struct folio *folio;
fa6c0231 3362
74ba2b38 3363 if (!vma)
fa6c0231 3364 			break;
3365
3366 /* skip special VMA and hugetlb VMA */
3367 if (vma_not_suitable_for_thp_split(vma)) {
3368 addr = vma->vm_end;
3369 continue;
3370 }
3371
3372 /* FOLL_DUMP to ignore special (like zero) pages */
87d2762e 3373 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
fa6c0231 3374
f7091ed6 3375 if (IS_ERR_OR_NULL(page))
fa6c0231 3376 			continue;
3377
a644b0ab 3378 		folio = page_folio(page);
3379 if (!is_transparent_hugepage(folio))
fa6c0231 3380 			goto next;
3381
3382 total++;
a644b0ab 3383 if (!can_split_folio(folio, NULL))
fa6c0231 3384 			goto next;
3385
a644b0ab 3386 if (!folio_trylock(folio))
fa6c0231 3387 			goto next;
3388
a644b0ab 3389 if (!split_folio(folio))
fa6c0231 3390 			split++;
3391
a644b0ab 3392 folio_unlock(folio);
fa6c0231 3393next:
a644b0ab 3394 folio_put(folio);
fa6c0231 3395 		cond_resched();
3396 }
3397 mmap_read_unlock(mm);
3398 mmput(mm);
3399
3400 pr_debug("%lu of %lu THP split\n", split, total);
3401
3402out:
3403 return ret;
49071d43 3404}
fa6c0231 3405
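/*
 * Split file-backed THPs found in the page cache of the given file over
 * the page offset range [off_start, off_end).  The file is opened
 * read-only and its folios are looked up through f_mapping.
 */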
fbe37501 3406 static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
3407 pgoff_t off_end)
3408{
3409 struct filename *file;
3410 struct file *candidate;
3411 struct address_space *mapping;
3412 int ret = -EINVAL;
3413 pgoff_t index;
3414 int nr_pages = 1;
3415 unsigned long total = 0, split = 0;
3416
3417 file = getname_kernel(file_path);
3418 if (IS_ERR(file))
3419 return ret;
3420
3421 candidate = file_open_name(file, O_RDONLY, 0);
3422 if (IS_ERR(candidate))
3423 goto out;
3424
3425 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3426 file_path, off_start, off_end);
3427
3428 mapping = candidate->f_mapping;
3429
3430 for (index = off_start; index < off_end; index += nr_pages) {
1fb130b2 3431 struct folio *folio = filemap_get_folio(mapping, index);
fbe37501 3432 
3433 nr_pages = 1;
66dabbb6 3434 if (IS_ERR(folio))
fbe37501 3435 			continue;
3436
9ee2c086 3437 if (!folio_test_large(folio))
fbe37501 3438 			goto next;
3439
3440 total++;
9ee2c086 3441 nr_pages = folio_nr_pages(folio);
fbe37501 3442
9ee2c086 3443 if (!folio_trylock(folio))
fbe37501 3444 			goto next;
3445
9ee2c086 3446 if (!split_folio(folio))
fbe37501 3447 			split++;
3448
9ee2c086 3449 folio_unlock(folio);
fbe37501 3450next:
9ee2c086 3451 folio_put(folio);
fbe37501 3452 		cond_resched();
3453 }
3454
3455 filp_close(candidate, NULL);
3456 ret = 0;
3457
3458 pr_debug("%lu of %lu file-backed THP split\n", split, total);
3459out:
3460 putname(file);
3461 return ret;
3462}
3463
fa6c0231 3464#define MAX_INPUT_BUF_SZ 255
3465
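/*
 * Writes to the debugfs "split_huge_pages" file take one of three forms
 * (addresses and offsets are hexadecimal with a 0x prefix):
 *
 *   echo 1 > split_huge_pages
 *       split all THPs in the system
 *   echo "<pid>,0x<vaddr_start>,0x<vaddr_end>" > split_huge_pages
 *       split THPs mapped by a process in that virtual address range
 *   echo "/path/to/file,0x<off_start>,0x<off_end>" > split_huge_pages
 *       split file-backed THPs in that page cache offset range
 */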
3466static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
3467 		size_t count, loff_t *ppos)
3468{
3469 static DEFINE_MUTEX(split_debug_mutex);
3470 ssize_t ret;
fbe37501 3471 	/* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
3472 char input_buf[MAX_INPUT_BUF_SZ];
fa6c0231 3473 	int pid;
3474 unsigned long vaddr_start, vaddr_end;
3475
3476 ret = mutex_lock_interruptible(&split_debug_mutex);
3477 if (ret)
3478 return ret;
3479
3480 ret = -EFAULT;
3481
3482 memset(input_buf, 0, MAX_INPUT_BUF_SZ);
3483 if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
3484 goto out;
3485
3486 input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';
fbe37501 3487 
3488 if (input_buf[0] == '/') {
3489 char *tok;
3490 char *buf = input_buf;
3491 char file_path[MAX_INPUT_BUF_SZ];
3492 pgoff_t off_start = 0, off_end = 0;
3493 size_t input_len = strlen(input_buf);
3494
3495 tok = strsep(&buf, ",");
3496 if (tok) {
1212e00c 3497 strcpy(file_path, tok);
fbe37501 3498 		} else {
3499 ret = -EINVAL;
3500 goto out;
3501 }
3502
3503 ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
3504 if (ret != 2) {
3505 ret = -EINVAL;
3506 goto out;
3507 }
3508 ret = split_huge_pages_in_file(file_path, off_start, off_end);
3509 if (!ret)
3510 ret = input_len;
3511
3512 goto out;
3513 }
3514
fa6c0231 3515 	ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
3516 if (ret == 1 && pid == 1) {
3517 split_huge_pages_all();
3518 ret = strlen(input_buf);
3519 goto out;
3520 } else if (ret != 3) {
3521 ret = -EINVAL;
3522 goto out;
3523 }
3524
3525 ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
3526 if (!ret)
3527 ret = strlen(input_buf);
3528out:
3529 mutex_unlock(&split_debug_mutex);
3530 return ret;
3531
3532}
3533
3534static const struct file_operations split_huge_pages_fops = {
3535 .owner = THIS_MODULE,
3536 .write = split_huge_pages_write,
3537 .llseek = no_llseek,
3538};
49071d43 3539 
3540static int __init split_huge_pages_debugfs(void)
3541{
d9f7979c 3542 	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
3543 &split_huge_pages_fops);
49071d43 3544 	return 0;
3545}
3546late_initcall(split_huge_pages_debugfs);
3547#endif
616b8371 3548 
3549#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
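/*
 * Replace a present PMD mapping with a PMD migration entry.  The PMD is
 * invalidated first; dirty, young, soft-dirty and uffd-wp state is carried
 * over into the migration entry.  Returns -EBUSY, with the original PMD
 * restored, if an anon-exclusive page cannot be shared (it may be pinned).
 */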
7f5abe60 3550int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
616b8371 3551 		struct page *page)
3552{
a8e61d58 3553 struct folio *folio = page_folio(page);
616b8371 3554 	struct vm_area_struct *vma = pvmw->vma;
3555 struct mm_struct *mm = vma->vm_mm;
3556 unsigned long address = pvmw->address;
6c287605 3557 bool anon_exclusive;
616b8371 3558 	pmd_t pmdval;
3559 swp_entry_t entry;
ab6e3d09 3560 pmd_t pmdswp;
616b8371 3561 
3562 if (!(pvmw->pmd && !pvmw->pte))
7f5abe60 3563 return 0;
616b8371 3564
616b8371 3565 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
8a8683ad 3566 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
6c287605 3567
e3b4b137 3568 /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
a8e61d58 3569 anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
e3b4b137 3570 if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
6c287605 3571 set_pmd_at(mm, address, pvmw->pmd, pmdval);
7f5abe60 3572 return -EBUSY;
6c287605 3573 	}
3574
616b8371 3575 if (pmd_dirty(pmdval))
db44c658 3576 folio_mark_dirty(folio);
4dd845b5 3577 	if (pmd_write(pmdval))
3578 entry = make_writable_migration_entry(page_to_pfn(page));
6c287605 3579 	else if (anon_exclusive)
3580 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
4dd845b5 3581 	else
3582 entry = make_readable_migration_entry(page_to_pfn(page));
2e346877 3583 	if (pmd_young(pmdval))
3584 entry = make_migration_entry_young(entry);
3585 if (pmd_dirty(pmdval))
3586 entry = make_migration_entry_dirty(entry);
ab6e3d09 3587 	pmdswp = swp_entry_to_pmd(entry);
3588 if (pmd_soft_dirty(pmdval))
3589 pmdswp = pmd_swp_mksoft_dirty(pmdswp);
24bf08c4 3590 	if (pmd_uffd_wp(pmdval))
3591 pmdswp = pmd_swp_mkuffd_wp(pmdswp);
ab6e3d09 3592 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
a8e61d58 3593 	folio_remove_rmap_pmd(folio, page, vma);
3594 folio_put(folio);
283fd6fe 3595 trace_set_migration_pmd(address, pmd_val(pmdswp));
7f5abe60 3596 
3597 return 0;
616b8371 3598}
3599
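/*
 * Install a present PMD for the destination page once migration completes:
 * rebuild the PMD from the migration entry (restoring write, soft-dirty,
 * uffd-wp and young state), add the rmap for the new folio and set the PMD
 * at the PMD-aligned address.
 */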
3600void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
3601{
14d85a6e 3602 struct folio *folio = page_folio(new);
616b8371 3603 	struct vm_area_struct *vma = pvmw->vma;
3604 struct mm_struct *mm = vma->vm_mm;
3605 unsigned long address = pvmw->address;
4fba8f2a 3606 unsigned long haddr = address & HPAGE_PMD_MASK;
616b8371 3607 	pmd_t pmde;
3608 swp_entry_t entry;
3609
3610 if (!(pvmw->pmd && !pvmw->pte))
3611 return;
3612
3613 entry = pmd_to_swp_entry(*pvmw->pmd);
14d85a6e 3614 folio_get(folio);
2e346877 3615 pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
ab6e3d09 3616 	if (pmd_swp_soft_dirty(*pvmw->pmd))
3617 pmde = pmd_mksoft_dirty(pmde);
3c811f78 3618 if (is_writable_migration_entry(entry))
161e393c 3619 pmde = pmd_mkwrite(pmde, vma);
8f34f1ea 3620 if (pmd_swp_uffd_wp(*pvmw->pmd))
f1eb1bac 3621 pmde = pmd_mkuffd_wp(pmde);
2e346877 3622 	if (!is_migration_entry_young(entry))
3623 pmde = pmd_mkold(pmde);
3624 /* NOTE: this may contain setting soft-dirty on some archs */
14d85a6e 3625 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
2e346877 3626 pmde = pmd_mkdirty(pmde);
616b8371 3627
14d85a6e 3628 if (folio_test_anon(folio)) {
395db7b1 3629 rmap_t rmap_flags = RMAP_NONE;
6c287605 3630 
3631 if (!is_readable_migration_entry(entry))
3632 rmap_flags |= RMAP_EXCLUSIVE;
3633
395db7b1 3634 folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
6c287605 3635 } else {
14d85a6e 3636 folio_add_file_rmap_pmd(folio, new, vma);
6c287605 3637 }
14d85a6e 3638 VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
4fba8f2a 3639 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
5cbcf225 3640 
3641 /* No need to invalidate - it was non-present before */
616b8371 3642 update_mmu_cache_pmd(vma, address, pvmw->pmd);
283fd6fe 3643 trace_remove_migration_pmd(address, pmd_val(pmde));
616b8371 3644}
3645#endif