// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker *deferred_split_shrinker;
static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc);
static unsigned long deferred_split_scan(struct shrinker *shrink,
					 struct shrink_control *sc);

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;

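/*
 * Filter the requested THP orders down to those this VMA can actually use,
 * based on the VMA type and flags, sysfs/prctl settings (unless
 * enforce_sysfs is clear) and, outside the fault path, the size and
 * alignment of the VMA. Returns a bitfield of allowable orders, 0 if none.
 */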
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags, bool smaps,
					 bool in_pf, bool enforce_sysfs,
					 unsigned long orders)
{
	/* Check the intersection of requested and supported orders. */
	orders &= vma_is_anonymous(vma) ?
			THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
	if (!orders)
		return 0;

	if (!vma->vm_mm)		/* vdso */
		return 0;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return 0;
	/*
	 * If the hardware/firmware marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
		return 0;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf ? orders : 0;

	/*
	 * khugepaged special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED))
		return 0;

	/*
	 * Check alignment for file vma and size for both file and anon vma by
	 * filtering out the unsuitable orders.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers.
	 */
	if (!in_pf) {
		int order = highest_order(orders);
		unsigned long addr;

		while (orders) {
			addr = vma->vm_end - (PAGE_SIZE << order);
			if (thp_vma_suitable_order(vma, addr, order))
				break;
			order = next_order(&orders, order);
		}

		if (!orders)
			return 0;
	}

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
				     !enforce_sysfs, vma->vm_mm, vm_flags)
			? orders : 0;

	if (!vma_is_anonymous(vma)) {
		/*
		 * Enforce sysfs THP requirements as necessary. Anonymous vmas
		 * were already handled in thp_vma_allowable_orders().
		 */
		if (enforce_sysfs &&
		    (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
						    !hugepage_global_always())))
			return 0;

		/*
		 * Trust that ->huge_fault() handlers know what they are doing
		 * in fault path.
		 */
		if (((in_pf || smaps)) && vma->vm_ops->huge_fault)
			return orders;
		/* Only regular file is valid in collapse path */
		if (((!in_pf || smaps)) && file_thp_enabled(vma))
			return orders;
		return 0;
	}

	if (vma_is_temporary_stack(vma))
		return 0;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf) ? orders : 0;

	return orders;
}

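/*
 * Take a reference on the global huge zero page, allocating it on first use.
 * An extra reference is kept so that only the shrinker can free the page.
 */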
static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker *huge_zero_page_shrinker;

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static void hugepage_exit_sysfs(struct kobject *hugepage_kobj);
static void thpsize_release(struct kobject *kobj);
static DEFINE_SPINLOCK(huge_anon_orders_lock);
static LIST_HEAD(thpsize_list);

struct thpsize {
	struct kobject kobj;
	struct list_head node;
	int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

static ssize_t thpsize_enabled_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	int order = to_thpsize(kobj)->order;
	const char *output;

	if (test_bit(order, &huge_anon_orders_always))
		output = "[always] inherit madvise never";
	else if (test_bit(order, &huge_anon_orders_inherit))
		output = "always [inherit] madvise never";
	else if (test_bit(order, &huge_anon_orders_madvise))
		output = "always inherit [madvise] never";
	else
		output = "always inherit madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t thpsize_enabled_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int order = to_thpsize(kobj)->order;
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_always);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "inherit")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_madvise);
		set_bit(order, &huge_anon_orders_inherit);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "madvise")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		set_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else if (sysfs_streq(buf, "never")) {
		spin_lock(&huge_anon_orders_lock);
		clear_bit(order, &huge_anon_orders_always);
		clear_bit(order, &huge_anon_orders_inherit);
		clear_bit(order, &huge_anon_orders_madvise);
		spin_unlock(&huge_anon_orders_lock);
	} else
		ret = -EINVAL;

	return ret;
}

static struct kobj_attribute thpsize_enabled_attr =
	__ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store);

static struct attribute *thpsize_attrs[] = {
	&thpsize_enabled_attr.attr,
	NULL,
};

static const struct attribute_group thpsize_attr_group = {
	.attrs = thpsize_attrs,
};

static const struct kobj_type thpsize_ktype = {
	.release = &thpsize_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

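/* Create the per-order "hugepages-<size>kB" sysfs directory and its group. */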
static struct thpsize *thpsize_create(int order, struct kobject *parent)
{
	unsigned long size = (PAGE_SIZE << order) / SZ_1K;
	struct thpsize *thpsize;
	int ret;

	thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL);
	if (!thpsize)
		return ERR_PTR(-ENOMEM);

	ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent,
				   "hugepages-%lukB", size);
	if (ret) {
		kfree(thpsize);
		return ERR_PTR(ret);
	}

	ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group);
	if (ret) {
		kobject_put(&thpsize->kobj);
		return ERR_PTR(ret);
	}

	thpsize->order = order;
	return thpsize;
}

static void thpsize_release(struct kobject *kobj)
{
	kfree(to_thpsize(kobj));
}

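/*
 * Build the /sys/kernel/mm/transparent_hugepage hierarchy, including the
 * khugepaged group and one subdirectory per supported anonymous THP order.
 */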
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;
	struct thpsize *thpsize;
	unsigned long orders;
	int order;

	/*
	 * Default to setting PMD-sized THP to inherit the global setting and
	 * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
	 * constant so we have to do this here.
	 */
	huge_anon_orders_inherit = BIT(PMD_ORDER);

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	orders = THP_ORDERS_ALL_ANON;
	order = highest_order(orders);
	while (orders) {
		thpsize = thpsize_create(order, *hugepage_kobj);
		if (IS_ERR(thpsize)) {
			pr_err("failed to create thpsize for order %d\n", order);
			err = PTR_ERR(thpsize);
			goto remove_all;
		}
		list_add(&thpsize->node, &thpsize_list);
		order = next_order(&orders, order);
	}

	return 0;

remove_all:
	hugepage_exit_sysfs(*hugepage_kobj);
	return err;
remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	struct thpsize *thpsize, *tmp;

	list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {
		list_del(&thpsize->node);
		kobject_put(&thpsize->kobj);
	}

	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

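/* Allocate and register the huge zero page and deferred-split shrinkers. */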
static int __init thp_shrinker_init(void)
{
	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
	if (!huge_zero_page_shrinker)
		return -ENOMEM;

	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						 SHRINKER_MEMCG_AWARE |
						 SHRINKER_NONSLAB,
						 "thp-deferred_split");
	if (!deferred_split_shrinker) {
		shrinker_free(huge_zero_page_shrinker);
		return -ENOMEM;
	}

	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
	shrinker_register(huge_zero_page_shrinker);

	deferred_split_shrinker->count_objects = deferred_split_count;
	deferred_split_shrinker->scan_objects = deferred_split_scan;
	shrinker_register(deferred_split_shrinker);

	return 0;
}

static void __init thp_shrinker_exit(void)
{
	shrinker_free(huge_zero_page_shrinker);
	shrinker_free(deferred_split_shrinker);
}

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER > MAX_PAGE_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = thp_shrinker_init();
	if (err)
		goto err_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	thp_shrinker_exit();
err_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

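/* Make the pmd writable only if the VMA itself is writable. */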
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd, vma);
	return pmd;
}

#ifdef CONFIG_MEMCG
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline
struct deferred_split *get_deferred_split_queue(struct folio *folio)
{
	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));

	return &pgdat->deferred_split_queue;
}
#endif

void folio_prep_large_rmappable(struct folio *folio)
{
	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
	INIT_LIST_HEAD(&folio->_deferred_list);
	folio_set_large_rmappable(folio);
}

static inline bool is_transparent_hugepage(struct folio *folio)
{
	if (!folio_test_large(folio))
		return false;

	return is_huge_zero_page(&folio->page) ||
	       folio_test_large_rmappable(folio);
}

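/*
 * Ask for a mapping padded by @size and then align the result so the given
 * file offset lands on a @size-aligned address, which makes huge mappings
 * possible; returns 0 if no suitably aligned area could be found.
 */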
static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					     off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

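/*
 * Charge, zero and map a freshly allocated huge page at the faulting PMD,
 * delivering a userfault instead when the range is registered for
 * VM_UFFD_MISSING.
 */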
static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio = page_folio(page);
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {
		folio_put(folio);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	folio_throttle_swaprate(folio, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__folio_mark_uptodate(folio);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			folio_put(folio);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		folio_add_new_anon_rmap(folio, vma, haddr);
		folio_add_lru_vma(folio, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	folio_put(folio);
	return ret;

}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}

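/*
 * Anonymous huge page fault handler: map the huge zero page for suitable
 * read faults, otherwise allocate, charge and map a new PMD-sized folio.
 */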
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;
		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}

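/*
 * Install a PMD-sized pfn mapping at @addr; if an entry is already present,
 * only mark it young/dirty for a write and leave it otherwise untouched.
 */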
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t prot = vma->vm_page_prot;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t pgprot = vma->vm_page_prot;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

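/* Mark the huge pmd young (and dirty for a write) on behalf of FOLL_TOUCH. */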
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}

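/*
 * Copy an anonymous huge pmd at fork: share the huge zero page or migration
 * entry directly, otherwise duplicate the anon rmap and write-protect both
 * sides; returns -EAGAIN when the caller must retry at PTE granularity.
 */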
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	struct folio *src_folio;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (!is_readable_migration_entry(entry)) {
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			if (pmd_swp_uffd_wp(*src_pmd))
				pmd = pmd_swp_mkuffd_wp(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		if (!userfaultfd_wp(dst_vma))
			pmd = pmd_swp_clear_uffd_wp(pmd);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		mm_get_huge_zero_page(dst_mm);
		goto out_zero_page;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	src_folio = page_folio(src_page);

	folio_get(src_folio);
	if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
		/* Page maybe pinned: split and retry the fault on PTEs. */
		folio_put(src_folio);
		pte_free(dst_mm, pgtable);
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
		return -EAGAIN;
	}
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
out_zero_page:
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	if (!userfaultfd_wp(dst_vma))
		pmd = pmd_clear_uffd_wp(pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		      pud_t *pud, bool write)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (write)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				  pud, _pud, write))
		update_mmu_cache_pud(vma, addr, pud);
}

struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 *
	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);

	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}

int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * When page table lock is held, the huge zero pud should not be
	 * under splitting since we don't split the page itself, only pud to
	 * a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	/*
	 * TODO: once we support anonymous pages, use
	 * folio_try_dup_anon_rmap_*() and split if duplicating fails.
	 */
	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void huge_pmd_set_accessed(struct vm_fault *vmf)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
		goto unlock;

	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);

unlock:
	spin_unlock(vmf->ptl);
}

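/*
 * Huge pmd write/unshare fault: reuse the folio in place when it is
 * exclusively owned, otherwise split the pmd and retry at PTE granularity.
 */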
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t orig_pmd = vmf->orig_pmd;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);

	if (is_huge_zero_pmd(orig_pmd))
		goto fallback;

	spin_lock(vmf->ptl);

	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	page = pmd_page(orig_pmd);
	folio = page_folio(page);
	VM_BUG_ON_PAGE(!PageHead(page), page);

	/* Early check when only holding the PT lock. */
	if (PageAnonExclusive(page))
		goto reuse;

	if (!folio_trylock(folio)) {
		folio_get(folio);
		spin_unlock(vmf->ptl);
		folio_lock(folio);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			spin_unlock(vmf->ptl);
			folio_unlock(folio);
			folio_put(folio);
			return 0;
		}
		folio_put(folio);
	}

	/* Recheck after temporarily dropping the PT lock. */
	if (PageAnonExclusive(page)) {
		folio_unlock(folio);
		goto reuse;
	}

	/*
	 * See do_wp_page(): we can only reuse the folio exclusively if
	 * there are no additional references. Note that we always drain
	 * the LRU cache immediately after adding a THP.
	 */
	if (folio_ref_count(folio) >
			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
		goto unlock_fallback;
	if (folio_test_swapcache(folio))
		folio_free_swap(folio);
	if (folio_ref_count(folio) == 1) {
		pmd_t entry;

		folio_move_anon_rmap(folio, vma);
		SetPageAnonExclusive(page);
		folio_unlock(folio);
reuse:
		if (unlikely(unshare)) {
			spin_unlock(vmf->ptl);
			return 0;
		}
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		spin_unlock(vmf->ptl);
		return 0;
	}

unlock_fallback:
	folio_unlock(folio);
	spin_unlock(vmf->ptl);
fallback:
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;
}

1587
c27f479e
DH
1588static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1589 unsigned long addr, pmd_t pmd)
1590{
1591 struct page *page;
1592
1593 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1594 return false;
1595
1596 /* Don't touch entries that are not even readable (NUMA hinting). */
1597 if (pmd_protnone(pmd))
1598 return false;
1599
1600 /* Do we need write faults for softdirty tracking? */
1601 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1602 return false;
1603
1604 /* Do we need write faults for uffd-wp tracking? */
1605 if (userfaultfd_huge_pmd_wp(vma, pmd))
1606 return false;
1607
1608 if (!(vma->vm_flags & VM_SHARED)) {
1609 /* See can_change_pte_writable(). */
1610 page = vm_normal_page_pmd(vma, addr, pmd);
1611 return page && PageAnon(page) && PageAnonExclusive(page);
1612 }
1613
1614 /* See can_change_pte_writable(). */
1615 return pmd_dirty(pmd);
1616}
1617
5535be30
DH
1618/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
1619static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
1620 struct vm_area_struct *vma,
1621 unsigned int flags)
8310d48b 1622{
5535be30
DH
1623 /* If the pmd is writable, we can write to the page. */
1624 if (pmd_write(pmd))
1625 return true;
1626
1627 /* Maybe FOLL_FORCE is set to override it? */
1628 if (!(flags & FOLL_FORCE))
1629 return false;
1630
1631 /* But FOLL_FORCE has no effect on shared mappings */
1632 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
1633 return false;
1634
1635 /* ... or read-only private ones */
1636 if (!(vma->vm_flags & VM_MAYWRITE))
1637 return false;
1638
1639 /* ... or already writable ones that just need to take a write fault */
1640 if (vma->vm_flags & VM_WRITE)
1641 return false;
1642
1643 /*
1644 * See can_change_pte_writable(): we broke COW and could map the page
1645 * writable if we have an exclusive anonymous page ...
1646 */
1647 if (!page || !PageAnon(page) || !PageAnonExclusive(page))
1648 return false;
1649
1650 /* ... and a write-fault isn't required for other reasons. */
1651 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1652 return false;
1653 return !userfaultfd_huge_pmd_wp(vma, pmd);
8310d48b
KF
1654}
1655
b676b293 1656struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
71e3aac0
AA
1657 unsigned long addr,
1658 pmd_t *pmd,
1659 unsigned int flags)
1660{
b676b293 1661 struct mm_struct *mm = vma->vm_mm;
5535be30 1662 struct page *page;
0f089235 1663 int ret;
71e3aac0 1664
c4088ebd 1665 assert_spin_locked(pmd_lockptr(mm, pmd));
71e3aac0 1666
5535be30
DH
1667 page = pmd_page(*pmd);
1668 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1669
1670 if ((flags & FOLL_WRITE) &&
1671 !can_follow_write_pmd(*pmd, page, vma, flags))
1672 return NULL;
71e3aac0 1673
85facf25
KS
1674 /* Avoid dumping huge zero page */
1675 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1676 return ERR_PTR(-EFAULT);
1677
d74943a2 1678 if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
5535be30 1679 return NULL;
3faa52c0 1680
84209e87 1681 if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
a7f22660
DH
1682 return ERR_PTR(-EMLINK);
1683
b6a2619c
DH
1684 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
1685 !PageAnonExclusive(page), page);
1686
0f089235
LG
1687 ret = try_grab_page(page, flags);
1688 if (ret)
1689 return ERR_PTR(ret);
3faa52c0 1690
3565fce3 1691 if (flags & FOLL_TOUCH)
a69e4717 1692 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
3faa52c0 1693
71e3aac0 1694 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
ca120cf6 1695 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
71e3aac0 1696
71e3aac0
AA
1697 return page;
1698}
1699
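/*
 * Illustrative userspace sketch (not part of this file): the FOLL_FORCE
 * handling in can_follow_write_pmd() above is what lets a debugger, or a
 * write to /proc/<pid>/mem (which uses FOLL_FORCE internally), poke a
 * read-only private anonymous mapping by breaking COW, even when that
 * mapping happens to be backed by a THP. Whether a THP is actually used
 * depends on the system's transparent_hugepage settings.
 */
#if 0	/* userspace example, for illustration only */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL << 20;				/* 2 MiB, a typical PMD size */
	char *p = mmap(NULL, len, PROT_READ,		/* read-only private mapping */
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int fd = open("/proc/self/mem", O_RDWR);
	char byte = 42;

	if (p == MAP_FAILED || fd < 0)
		return 1;
	/* The write succeeds despite PROT_READ because the kernel uses FOLL_FORCE. */
	if (pwrite(fd, &byte, 1, (off_t)(uintptr_t)p) != 1)
		return 1;
	printf("%d\n", p[0]);				/* prints 42 */
	return 0;
}
#endif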
d10e63f2 1700/* NUMA hinting page fault entry point for trans huge pmds */
5db4f15c 1701vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
d10e63f2 1702{
82b0f8c3 1703 struct vm_area_struct *vma = vmf->vma;
c5b5a3dd
YS
1704 pmd_t oldpmd = vmf->orig_pmd;
1705 pmd_t pmd;
667ffc31 1706 struct folio *folio;
82b0f8c3 1707 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
667ffc31 1708 int nid = NUMA_NO_NODE;
33024536 1709 int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
6a56ccbc 1710 bool migrated = false, writable = false;
6688cc05 1711 int flags = 0;
d10e63f2 1712
82b0f8c3 1713 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
c5b5a3dd 1714 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
82b0f8c3 1715 spin_unlock(vmf->ptl);
de466bd6
MG
1716 goto out;
1717 }
1718
c5b5a3dd 1719 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
6a56ccbc
DH
1720
1721 /*
1722 * Detect now whether the PMD could be writable; this information
1723 * is only valid while holding the PT lock.
1724 */
1725 writable = pmd_write(pmd);
1726 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1727 can_change_pmd_writable(vma, vmf->address, pmd))
1728 writable = true;
1729
667ffc31
KW
1730 folio = vm_normal_folio_pmd(vma, haddr, pmd);
1731 if (!folio)
c5b5a3dd
YS
1732 goto out_map;
1733
1734 /* See similar comment in do_numa_page for explanation */
6a56ccbc 1735 if (!writable)
c5b5a3dd
YS
1736 flags |= TNF_NO_GROUP;
1737
667ffc31 1738 nid = folio_nid(folio);
33024536
HY
1739 /*
1740 * In memory tiering mode, the cpupid of a slow-memory page is
1741 * reused to record the page access time, so use the default value.
1742 */
667ffc31 1743 if (node_is_toptier(nid))
c4a8d2fa 1744 last_cpupid = folio_last_cpupid(folio);
cda6d936 1745 target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
c5b5a3dd 1746 if (target_nid == NUMA_NO_NODE) {
667ffc31 1747 folio_put(folio);
c5b5a3dd
YS
1748 goto out_map;
1749 }
1750
82b0f8c3 1751 spin_unlock(vmf->ptl);
6a56ccbc 1752 writable = false;
8b1b436d 1753
667ffc31 1754 migrated = migrate_misplaced_folio(folio, vma, target_nid);
6688cc05
PZ
1755 if (migrated) {
1756 flags |= TNF_MIGRATED;
667ffc31 1757 nid = target_nid;
c5b5a3dd 1758 } else {
074c2381 1759 flags |= TNF_MIGRATE_FAIL;
c5b5a3dd
YS
1760 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1761 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1762 spin_unlock(vmf->ptl);
1763 goto out;
1764 }
1765 goto out_map;
1766 }
b8916634
MG
1767
1768out:
667ffc31
KW
1769 if (nid != NUMA_NO_NODE)
1770 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags);
8191acbd 1771
d10e63f2 1772 return 0;
c5b5a3dd
YS
1773
1774out_map:
1775 /* Restore the PMD */
1776 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1777 pmd = pmd_mkyoung(pmd);
6a56ccbc 1778 if (writable)
161e393c 1779 pmd = pmd_mkwrite(pmd, vma);
c5b5a3dd
YS
1780 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1781 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1782 spin_unlock(vmf->ptl);
1783 goto out;
d10e63f2
MG
1784}
1785
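/*
 * Context note (illustrative, not new code): the faults handled above do not
 * come from userspace directly. As I understand the flow, the NUMA-balancing
 * scanner (task_numa_work()) calls change_prot_numa(), which ends up in
 * change_huge_pmd(..., MM_CP_PROT_NUMA) below and turns present huge PMDs into
 * pmd_protnone() entries; the next access then traps here so the folio can be
 * migrated to, or accounted against, the accessing node. This requires the
 * numa_balancing sysctl to be enabled.
 */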
319904ad
HY
1786/*
1787 * Return true if we do MADV_FREE successfully on entire pmd page.
1788 * Otherwise, return false.
1789 */
1790bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
b8d3c4c3 1791 pmd_t *pmd, unsigned long addr, unsigned long next)
b8d3c4c3
MK
1792{
1793 spinlock_t *ptl;
1794 pmd_t orig_pmd;
fc986a38 1795 struct folio *folio;
b8d3c4c3 1796 struct mm_struct *mm = tlb->mm;
319904ad 1797 bool ret = false;
b8d3c4c3 1798
ed6a7935 1799 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
07e32661 1800
b6ec57f4
KS
1801 ptl = pmd_trans_huge_lock(pmd, vma);
1802 if (!ptl)
25eedabe 1803 goto out_unlocked;
b8d3c4c3
MK
1804
1805 orig_pmd = *pmd;
319904ad 1806 if (is_huge_zero_pmd(orig_pmd))
b8d3c4c3 1807 goto out;
b8d3c4c3 1808
84c3fc4e
ZY
1809 if (unlikely(!pmd_present(orig_pmd))) {
1810 VM_BUG_ON(thp_migration_supported() &&
1811 !is_pmd_migration_entry(orig_pmd));
1812 goto out;
1813 }
1814
fc986a38 1815 folio = pfn_folio(pmd_pfn(orig_pmd));
b8d3c4c3 1816 /*
fc986a38
KW
1817 * If other processes are mapping this folio, we can't discard
1818 * the folio unless they all do MADV_FREE, so let's skip the folio.
b8d3c4c3 1819 */
20b18aad 1820 if (folio_estimated_sharers(folio) != 1)
b8d3c4c3
MK
1821 goto out;
1822
fc986a38 1823 if (!folio_trylock(folio))
b8d3c4c3
MK
1824 goto out;
1825
1826 /*
1827 * If the user wants to discard only part of the THP's pages, split it
1828 * so MADV_FREE will deactivate just those pages.
1829 */
1830 if (next - addr != HPAGE_PMD_SIZE) {
fc986a38 1831 folio_get(folio);
b8d3c4c3 1832 spin_unlock(ptl);
fc986a38
KW
1833 split_folio(folio);
1834 folio_unlock(folio);
1835 folio_put(folio);
b8d3c4c3
MK
1836 goto out_unlocked;
1837 }
1838
fc986a38
KW
1839 if (folio_test_dirty(folio))
1840 folio_clear_dirty(folio);
1841 folio_unlock(folio);
b8d3c4c3 1842
b8d3c4c3 1843 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
58ceeb6b 1844 pmdp_invalidate(vma, addr, pmd);
b8d3c4c3
MK
1845 orig_pmd = pmd_mkold(orig_pmd);
1846 orig_pmd = pmd_mkclean(orig_pmd);
1847
1848 set_pmd_at(mm, addr, pmd, orig_pmd);
1849 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1850 }
802a3a92 1851
6a6fe9eb 1852 folio_mark_lazyfree(folio);
319904ad 1853 ret = true;
b8d3c4c3
MK
1854out:
1855 spin_unlock(ptl);
1856out_unlocked:
1857 return ret;
1858}
1859
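/*
 * Illustrative userspace sketch (not part of this file): madvise_free_huge_pmd()
 * above is what ultimately services MADV_FREE over a PMD-mapped THP. One
 * (hypothetical) way to exercise it: back an aligned region with anonymous
 * memory, hint MADV_HUGEPAGE, touch it, then mark it lazily freeable. The
 * 2 MiB size assumes the usual x86-64 HPAGE_PMD_SIZE.
 */
#if 0	/* userspace example, for illustration only */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;			/* one PMD-sized region */
	void *p = aligned_alloc(len, len);	/* PMD-aligned allocation */

	if (!p)
		return 1;
	madvise(p, len, MADV_HUGEPAGE);		/* ask for THP backing */
	memset(p, 0xaa, len);			/* populate, ideally as one huge page */
	madvise(p, len, MADV_FREE);		/* lazily free: reaches the code above */
	return 0;
}
#endif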
953c66c2
AK
1860static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1861{
1862 pgtable_t pgtable;
1863
1864 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1865 pte_free(mm, pgtable);
c4812909 1866 mm_dec_nr_ptes(mm);
953c66c2
AK
1867}
1868
71e3aac0 1869int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
f21760b1 1870 pmd_t *pmd, unsigned long addr)
71e3aac0 1871{
da146769 1872 pmd_t orig_pmd;
bf929152 1873 spinlock_t *ptl;
71e3aac0 1874
ed6a7935 1875 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
07e32661 1876
b6ec57f4
KS
1877 ptl = __pmd_trans_huge_lock(pmd, vma);
1878 if (!ptl)
da146769
KS
1879 return 0;
1880 /*
1881 * For architectures like ppc64 we look at deposited pgtable
1882 * when calling pmdp_huge_get_and_clear. So do the
1883 * pgtable_trans_huge_withdraw after finishing pmdp related
1884 * operations.
1885 */
93a98695
AK
1886 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1887 tlb->fullmm);
e5136e87 1888 arch_check_zapped_pmd(vma, orig_pmd);
da146769 1889 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
2484ca9b 1890 if (vma_is_special_huge(vma)) {
3b6521f5
OH
1891 if (arch_needs_pgtable_deposit())
1892 zap_deposited_table(tlb->mm, pmd);
da146769 1893 spin_unlock(ptl);
da146769 1894 } else if (is_huge_zero_pmd(orig_pmd)) {
c14a6eb4 1895 zap_deposited_table(tlb->mm, pmd);
da146769 1896 spin_unlock(ptl);
da146769 1897 } else {
616b8371
ZY
1898 struct page *page = NULL;
1899 int flush_needed = 1;
1900
1901 if (pmd_present(orig_pmd)) {
1902 page = pmd_page(orig_pmd);
a8e61d58 1903 folio_remove_rmap_pmd(page_folio(page), page, vma);
616b8371
ZY
1904 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1905 VM_BUG_ON_PAGE(!PageHead(page), page);
1906 } else if (thp_migration_supported()) {
1907 swp_entry_t entry;
1908
1909 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1910 entry = pmd_to_swp_entry(orig_pmd);
af5cdaf8 1911 page = pfn_swap_entry_to_page(entry);
616b8371
ZY
1912 flush_needed = 0;
1913 } else
1914 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1915
b5072380 1916 if (PageAnon(page)) {
c14a6eb4 1917 zap_deposited_table(tlb->mm, pmd);
b5072380
KS
1918 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1919 } else {
953c66c2
AK
1920 if (arch_needs_pgtable_deposit())
1921 zap_deposited_table(tlb->mm, pmd);
fadae295 1922 add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
b5072380 1923 }
616b8371 1924
da146769 1925 spin_unlock(ptl);
616b8371
ZY
1926 if (flush_needed)
1927 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
025c5b24 1928 }
da146769 1929 return 1;
71e3aac0
AA
1930}
1931
1dd38b6c
AK
1932#ifndef pmd_move_must_withdraw
1933static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1934 spinlock_t *old_pmd_ptl,
1935 struct vm_area_struct *vma)
1936{
1937 /*
1938 * With split pmd locks we also need to move the preallocated
1939 * PTE page table if new_pmd is on a different PMD page table.
1940 *
1941 * We also don't deposit and withdraw tables for file pages.
1942 */
1943 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1944}
1945#endif
1946
ab6e3d09
NH
1947static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1948{
1949#ifdef CONFIG_MEM_SOFT_DIRTY
1950 if (unlikely(is_pmd_migration_entry(pmd)))
1951 pmd = pmd_swp_mksoft_dirty(pmd);
1952 else if (pmd_present(pmd))
1953 pmd = pmd_mksoft_dirty(pmd);
1954#endif
1955 return pmd;
1956}
1957
bf8616d5 1958bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
b8aa9d9d 1959 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
37a1c49a 1960{
bf929152 1961 spinlock_t *old_ptl, *new_ptl;
37a1c49a 1962 pmd_t pmd;
37a1c49a 1963 struct mm_struct *mm = vma->vm_mm;
5d190420 1964 bool force_flush = false;
37a1c49a 1965
37a1c49a
AA
1966 /*
1967 * The destination pmd shouldn't be established, free_pgtables()
a5be621e
HD
1968 * should have released it; but move_page_tables() might have already
1969 * inserted a page table, if racing against shmem/file collapse.
37a1c49a 1970 */
a5be621e 1971 if (!pmd_none(*new_pmd)) {
37a1c49a 1972 VM_BUG_ON(pmd_trans_huge(*new_pmd));
4b471e88 1973 return false;
37a1c49a
AA
1974 }
1975
bf929152
KS
1976 /*
1977 * We don't have to worry about the ordering of src and dst
c1e8d7c6 1978 * ptlocks because exclusive mmap_lock prevents deadlock.
bf929152 1979 */
b6ec57f4
KS
1980 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1981 if (old_ptl) {
bf929152
KS
1982 new_ptl = pmd_lockptr(mm, new_pmd);
1983 if (new_ptl != old_ptl)
1984 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
8809aa2d 1985 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
eb66ae03 1986 if (pmd_present(pmd))
a2ce2666 1987 force_flush = true;
025c5b24 1988 VM_BUG_ON(!pmd_none(*new_pmd));
3592806c 1989
1dd38b6c 1990 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
b3084f4d 1991 pgtable_t pgtable;
3592806c
KS
1992 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1993 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
3592806c 1994 }
ab6e3d09
NH
1995 pmd = move_soft_dirty_pmd(pmd);
1996 set_pmd_at(mm, new_addr, new_pmd, pmd);
5d190420 1997 if (force_flush)
7c38f181 1998 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
eb66ae03
LT
1999 if (new_ptl != old_ptl)
2000 spin_unlock(new_ptl);
bf929152 2001 spin_unlock(old_ptl);
4b471e88 2002 return true;
37a1c49a 2003 }
4b471e88 2004 return false;
37a1c49a
AA
2005}
2006
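/*
 * Illustrative userspace sketch (not part of this file): move_huge_pmd() is the
 * fast path taken when mremap() relocates a PMD-mapped anonymous region, so the
 * whole huge mapping moves by rewriting one PMD entry instead of 512 PTEs. A
 * minimal (hypothetical) trigger is sketched below; for the PMD-level move both
 * the old and new addresses must be PMD-aligned, which a real test would ensure
 * by over-allocating and rounding up.
 */
#if 0	/* userspace example, for illustration only */
#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;
	void *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *target = mmap(NULL, len, PROT_NONE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (old == MAP_FAILED || target == MAP_FAILED)
		return 1;
	madvise(old, len, MADV_HUGEPAGE);
	memset(old, 1, len);			/* fault in, ideally as a THP */
	/* MREMAP_FIXED replaces the reservation at @target with the moved mapping. */
	void *new = mremap(old, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, target);
	return new == MAP_FAILED;
}
#endif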
f123d74a
MG
2007/*
2008 * Returns
2009 * - 0 if PMD could not be locked
f0953a1b 2010 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
e346e668 2011 * or if prot_numa but THP migration is not supported
f0953a1b 2012 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
f123d74a 2013 */
4a18419f
NA
2014int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
2015 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
2016 unsigned long cp_flags)
cd7548ab
JW
2017{
2018 struct mm_struct *mm = vma->vm_mm;
bf929152 2019 spinlock_t *ptl;
c9fe6656 2020 pmd_t oldpmd, entry;
58705444 2021 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
292924b2
PX
2022 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
2023 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6a56ccbc 2024 int ret = 1;
cd7548ab 2025
4a18419f
NA
2026 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
2027
e346e668
YS
2028 if (prot_numa && !thp_migration_supported())
2029 return 1;
2030
b6ec57f4 2031 ptl = __pmd_trans_huge_lock(pmd, vma);
0a85e51d
KS
2032 if (!ptl)
2033 return 0;
e944fd67 2034
84c3fc4e
ZY
2035#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2036 if (is_swap_pmd(*pmd)) {
2037 swp_entry_t entry = pmd_to_swp_entry(*pmd);
d986ba2b 2038 struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
24bf08c4 2039 pmd_t newpmd;
84c3fc4e
ZY
2040
2041 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
4dd845b5 2042 if (is_writable_migration_entry(entry)) {
84c3fc4e
ZY
2043 /*
2044 * A protection check is difficult so
2045 * just be safe and disable write
2046 */
d986ba2b 2047 if (folio_test_anon(folio))
6c287605
DH
2048 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
2049 else
2050 entry = make_readable_migration_entry(swp_offset(entry));
84c3fc4e 2051 newpmd = swp_entry_to_pmd(entry);
ab6e3d09
NH
2052 if (pmd_swp_soft_dirty(*pmd))
2053 newpmd = pmd_swp_mksoft_dirty(newpmd);
24bf08c4
DH
2054 } else {
2055 newpmd = *pmd;
84c3fc4e 2056 }
24bf08c4
DH
2057
2058 if (uffd_wp)
2059 newpmd = pmd_swp_mkuffd_wp(newpmd);
2060 else if (uffd_wp_resolve)
2061 newpmd = pmd_swp_clear_uffd_wp(newpmd);
2062 if (!pmd_same(*pmd, newpmd))
2063 set_pmd_at(mm, addr, pmd, newpmd);
84c3fc4e
ZY
2064 goto unlock;
2065 }
2066#endif
2067
a1a3a2fc 2068 if (prot_numa) {
d986ba2b 2069 struct folio *folio;
33024536 2070 bool toptier;
a1a3a2fc
HY
2071 /*
2072 * Avoid trapping faults against the zero page. The read-only
2073 * data is likely to be read-cached on the local CPU and
2074 * local/remote hits to the zero page are not interesting.
2075 */
2076 if (is_huge_zero_pmd(*pmd))
2077 goto unlock;
025c5b24 2078
a1a3a2fc
HY
2079 if (pmd_protnone(*pmd))
2080 goto unlock;
0a85e51d 2081
d986ba2b
KW
2082 folio = page_folio(pmd_page(*pmd));
2083 toptier = node_is_toptier(folio_nid(folio));
a1a3a2fc
HY
2084 /*
2085 * Skip scanning top tier node if normal numa
2086 * balancing is disabled
2087 */
2088 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
33024536 2089 toptier)
a1a3a2fc 2090 goto unlock;
33024536
HY
2091
2092 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
2093 !toptier)
d986ba2b
KW
2094 folio_xchg_access_time(folio,
2095 jiffies_to_msecs(jiffies));
a1a3a2fc 2096 }
ced10803 2097 /*
3e4e28c5 2098 * In the prot_numa case we are under mmap_read_lock(mm). It's critical
ced10803 2099 * not to clear the pmd intermittently, to avoid racing with MADV_DONTNEED,
3e4e28c5 2100 * which also runs under mmap_read_lock(mm):
ced10803
KS
2101 *
2102 * CPU0: CPU1:
2103 * change_huge_pmd(prot_numa=1)
2104 * pmdp_huge_get_and_clear_notify()
2105 * madvise_dontneed()
2106 * zap_pmd_range()
2107 * pmd_trans_huge(*pmd) == 0 (without ptl)
2108 * // skip the pmd
2109 * set_pmd_at();
2110 * // pmd is re-established
2111 *
2112 * The race makes MADV_DONTNEED miss the huge pmd and fail to clear it,
2113 * which may break userspace.
2114 *
4f831457 2115 * pmdp_invalidate_ad() is required to make sure we don't miss
ced10803
KS
2116 * dirty/young flags set by hardware.
2117 */
4f831457 2118 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
ced10803 2119
c9fe6656 2120 entry = pmd_modify(oldpmd, newprot);
f1eb1bac 2121 if (uffd_wp)
292924b2 2122 entry = pmd_mkuffd_wp(entry);
f1eb1bac 2123 else if (uffd_wp_resolve)
292924b2
PX
2124 /*
2125 * Leave the write bit to be handled by PF interrupt
2126 * handler, then things like COW could be properly
2127 * handled.
2128 */
2129 entry = pmd_clear_uffd_wp(entry);
c27f479e
DH
2130
2131 /* See change_pte_range(). */
2132 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
2133 can_change_pmd_writable(vma, addr, entry))
161e393c 2134 entry = pmd_mkwrite(entry, vma);
c27f479e 2135
0a85e51d
KS
2136 ret = HPAGE_PMD_NR;
2137 set_pmd_at(mm, addr, pmd, entry);
4a18419f 2138
c9fe6656
NA
2139 if (huge_pmd_needs_flush(oldpmd, entry))
2140 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
0a85e51d
KS
2141unlock:
2142 spin_unlock(ptl);
025c5b24
NH
2143 return ret;
2144}
2145
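/*
 * Illustrative userspace sketch (not part of this file): change_huge_pmd() above
 * is reached when mprotect() (or NUMA hinting and userfaultfd write-protect)
 * changes the protection of a PMD-mapped THP, updating a single PMD instead of
 * splitting the mapping. A minimal (hypothetical) trigger:
 */
#if 0	/* userspace example, for illustration only */
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	madvise(p, len, MADV_HUGEPAGE);
	memset(p, 0, len);			/* populate, ideally as one THP */
	mprotect(p, len, PROT_READ);		/* protection change stays at PMD level */
	return p[0];
}
#endif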
adef4406
AA
2146#ifdef CONFIG_USERFAULTFD
2147/*
2148 * The PT lock for src_pmd and the mmap_lock for reading are held by
2149 * the caller, but this function must drop the page table lock before returning.
2150 * Just move the page from src_pmd to dst_pmd if possible.
2151 * Return zero if the page was moved, -EAGAIN if the operation needs to be
2152 * repeated by the caller, or another error code in case of failure.
2153 */
2154int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
2155 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
2156 unsigned long dst_addr, unsigned long src_addr)
2157{
2158 pmd_t _dst_pmd, src_pmdval;
2159 struct page *src_page;
2160 struct folio *src_folio;
2161 struct anon_vma *src_anon_vma;
2162 spinlock_t *src_ptl, *dst_ptl;
2163 pgtable_t src_pgtable;
2164 struct mmu_notifier_range range;
2165 int err = 0;
2166
2167 src_pmdval = *src_pmd;
2168 src_ptl = pmd_lockptr(mm, src_pmd);
2169
2170 lockdep_assert_held(src_ptl);
2171 mmap_assert_locked(mm);
2172
2173 /* Sanity checks before the operation */
2174 if (WARN_ON_ONCE(!pmd_none(dst_pmdval)) || WARN_ON_ONCE(src_addr & ~HPAGE_PMD_MASK) ||
2175 WARN_ON_ONCE(dst_addr & ~HPAGE_PMD_MASK)) {
2176 spin_unlock(src_ptl);
2177 return -EINVAL;
2178 }
2179
2180 if (!pmd_trans_huge(src_pmdval)) {
2181 spin_unlock(src_ptl);
2182 if (is_pmd_migration_entry(src_pmdval)) {
2183 pmd_migration_entry_wait(mm, &src_pmdval);
2184 return -EAGAIN;
2185 }
2186 return -ENOENT;
2187 }
2188
2189 src_page = pmd_page(src_pmdval);
2190 if (unlikely(!PageAnonExclusive(src_page))) {
2191 spin_unlock(src_ptl);
2192 return -EBUSY;
2193 }
2194
2195 src_folio = page_folio(src_page);
2196 folio_get(src_folio);
2197 spin_unlock(src_ptl);
2198
2199 flush_cache_range(src_vma, src_addr, src_addr + HPAGE_PMD_SIZE);
2200 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, src_addr,
2201 src_addr + HPAGE_PMD_SIZE);
2202 mmu_notifier_invalidate_range_start(&range);
2203
2204 folio_lock(src_folio);
2205
2206 /*
2207 * split_huge_page() walks the anon_vma chain without the page
2208 * lock. Serialize against it with the anon_vma lock; the page
2209 * lock is not enough.
2210 */
2211 src_anon_vma = folio_get_anon_vma(src_folio);
2212 if (!src_anon_vma) {
2213 err = -EAGAIN;
2214 goto unlock_folio;
2215 }
2216 anon_vma_lock_write(src_anon_vma);
2217
2218 dst_ptl = pmd_lockptr(mm, dst_pmd);
2219 double_pt_lock(src_ptl, dst_ptl);
2220 if (unlikely(!pmd_same(*src_pmd, src_pmdval) ||
2221 !pmd_same(*dst_pmd, dst_pmdval))) {
2222 err = -EAGAIN;
2223 goto unlock_ptls;
2224 }
2225 if (folio_maybe_dma_pinned(src_folio) ||
2226 !PageAnonExclusive(&src_folio->page)) {
2227 err = -EBUSY;
2228 goto unlock_ptls;
2229 }
2230
2231 if (WARN_ON_ONCE(!folio_test_head(src_folio)) ||
2232 WARN_ON_ONCE(!folio_test_anon(src_folio))) {
2233 err = -EBUSY;
2234 goto unlock_ptls;
2235 }
2236
2237 folio_move_anon_rmap(src_folio, dst_vma);
2238 WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
2239
2240 src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
2241 /* Folio got pinned from under us. Put it back and fail the move. */
2242 if (folio_maybe_dma_pinned(src_folio)) {
2243 set_pmd_at(mm, src_addr, src_pmd, src_pmdval);
2244 err = -EBUSY;
2245 goto unlock_ptls;
2246 }
2247
2248 _dst_pmd = mk_huge_pmd(&src_folio->page, dst_vma->vm_page_prot);
2249 /* Follow mremap() behavior and treat the entry dirty after the move */
2250 _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
2251 set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
2252
2253 src_pgtable = pgtable_trans_huge_withdraw(mm, src_pmd);
2254 pgtable_trans_huge_deposit(mm, dst_pmd, src_pgtable);
2255unlock_ptls:
2256 double_pt_unlock(src_ptl, dst_ptl);
2257 anon_vma_unlock_write(src_anon_vma);
2258 put_anon_vma(src_anon_vma);
2259unlock_folio:
2260 /* unblock rmap walks */
2261 folio_unlock(src_folio);
2262 mmu_notifier_invalidate_range_end(&range);
2263 folio_put(src_folio);
2264 return err;
2265}
2266#endif /* CONFIG_USERFAULTFD */
2267
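/*
 * Illustrative userspace fragment (not part of this file): move_pages_huge_pmd()
 * backs the UFFDIO_MOVE ioctl when both addresses are PMD-aligned, letting an
 * exclusive anonymous THP be remapped into dst_vma without copying. The field
 * names below follow my reading of include/uapi/linux/userfaultfd.h and should
 * be double-checked there; the userfaultfd setup (UFFDIO_API, UFFDIO_REGISTER)
 * is omitted for brevity.
 */
#if 0	/* userspace fragment, for illustration only */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static long thp_move(int uffd, void *dst, void *src, unsigned long len)
{
	struct uffdio_move mv = {
		.dst	= (unsigned long)dst,	/* registered, PMD-aligned destination */
		.src	= (unsigned long)src,	/* PMD-aligned source THP */
		.len	= len,			/* e.g. 2 MiB (HPAGE_PMD_SIZE) */
		.mode	= 0,
	};

	if (ioctl(uffd, UFFDIO_MOVE, &mv))
		return -1;
	return mv.move;				/* bytes moved on success */
}
#endif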
025c5b24 2268/*
8f19b0c0 2269 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
025c5b24 2270 *
8f19b0c0
HY
2271 * Note that if it returns a page table lock pointer, this routine returns
2272 * without unlocking the page table lock, so callers must unlock it.
025c5b24 2273 */
b6ec57f4 2274spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
025c5b24 2275{
b6ec57f4
KS
2276 spinlock_t *ptl;
2277 ptl = pmd_lock(vma->vm_mm, pmd);
84c3fc4e
ZY
2278 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
2279 pmd_devmap(*pmd)))
b6ec57f4
KS
2280 return ptl;
2281 spin_unlock(ptl);
2282 return NULL;
cd7548ab
JW
2283}
2284
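/*
 * Typical caller pattern for the helper above (sketch mirroring existing users
 * such as madvise_free_huge_pmd(); not new functionality). Callers go through
 * the pmd_trans_huge_lock() wrapper and must drop the returned lock themselves:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd while it cannot change under us ...
 *		spin_unlock(ptl);
 *	}
 */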
a00cc7d9 2285/*
d965e390 2286 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
a00cc7d9 2287 *
d965e390
ML
2288 * Note that if it returns a page table lock pointer, this routine returns
2289 * without unlocking the page table lock, so callers must unlock it.
a00cc7d9
MW
2290 */
2291spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
2292{
2293 spinlock_t *ptl;
2294
2295 ptl = pud_lock(vma->vm_mm, pud);
2296 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
2297 return ptl;
2298 spin_unlock(ptl);
2299 return NULL;
2300}
2301
2302#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2303int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
2304 pud_t *pud, unsigned long addr)
2305{
a00cc7d9
MW
2306 spinlock_t *ptl;
2307
2308 ptl = __pud_trans_huge_lock(pud, vma);
2309 if (!ptl)
2310 return 0;
74929079 2311
f32928ab 2312 pudp_huge_get_and_clear_full(vma, addr, pud, tlb->fullmm);
a00cc7d9 2313 tlb_remove_pud_tlb_entry(tlb, pud, addr);
2484ca9b 2314 if (vma_is_special_huge(vma)) {
a00cc7d9
MW
2315 spin_unlock(ptl);
2316 /* No zero page support yet */
2317 } else {
2318 /* No support for anonymous PUD pages yet */
2319 BUG();
2320 }
2321 return 1;
2322}
2323
2324static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2325 unsigned long haddr)
2326{
2327 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2328 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2329 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2330 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2331
ce9311cf 2332 count_vm_event(THP_SPLIT_PUD);
a00cc7d9 2333
ec8832d0 2334 pudp_huge_clear_flush(vma, haddr, pud);
a00cc7d9
MW
2335}
2336
2337void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2338 unsigned long address)
2339{
2340 spinlock_t *ptl;
ac46d4f3 2341 struct mmu_notifier_range range;
a00cc7d9 2342
7d4a8be0 2343 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
6f4f13e8 2344 address & HPAGE_PUD_MASK,
ac46d4f3
JG
2345 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2346 mmu_notifier_invalidate_range_start(&range);
2347 ptl = pud_lock(vma->vm_mm, pud);
a00cc7d9
MW
2348 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2349 goto out;
ac46d4f3 2350 __split_huge_pud_locked(vma, pud, range.start);
a00cc7d9
MW
2351
2352out:
2353 spin_unlock(ptl);
ec8832d0 2354 mmu_notifier_invalidate_range_end(&range);
a00cc7d9
MW
2355}
2356#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2357
eef1b3ba
KS
2358static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2359 unsigned long haddr, pmd_t *pmd)
2360{
2361 struct mm_struct *mm = vma->vm_mm;
2362 pgtable_t pgtable;
42b2af2c 2363 pmd_t _pmd, old_pmd;
c9c1ee20
HD
2364 unsigned long addr;
2365 pte_t *pte;
eef1b3ba
KS
2366 int i;
2367
0f10851e
JG
2368 /*
2369 * Leave the pmd empty until the ptes are filled. Note that it is fine to
2370 * delay notification until mmu_notifier_invalidate_range_end(), as we are
2371 * replacing a write-protected zero page mapped by a pmd with write-protected
2372 * zero pages mapped by ptes.
2373 *
ee65728e 2374 * See Documentation/mm/mmu_notifier.rst
0f10851e 2375 */
42b2af2c 2376 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
eef1b3ba
KS
2377
2378 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2379 pmd_populate(mm, &_pmd, pgtable);
2380
c9c1ee20
HD
2381 pte = pte_offset_map(&_pmd, haddr);
2382 VM_BUG_ON(!pte);
2383 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2384 pte_t entry;
2385
2386 entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);
eef1b3ba 2387 entry = pte_mkspecial(entry);
42b2af2c
DH
2388 if (pmd_uffd_wp(old_pmd))
2389 entry = pte_mkuffd_wp(entry);
c33c7948 2390 VM_BUG_ON(!pte_none(ptep_get(pte)));
c9c1ee20
HD
2391 set_pte_at(mm, addr, pte, entry);
2392 pte++;
eef1b3ba 2393 }
c9c1ee20 2394 pte_unmap(pte - 1);
eef1b3ba
KS
2395 smp_wmb(); /* make pte visible before pmd */
2396 pmd_populate(mm, pmd, pgtable);
eef1b3ba
KS
2397}
2398
2399static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
ba988280 2400 unsigned long haddr, bool freeze)
eef1b3ba
KS
2401{
2402 struct mm_struct *mm = vma->vm_mm;
91b2978a 2403 struct folio *folio;
eef1b3ba
KS
2404 struct page *page;
2405 pgtable_t pgtable;
423ac9af 2406 pmd_t old_pmd, _pmd;
292924b2 2407 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
0ccf7f16 2408 bool anon_exclusive = false, dirty = false;
2ac015e2 2409 unsigned long addr;
c9c1ee20 2410 pte_t *pte;
eef1b3ba
KS
2411 int i;
2412
2413 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2414 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2415 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
84c3fc4e
ZY
2416 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2417 && !pmd_devmap(*pmd));
eef1b3ba
KS
2418
2419 count_vm_event(THP_SPLIT_PMD);
2420
d21b9e57 2421 if (!vma_is_anonymous(vma)) {
ec8832d0 2422 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
953c66c2
AK
2423 /*
2424 * We are going to unmap this huge page. So
2425 * just go ahead and zap it
2426 */
2427 if (arch_needs_pgtable_deposit())
2428 zap_deposited_table(mm, pmd);
2484ca9b 2429 if (vma_is_special_huge(vma))
d21b9e57 2430 return;
99fa8a48
HD
2431 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2432 swp_entry_t entry;
2433
2434 entry = pmd_to_swp_entry(old_pmd);
af5cdaf8 2435 page = pfn_swap_entry_to_page(entry);
99fa8a48
HD
2436 } else {
2437 page = pmd_page(old_pmd);
a8e61d58
DH
2438 folio = page_folio(page);
2439 if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
2440 folio_set_dirty(folio);
2441 if (!folio_test_referenced(folio) && pmd_young(old_pmd))
2442 folio_set_referenced(folio);
2443 folio_remove_rmap_pmd(folio, page, vma);
2444 folio_put(folio);
99fa8a48 2445 }
fadae295 2446 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
eef1b3ba 2447 return;
99fa8a48
HD
2448 }
2449
3b77e8c8 2450 if (is_huge_zero_pmd(*pmd)) {
4645b9fe
JG
2451 /*
2452 * FIXME: Do we want to invalidate secondary mmu by calling
1af5a810
AP
2453 * mmu_notifier_arch_invalidate_secondary_tlbs() see comments below
2454 * inside __split_huge_pmd() ?
4645b9fe
JG
2455 *
2456 * We are going from a write-protected huge zero page to
2457 * write-protected small zero pages, so it does not seem useful
2458 * to invalidate the secondary mmu at this time.
2459 */
eef1b3ba
KS
2460 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2461 }
2462
423ac9af
AK
2463 /*
2464 * Up to this point the pmd is present and huge and userland has the
2465 * whole access to the hugepage during the split (which happens in
2466 * place). If we overwrite the pmd with the not-huge version pointing
2467 * to the pte here (which of course we could if all CPUs were bug
2468 * free), userland could trigger a small page size TLB miss on the
2469 * small sized TLB while the hugepage TLB entry is still established in
2470 * the huge TLB. Some CPUs don't like that.
42742d9b
AK
2471 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2472 * 383 on page 105. Intel should be safe but also warns that it's
423ac9af
AK
2473 * only safe if the permission and cache attributes of the two entries
2474 * loaded in the two TLBs are identical (which should be the case here).
2475 * But it is generally safer to never allow small and huge TLB entries
2476 * for the same virtual address to be loaded simultaneously. So instead
2477 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2478 * current pmd notpresent (atomically because here the pmd_trans_huge
2479 * must remain set at all times on the pmd until the split is complete
2480 * for this pmd), then we flush the SMP TLB and finally we write the
2481 * non-huge version of the pmd entry with pmd_populate.
2482 */
2483 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2484
423ac9af 2485 pmd_migration = is_pmd_migration_entry(old_pmd);
2e83ee1d 2486 if (unlikely(pmd_migration)) {
84c3fc4e
ZY
2487 swp_entry_t entry;
2488
423ac9af 2489 entry = pmd_to_swp_entry(old_pmd);
af5cdaf8 2490 page = pfn_swap_entry_to_page(entry);
4dd845b5 2491 write = is_writable_migration_entry(entry);
6c287605
DH
2492 if (PageAnon(page))
2493 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2e346877
PX
2494 young = is_migration_entry_young(entry);
2495 dirty = is_migration_entry_dirty(entry);
2e83ee1d 2496 soft_dirty = pmd_swp_soft_dirty(old_pmd);
f45ec5ff 2497 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2e83ee1d 2498 } else {
423ac9af 2499 page = pmd_page(old_pmd);
91b2978a 2500 folio = page_folio(page);
0ccf7f16
PX
2501 if (pmd_dirty(old_pmd)) {
2502 dirty = true;
91b2978a 2503 folio_set_dirty(folio);
0ccf7f16 2504 }
2e83ee1d
PX
2505 write = pmd_write(old_pmd);
2506 young = pmd_young(old_pmd);
2507 soft_dirty = pmd_soft_dirty(old_pmd);
292924b2 2508 uffd_wp = pmd_uffd_wp(old_pmd);
6c287605 2509
91b2978a
DH
2510 VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio);
2511 VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
6c287605
DH
2512
2513 /*
2514 * Without "freeze", we'll simply split the PMD, propagating the
2515 * PageAnonExclusive() flag for each PTE by setting it for
2516 * each subpage -- no need to (temporarily) clear.
2517 *
2518 * With "freeze" we want to replace mapped pages by
2519 * migration entries right away. This is only possible if we
2520 * managed to clear PageAnonExclusive() -- see
2521 * set_pmd_migration_entry().
2522 *
2523 * In case we cannot clear PageAnonExclusive(), split the PMD
2524 * only and let try_to_migrate_one() fail later.
088b8aa5 2525 *
e3b4b137 2526 * See folio_try_share_anon_rmap_pmd(): invalidate PMD first.
6c287605 2527 */
91b2978a 2528 anon_exclusive = PageAnonExclusive(page);
e3b4b137
DH
2529 if (freeze && anon_exclusive &&
2530 folio_try_share_anon_rmap_pmd(folio, page))
6c287605 2531 freeze = false;
91b2978a
DH
2532 if (!freeze) {
2533 rmap_t rmap_flags = RMAP_NONE;
2534
2535 folio_ref_add(folio, HPAGE_PMD_NR - 1);
2536 if (anon_exclusive)
2537 rmap_flags |= RMAP_EXCLUSIVE;
2538 folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR,
2539 vma, haddr, rmap_flags);
2540 }
2e83ee1d 2541 }
eef1b3ba 2542
423ac9af
AK
2543 /*
2544 * Withdraw the table only after we mark the pmd entry invalid.
2545 * This's critical for some architectures (Power).
2546 */
eef1b3ba
KS
2547 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2548 pmd_populate(mm, &_pmd, pgtable);
2549
c9c1ee20
HD
2550 pte = pte_offset_map(&_pmd, haddr);
2551 VM_BUG_ON(!pte);
2ac015e2 2552 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
c9c1ee20 2553 pte_t entry;
eef1b3ba
KS
2554 /*
2555 * Note that NUMA hinting access restrictions are not
2556 * transferred to avoid any possibility of altering
2557 * permissions across VMAs.
2558 */
84c3fc4e 2559 if (freeze || pmd_migration) {
ba988280 2560 swp_entry_t swp_entry;
4dd845b5
AP
2561 if (write)
2562 swp_entry = make_writable_migration_entry(
2563 page_to_pfn(page + i));
6c287605
DH
2564 else if (anon_exclusive)
2565 swp_entry = make_readable_exclusive_migration_entry(
2566 page_to_pfn(page + i));
4dd845b5
AP
2567 else
2568 swp_entry = make_readable_migration_entry(
2569 page_to_pfn(page + i));
2e346877
PX
2570 if (young)
2571 swp_entry = make_migration_entry_young(swp_entry);
2572 if (dirty)
2573 swp_entry = make_migration_entry_dirty(swp_entry);
ba988280 2574 entry = swp_entry_to_pte(swp_entry);
804dd150
AA
2575 if (soft_dirty)
2576 entry = pte_swp_mksoft_dirty(entry);
f45ec5ff
PX
2577 if (uffd_wp)
2578 entry = pte_swp_mkuffd_wp(entry);
ba988280 2579 } else {
6d2329f8 2580 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
1462c52e 2581 if (write)
161e393c 2582 entry = pte_mkwrite(entry, vma);
ba988280
KS
2583 if (!young)
2584 entry = pte_mkold(entry);
e833bc50
PX
2585 /* NOTE: this may set soft-dirty too on some archs */
2586 if (dirty)
2587 entry = pte_mkdirty(entry);
804dd150
AA
2588 if (soft_dirty)
2589 entry = pte_mksoft_dirty(entry);
292924b2
PX
2590 if (uffd_wp)
2591 entry = pte_mkuffd_wp(entry);
ba988280 2592 }
c33c7948 2593 VM_BUG_ON(!pte_none(ptep_get(pte)));
2ac015e2 2594 set_pte_at(mm, addr, pte, entry);
c9c1ee20 2595 pte++;
eef1b3ba 2596 }
c9c1ee20 2597 pte_unmap(pte - 1);
eef1b3ba 2598
cb67f428 2599 if (!pmd_migration)
a8e61d58 2600 folio_remove_rmap_pmd(folio, page, vma);
96d82deb
HD
2601 if (freeze)
2602 put_page(page);
eef1b3ba
KS
2603
2604 smp_wmb(); /* make pte visible before pmd */
2605 pmd_populate(mm, pmd, pgtable);
2606}
2607
2608void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
af28a988 2609 unsigned long address, bool freeze, struct folio *folio)
eef1b3ba
KS
2610{
2611 spinlock_t *ptl;
ac46d4f3 2612 struct mmu_notifier_range range;
eef1b3ba 2613
7d4a8be0 2614 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
6f4f13e8 2615 address & HPAGE_PMD_MASK,
ac46d4f3
JG
2616 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2617 mmu_notifier_invalidate_range_start(&range);
2618 ptl = pmd_lock(vma->vm_mm, pmd);
33f4751e
NH
2619
2620 /*
af28a988
MWO
2621 * If caller asks to setup a migration entry, we need a folio to check
2622 * pmd against. Otherwise we can end up replacing wrong folio.
33f4751e 2623 */
af28a988 2624 VM_BUG_ON(freeze && !folio);
83a8441f 2625 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
33f4751e 2626
7f760917 2627 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
83a8441f 2628 is_pmd_migration_entry(*pmd)) {
cea33328
ML
2629 /*
2630 * It's safe to call pmd_page when folio is set because it's
2631 * guaranteed that pmd is present.
2632 */
83a8441f
MWO
2633 if (folio && folio != page_folio(pmd_page(*pmd)))
2634 goto out;
7f760917 2635 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
83a8441f 2636 }
7f760917 2637
e90309c9 2638out:
eef1b3ba 2639 spin_unlock(ptl);
ec8832d0 2640 mmu_notifier_invalidate_range_end(&range);
eef1b3ba
KS
2641}
2642
fec89c10 2643void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
af28a988 2644 bool freeze, struct folio *folio)
94fcc585 2645{
50722804 2646 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
94fcc585 2647
50722804 2648 if (!pmd)
f72e7dcd
HD
2649 return;
2650
af28a988 2651 __split_huge_pmd(vma, pmd, address, freeze, folio);
94fcc585
AA
2652}
2653
71f9e58e
ML
2654static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2655{
2656 /*
2657 * If the new address isn't hpage-aligned and it could previously
2658 * contain a hugepage: check if we need to split a huge pmd.
2659 */
2660 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2661 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2662 ALIGN(address, HPAGE_PMD_SIZE)))
2663 split_huge_pmd_address(vma, address, false, NULL);
2664}
2665
e1b9996b 2666void vma_adjust_trans_huge(struct vm_area_struct *vma,
94fcc585
AA
2667 unsigned long start,
2668 unsigned long end,
2669 long adjust_next)
2670{
71f9e58e
ML
2671 /* Check if we need to split start first. */
2672 split_huge_pmd_if_needed(vma, start);
94fcc585 2673
71f9e58e
ML
2674 /* Check if we need to split end next. */
2675 split_huge_pmd_if_needed(vma, end);
94fcc585
AA
2676
2677 /*
68540502 2678 * If we're also updating the next vma vm_start,
71f9e58e 2679 * check if we need to split it.
94fcc585
AA
2680 */
2681 if (adjust_next > 0) {
68540502 2682 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
94fcc585 2683 unsigned long nstart = next->vm_start;
f9d86a60 2684 nstart += adjust_next;
71f9e58e 2685 split_huge_pmd_if_needed(next, nstart);
94fcc585
AA
2686 }
2687}
e9b61f19 2688
684555aa 2689static void unmap_folio(struct folio *folio)
e9b61f19 2690{
a98a2f0c 2691 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
3027c6f8 2692 TTU_SYNC | TTU_BATCH_FLUSH;
e9b61f19 2693
684555aa 2694 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
e9b61f19 2695
a98a2f0c
AP
2696 /*
2697 * Anon pages need migration entries to preserve them, but file
2698 * pages can simply be left unmapped, then faulted back on demand.
2699 * If that is ever changed (perhaps for mlock), update remap_page().
2700 */
4b8554c5
MWO
2701 if (folio_test_anon(folio))
2702 try_to_migrate(folio, ttu_flags);
a98a2f0c 2703 else
869f7ee6 2704 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
3027c6f8
BW
2705
2706 try_to_unmap_flush();
e9b61f19
KS
2707}
2708
4eecb8b9 2709static void remap_page(struct folio *folio, unsigned long nr)
e9b61f19 2710{
4eecb8b9 2711 int i = 0;
ab02c252 2712
684555aa 2713 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
4eecb8b9 2714 if (!folio_test_anon(folio))
ab02c252 2715 return;
4eecb8b9
MWO
2716 for (;;) {
2717 remove_migration_ptes(folio, folio, true);
2718 i += folio_nr_pages(folio);
2719 if (i >= nr)
2720 break;
2721 folio = folio_next(folio);
ace71a19 2722 }
e9b61f19
KS
2723}
2724
94866635 2725static void lru_add_page_tail(struct page *head, struct page *tail,
88dcb9a3
AS
2726 struct lruvec *lruvec, struct list_head *list)
2727{
94866635
AS
2728 VM_BUG_ON_PAGE(!PageHead(head), head);
2729 VM_BUG_ON_PAGE(PageCompound(tail), head);
2730 VM_BUG_ON_PAGE(PageLRU(tail), head);
6168d0da 2731 lockdep_assert_held(&lruvec->lru_lock);
88dcb9a3 2732
6dbb5741 2733 if (list) {
88dcb9a3 2734 /* page reclaim is reclaiming a huge page */
6dbb5741 2735 VM_WARN_ON(PageLRU(head));
94866635
AS
2736 get_page(tail);
2737 list_add_tail(&tail->lru, list);
88dcb9a3 2738 } else {
6dbb5741
AS
2739 /* head is still on lru (and we have it frozen) */
2740 VM_WARN_ON(!PageLRU(head));
07ca7606
HD
2741 if (PageUnevictable(tail))
2742 tail->mlock_count = 0;
2743 else
2744 list_add_tail(&tail->lru, &head->lru);
6dbb5741 2745 SetPageLRU(tail);
88dcb9a3
AS
2746 }
2747}
2748
07e09c48 2749static void __split_huge_page_tail(struct folio *folio, int tail,
e9b61f19
KS
2750 struct lruvec *lruvec, struct list_head *list)
2751{
07e09c48 2752 struct page *head = &folio->page;
e9b61f19 2753 struct page *page_tail = head + tail;
07e09c48
DH
2754 /*
2755 * Careful: new_folio is not a "real" folio before we cleared PageTail.
2756 * Don't pass it around before clear_compound_head().
2757 */
2758 struct folio *new_folio = (struct folio *)page_tail;
e9b61f19 2759
8df651c7 2760 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
e9b61f19
KS
2761
2762 /*
605ca5ed
KK
2763 * Clone page flags before unfreezing refcount.
2764 *
2765 * A flags change may follow a successful get_page_unless_zero(),
8958b249 2766 * for example lock_page() setting PG_waiters.
6c287605
DH
2767 *
2768 * Note that for mapped sub-pages of an anonymous THP,
684555aa 2769 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
6c287605
DH
2770 * the migration entry instead from where remap_page() will restore it.
2771 * We can still have PG_anon_exclusive set on effectively unmapped and
2772 * unreferenced sub-pages of an anonymous THP: we can simply drop
2773 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
e9b61f19 2774 */
e9b61f19
KS
2775 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2776 page_tail->flags |= (head->flags &
2777 ((1L << PG_referenced) |
2778 (1L << PG_swapbacked) |
38d8b4e6 2779 (1L << PG_swapcache) |
e9b61f19
KS
2780 (1L << PG_mlocked) |
2781 (1L << PG_uptodate) |
2782 (1L << PG_active) |
1899ad18 2783 (1L << PG_workingset) |
e9b61f19 2784 (1L << PG_locked) |
b8d3c4c3 2785 (1L << PG_unevictable) |
b0284cd2 2786#ifdef CONFIG_ARCH_USES_PG_ARCH_X
72e6afa0 2787 (1L << PG_arch_2) |
ef6458b1 2788 (1L << PG_arch_3) |
72e6afa0 2789#endif
ec1c86b2
YZ
2790 (1L << PG_dirty) |
2791 LRU_GEN_MASK | LRU_REFS_MASK));
e9b61f19 2792
cb67f428 2793 /* ->mapping in first and second tail page is replaced by other uses */
173d9d9f
HD
2794 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2795 page_tail);
2796 page_tail->mapping = head->mapping;
2797 page_tail->index = head->index + tail;
71e2d666
MG
2798
2799 /*
cfeed8ff
DH
2800 * page->private should not be set in tail pages. Fix up and warn once
2801 * if private is unexpectedly set.
71e2d666 2802 */
cfeed8ff
DH
2803 if (unlikely(page_tail->private)) {
2804 VM_WARN_ON_ONCE_PAGE(true, page_tail);
71e2d666
MG
2805 page_tail->private = 0;
2806 }
07e09c48
DH
2807 if (folio_test_swapcache(folio))
2808 new_folio->swap.val = folio->swap.val + tail;
173d9d9f 2809
605ca5ed 2810 /* Page flags must be visible before we make the page non-compound. */
e9b61f19
KS
2811 smp_wmb();
2812
605ca5ed
KK
2813 /*
2814 * Clear PageTail before unfreezing page refcount.
2815 *
2816 * A put_page() may follow a successful get_page_unless_zero(),
2817 * and it needs a correct compound_head().
2818 */
e9b61f19
KS
2819 clear_compound_head(page_tail);
2820
605ca5ed 2821 /* Finally unfreeze refcount. Additional reference from page cache. */
b7542769
KW
2822 page_ref_unfreeze(page_tail, 1 + (!folio_test_anon(folio) ||
2823 folio_test_swapcache(folio)));
605ca5ed 2824
b7542769
KW
2825 if (folio_test_young(folio))
2826 folio_set_young(new_folio);
2827 if (folio_test_idle(folio))
2828 folio_set_idle(new_folio);
e9b61f19 2829
c8253011 2830 folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio));
94723aaf
MH
2831
2832 /*
2833 * always add to the tail because some iterators expect new
2834 * pages to show after the currently processed elements - e.g.
2835 * migrate_pages
2836 */
e9b61f19 2837 lru_add_page_tail(head, page_tail, lruvec, list);
e9b61f19
KS
2838}
2839
baa355fd 2840static void __split_huge_page(struct page *page, struct list_head *list,
b6769834 2841 pgoff_t end)
e9b61f19 2842{
e809c3fe
MWO
2843 struct folio *folio = page_folio(page);
2844 struct page *head = &folio->page;
e9b61f19 2845 struct lruvec *lruvec;
4101196b
MWO
2846 struct address_space *swap_cache = NULL;
2847 unsigned long offset = 0;
8cce5475 2848 unsigned int nr = thp_nr_pages(head);
509f0069 2849 int i, nr_dropped = 0;
e9b61f19 2850
e9b61f19 2851 /* complete memcg works before add pages to LRU */
be6c8982 2852 split_page_memcg(head, nr);
e9b61f19 2853
07e09c48
DH
2854 if (folio_test_anon(folio) && folio_test_swapcache(folio)) {
2855 offset = swp_offset(folio->swap);
2856 swap_cache = swap_address_space(folio->swap);
4101196b
MWO
2857 xa_lock(&swap_cache->i_pages);
2858 }
2859
f0953a1b 2860 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
e809c3fe 2861 lruvec = folio_lruvec_lock(folio);
b6769834 2862
eac96c3e
YS
2863 ClearPageHasHWPoisoned(head);
2864
8cce5475 2865 for (i = nr - 1; i >= 1; i--) {
07e09c48 2866 __split_huge_page_tail(folio, i, lruvec, list);
d144bf62 2867 /* Some pages can be beyond EOF: drop them from page cache */
baa355fd 2868 if (head[i].index >= end) {
fb5c2029
MWO
2869 struct folio *tail = page_folio(head + i);
2870
d144bf62 2871 if (shmem_mapping(head->mapping))
509f0069 2872 nr_dropped++;
fb5c2029
MWO
2873 else if (folio_test_clear_dirty(tail))
2874 folio_account_cleaned(tail,
2875 inode_to_wb(folio->mapping->host));
2876 __filemap_remove_folio(tail, NULL);
2877 folio_put(tail);
4101196b
MWO
2878 } else if (!PageAnon(page)) {
2879 __xa_store(&head->mapping->i_pages, head[i].index,
2880 head + i, 0);
2881 } else if (swap_cache) {
2882 __xa_store(&swap_cache->i_pages, offset + i,
2883 head + i, 0);
baa355fd
KS
2884 }
2885 }
e9b61f19
KS
2886
2887 ClearPageCompound(head);
6168d0da 2888 unlock_page_lruvec(lruvec);
b6769834 2889 /* Caller disabled irqs, so they are still disabled here */
f7da677b 2890
8cce5475 2891 split_page_owner(head, nr);
f7da677b 2892
baa355fd
KS
2893 /* See comment in __split_huge_page_tail() */
2894 if (PageAnon(head)) {
aa5dc07f 2895 /* Additional pin to swap cache */
4101196b 2896 if (PageSwapCache(head)) {
38d8b4e6 2897 page_ref_add(head, 2);
4101196b
MWO
2898 xa_unlock(&swap_cache->i_pages);
2899 } else {
38d8b4e6 2900 page_ref_inc(head);
4101196b 2901 }
baa355fd 2902 } else {
aa5dc07f 2903 /* Additional pin to page cache */
baa355fd 2904 page_ref_add(head, 2);
b93b0163 2905 xa_unlock(&head->mapping->i_pages);
baa355fd 2906 }
b6769834 2907 local_irq_enable();
e9b61f19 2908
509f0069
HD
2909 if (nr_dropped)
2910 shmem_uncharge(head->mapping->host, nr_dropped);
4eecb8b9 2911 remap_page(folio, nr);
e9b61f19 2912
07e09c48
DH
2913 if (folio_test_swapcache(folio))
2914 split_swap_cluster(folio->swap);
c4f9c701 2915
8cce5475 2916 for (i = 0; i < nr; i++) {
e9b61f19
KS
2917 struct page *subpage = head + i;
2918 if (subpage == page)
2919 continue;
2920 unlock_page(subpage);
2921
2922 /*
2923 * Subpages may be freed if there is no mapping left,
2924 * e.g. if add_to_swap() is running on an LRU page that
2925 * had its mapping zapped. Freeing these pages
2926 * requires taking the lru_lock, so we do the put_page
2927 * of the tail pages after the split is complete.
2928 */
0b175468 2929 free_page_and_swap_cache(subpage);
e9b61f19
KS
2930 }
2931}
2932
b8f593cd 2933/* Racy check whether the huge page can be split */
d4b4084a 2934bool can_split_folio(struct folio *folio, int *pextra_pins)
b8f593cd
HY
2935{
2936 int extra_pins;
2937
aa5dc07f 2938 /* Additional pins from page cache */
d4b4084a
MWO
2939 if (folio_test_anon(folio))
2940 extra_pins = folio_test_swapcache(folio) ?
2941 folio_nr_pages(folio) : 0;
b8f593cd 2942 else
d4b4084a 2943 extra_pins = folio_nr_pages(folio);
b8f593cd
HY
2944 if (pextra_pins)
2945 *pextra_pins = extra_pins;
d4b4084a 2946 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
b8f593cd
HY
2947}
2948
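/*
 * Worked example for the check above (illustrative accounting, assuming one
 * process maps the THP once via a PMD): an anonymous PMD-mapped THP that also
 * sits in the swap cache has
 *
 *	folio_ref_count() = 1 (mapping) + HPAGE_PMD_NR (swap cache) + 1 (caller's pin)
 *	folio_mapcount() = 1, extra_pins = HPAGE_PMD_NR
 *
 * so mapcount == refcount - extra_pins - 1 holds and the split may proceed;
 * any additional reference (e.g. a GUP pin) breaks the equality and the split
 * is refused.
 */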
e9b61f19
KS
2949/*
2950 * This function splits a huge page into normal pages. @page can point to any
2951 * subpage of the huge page to split. The split doesn't change the position of @page.
2952 *
2953 * The caller must hold a pin on the @page, otherwise the split fails with -EBUSY.
2954 * The huge page must be locked.
2955 *
2956 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2957 *
2958 * Both head page and tail pages will inherit mapping, flags, and so on from
2959 * the hugepage.
2960 *
2961 * The GUP pin and PG_locked are transferred to @page. The remaining subpages
2962 * can be freed if they are not mapped.
2963 *
2964 * Returns 0 if the hugepage is split successfully.
2965 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2966 * us.
2967 */
2968int split_huge_page_to_list(struct page *page, struct list_head *list)
2969{
4eecb8b9 2970 struct folio *folio = page_folio(page);
f8baa6be 2971 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
3e9a13da 2972 XA_STATE(xas, &folio->mapping->i_pages, folio->index);
baa355fd
KS
2973 struct anon_vma *anon_vma = NULL;
2974 struct address_space *mapping = NULL;
504e070d 2975 int extra_pins, ret;
006d3ff2 2976 pgoff_t end;
478d134e 2977 bool is_hzp;
e9b61f19 2978
3e9a13da
MWO
2979 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2980 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
e9b61f19 2981
3e9a13da 2982 is_hzp = is_huge_zero_page(&folio->page);
4737edbb
NH
2983 if (is_hzp) {
2984 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
478d134e 2985 return -EBUSY;
4737edbb 2986 }
478d134e 2987
3e9a13da 2988 if (folio_test_writeback(folio))
59807685
HY
2989 return -EBUSY;
2990
3e9a13da 2991 if (folio_test_anon(folio)) {
baa355fd 2992 /*
c1e8d7c6 2993 * The caller does not necessarily hold an mmap_lock that would
baa355fd
KS
2994 * prevent the anon_vma from disappearing, so we first take a
2995 * reference to it and then lock the anon_vma for write. This
2f031c6f 2996 * is similar to folio_lock_anon_vma_read except the write lock
baa355fd
KS
2997 * is taken to serialise against parallel split or collapse
2998 * operations.
2999 */
29eea9b5 3000 anon_vma = folio_get_anon_vma(folio);
baa355fd
KS
3001 if (!anon_vma) {
3002 ret = -EBUSY;
3003 goto out;
3004 }
006d3ff2 3005 end = -1;
baa355fd
KS
3006 mapping = NULL;
3007 anon_vma_lock_write(anon_vma);
3008 } else {
6a3edd29
YF
3009 gfp_t gfp;
3010
3e9a13da 3011 mapping = folio->mapping;
baa355fd
KS
3012
3013 /* Truncated ? */
3014 if (!mapping) {
3015 ret = -EBUSY;
3016 goto out;
3017 }
3018
6a3edd29
YF
3019 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
3020 GFP_RECLAIM_MASK);
3021
0201ebf2 3022 if (!filemap_release_folio(folio, gfp)) {
6a3edd29
YF
3023 ret = -EBUSY;
3024 goto out;
3025 }
3026
3e9a13da 3027 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
6b24ca4a
MWO
3028 if (xas_error(&xas)) {
3029 ret = xas_error(&xas);
3030 goto out;
3031 }
3032
baa355fd
KS
3033 anon_vma = NULL;
3034 i_mmap_lock_read(mapping);
006d3ff2
HD
3035
3036 /*
3037 * __split_huge_page() may need to trim off pages beyond EOF:
3038 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
3039 * which cannot be nested inside the page tree lock. So note
3040 * end now: i_size itself may be changed at any moment, but
3e9a13da 3041 * folio lock is good enough to serialize the trimming.
006d3ff2
HD
3042 */
3043 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
d144bf62
HD
3044 if (shmem_mapping(mapping))
3045 end = shmem_fallocend(mapping->host, end);
e9b61f19 3046 }
e9b61f19
KS
3047
3048 /*
684555aa 3049 * Racy check if we can split the page, before unmap_folio() will
e9b61f19
KS
3050 * split PMDs
3051 */
d4b4084a 3052 if (!can_split_folio(folio, &extra_pins)) {
fd4a7ac3 3053 ret = -EAGAIN;
e9b61f19
KS
3054 goto out_unlock;
3055 }
3056
684555aa 3057 unmap_folio(folio);
e9b61f19 3058
b6769834
AS
3059 /* block interrupt reentry in xa_lock and spinlock */
3060 local_irq_disable();
baa355fd 3061 if (mapping) {
baa355fd 3062 /*
3e9a13da
MWO
3063 * Check if the folio is present in page cache.
3064 * We assume all tail are present too, if folio is there.
baa355fd 3065 */
6b24ca4a
MWO
3066 xas_lock(&xas);
3067 xas_reset(&xas);
3e9a13da 3068 if (xas_load(&xas) != folio)
baa355fd
KS
3069 goto fail;
3070 }
3071
0139aa7b 3072 /* Prevent deferred_split_scan() touching ->_refcount */
364c1eeb 3073 spin_lock(&ds_queue->split_queue_lock);
3e9a13da 3074 if (folio_ref_freeze(folio, 1 + extra_pins)) {
4375a553 3075 if (!list_empty(&folio->_deferred_list)) {
364c1eeb 3076 ds_queue->split_queue_len--;
4375a553 3077 list_del(&folio->_deferred_list);
9a982250 3078 }
afb97172 3079 spin_unlock(&ds_queue->split_queue_lock);
06d3eff6 3080 if (mapping) {
3e9a13da 3081 int nr = folio_nr_pages(folio);
bf9ecead 3082
3e9a13da 3083 xas_split(&xas, folio, folio_order(folio));
a48d5bdc
SR
3084 if (folio_test_pmd_mappable(folio)) {
3085 if (folio_test_swapbacked(folio)) {
3086 __lruvec_stat_mod_folio(folio,
3087 NR_SHMEM_THPS, -nr);
3088 } else {
3089 __lruvec_stat_mod_folio(folio,
3090 NR_FILE_THPS, -nr);
3091 filemap_nr_thps_dec(mapping);
3092 }
1ca7554d 3093 }
06d3eff6
KS
3094 }
3095
b6769834 3096 __split_huge_page(page, list, end);
c4f9c701 3097 ret = 0;
e9b61f19 3098 } else {
364c1eeb 3099 spin_unlock(&ds_queue->split_queue_lock);
504e070d
YS
3100fail:
3101 if (mapping)
6b24ca4a 3102 xas_unlock(&xas);
b6769834 3103 local_irq_enable();
4eecb8b9 3104 remap_page(folio, folio_nr_pages(folio));
fd4a7ac3 3105 ret = -EAGAIN;
e9b61f19
KS
3106 }
3107
3108out_unlock:
baa355fd
KS
3109 if (anon_vma) {
3110 anon_vma_unlock_write(anon_vma);
3111 put_anon_vma(anon_vma);
3112 }
3113 if (mapping)
3114 i_mmap_unlock_read(mapping);
e9b61f19 3115out:
69a37a8b 3116 xas_destroy(&xas);
e9b61f19
KS
3117 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3118 return ret;
3119}
9a982250 3120
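/*
 * Typical in-kernel usage sketch for the function above (mirrors callers such
 * as deferred_split_scan() below; illustrative only). The folio must be pinned
 * and locked by the caller, exactly as the comment block above requires:
 *
 *	if (folio_try_get(folio)) {
 *		if (folio_trylock(folio)) {
 *			ret = split_folio(folio);	   0 on success
 *			folio_unlock(folio);
 *		}
 *		folio_put(folio);
 *	}
 */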
8dc4a8f1 3121void folio_undo_large_rmappable(struct folio *folio)
9a982250 3122{
8dc4a8f1 3123 struct deferred_split *ds_queue;
9a982250
KS
3124 unsigned long flags;
3125
deedad80
YF
3126 /*
3127 * At this point, there is no one trying to add the folio to
3128 * deferred_list. If folio is not in deferred_list, it's safe
3129 * to check without acquiring the split_queue_lock.
3130 */
8dc4a8f1
MWO
3131 if (data_race(list_empty(&folio->_deferred_list)))
3132 return;
3133
3134 ds_queue = get_deferred_split_queue(folio);
3135 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
3136 if (!list_empty(&folio->_deferred_list)) {
3137 ds_queue->split_queue_len--;
9bcef597 3138 list_del_init(&folio->_deferred_list);
9a982250 3139 }
8dc4a8f1 3140 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250
KS
3141}
3142
f158ed61 3143void deferred_split_folio(struct folio *folio)
9a982250 3144{
f8baa6be 3145 struct deferred_split *ds_queue = get_deferred_split_queue(folio);
87eaceb3 3146#ifdef CONFIG_MEMCG
8991de90 3147 struct mem_cgroup *memcg = folio_memcg(folio);
87eaceb3 3148#endif
9a982250
KS
3149 unsigned long flags;
3150
8991de90 3151 VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
9a982250 3152
87eaceb3
YS
3153 /*
3154 * The try_to_unmap() in the page reclaim path might reach here too;
3155 * this may cause a race condition that corrupts the deferred split queue.
8991de90 3156 * And if page reclaim is already handling the same folio, it is
87eaceb3
YS
3157 * unnecessary to handle it again in shrinker.
3158 *
8991de90
MWO
3159 * Check the swapcache flag to determine if the folio is being
3160 * handled by page reclaim since THP swap would add the folio into
87eaceb3
YS
3161 * swap cache before calling try_to_unmap().
3162 */
8991de90 3163 if (folio_test_swapcache(folio))
87eaceb3
YS
3164 return;
3165
8991de90 3166 if (!list_empty(&folio->_deferred_list))
87eaceb3
YS
3167 return;
3168
364c1eeb 3169 spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
8991de90 3170 if (list_empty(&folio->_deferred_list)) {
f9719a03 3171 count_vm_event(THP_DEFERRED_SPLIT_PAGE);
8991de90 3172 list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
364c1eeb 3173 ds_queue->split_queue_len++;
87eaceb3
YS
3174#ifdef CONFIG_MEMCG
3175 if (memcg)
8991de90 3176 set_shrinker_bit(memcg, folio_nid(folio),
54d91729 3177 deferred_split_shrinker->id);
87eaceb3 3178#endif
9a982250 3179 }
364c1eeb 3180 spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
9a982250
KS
3181}
3182
3183static unsigned long deferred_split_count(struct shrinker *shrink,
3184 struct shrink_control *sc)
3185{
a3d0a918 3186 struct pglist_data *pgdata = NODE_DATA(sc->nid);
364c1eeb 3187 struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
87eaceb3
YS
3188
3189#ifdef CONFIG_MEMCG
3190 if (sc->memcg)
3191 ds_queue = &sc->memcg->deferred_split_queue;
3192#endif
364c1eeb 3193 return READ_ONCE(ds_queue->split_queue_len);
9a982250
KS
3194}

static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
	unsigned long flags;
	LIST_HEAD(list);
	struct folio *folio, *next;
	int split = 0;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
							_deferred_list) {
		if (folio_try_get(folio)) {
			list_move(&folio->_deferred_list, &list);
		} else {
			/* We lost race with folio_put() */
			list_del_init(&folio->_deferred_list);
			ds_queue->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
		if (!folio_trylock(folio))
			goto next;
		/* split_folio() removes the folio from the list on success */
		if (!split_folio(folio))
			split++;
		folio_unlock(folio);
next:
		folio_put(folio);
	}

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	list_splice_tail(&list, &ds_queue->split_queue);
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	/*
	 * Stop the shrinker if we didn't split any folio and the queue is
	 * empty.  This can happen if the folios were freed under us.
	 */
	if (!split && list_empty(&ds_queue->split_queue))
		return SHRINK_STOP;
	return split;
}
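
/*
 * Illustrative sketch (not part of the original source): the two callbacks
 * above are wired into deferred_split_shrinker during THP initialisation
 * elsewhere in this file, roughly along the lines below.  The flags shown
 * here are an assumption and only indicate the shape of that registration
 * with the current shrinker API.
 */
#if 0	/* example only, never compiled */
static int __init example_register_deferred_split_shrinker(void)
{
	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
						 SHRINKER_MEMCG_AWARE,
						 "thp-deferred_split");
	if (!deferred_split_shrinker)
		return -ENOMEM;

	deferred_split_shrinker->count_objects = deferred_split_count;
	deferred_split_shrinker->scan_objects = deferred_split_scan;
	shrinker_register(deferred_split_shrinker);
	return 0;
}
#endif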

#ifdef CONFIG_DEBUG_FS
static void split_huge_pages_all(void)
{
	struct zone *zone;
	struct page *page;
	struct folio *folio;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	pr_debug("Split all THPs\n");
	for_each_zone(zone) {
		if (!managed_zone(zone))
			continue;
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			int nr_pages;

			page = pfn_to_online_page(pfn);
			if (!page || PageTail(page))
				continue;
			folio = page_folio(page);
			if (!folio_try_get(folio))
				continue;

			if (unlikely(page_folio(page) != folio))
				goto next;

			if (zone != folio_zone(folio))
				goto next;

			if (!folio_test_large(folio)
				|| folio_test_hugetlb(folio)
				|| !folio_test_lru(folio))
				goto next;

			total++;
			folio_lock(folio);
			nr_pages = folio_nr_pages(folio);
			if (!split_folio(folio))
				split++;
			pfn += nr_pages - 1;
			folio_unlock(folio);
next:
			folio_put(folio);
			cond_resched();
		}
	}

	pr_debug("%lu of %lu THP split\n", split, total);
}

static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
{
	return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
	       is_vm_hugetlb_page(vma);
}

static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
				unsigned long vaddr_end)
{
	int ret = 0;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long total = 0, split = 0;
	unsigned long addr;

	vaddr_start &= PAGE_MASK;
	vaddr_end &= PAGE_MASK;

	/* Find the task_struct from pid */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		ret = -ESRCH;
		goto out;
	}
	get_task_struct(task);
	rcu_read_unlock();

	/* Find the mm_struct */
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		ret = -EINVAL;
		goto out;
	}

	pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
		 pid, vaddr_start, vaddr_end);

	mmap_read_lock(mm);
	/*
	 * always increase addr by PAGE_SIZE, since we could have a PTE page
	 * table filled with PTE-mapped THPs, each of which is distinct.
	 */
	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
		struct vm_area_struct *vma = vma_lookup(mm, addr);
		struct page *page;
		struct folio *folio;

		if (!vma)
			break;

		/* skip special VMA and hugetlb VMA */
		if (vma_not_suitable_for_thp_split(vma)) {
			addr = vma->vm_end;
			continue;
		}

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		if (IS_ERR_OR_NULL(page))
			continue;

		folio = page_folio(page);
		if (!is_transparent_hugepage(folio))
			goto next;

		total++;
		if (!can_split_folio(folio, NULL))
			goto next;

		if (!folio_trylock(folio))
			goto next;

		if (!split_folio(folio))
			split++;

		folio_unlock(folio);
next:
		folio_put(folio);
		cond_resched();
	}
	mmap_read_unlock(mm);
	mmput(mm);

	pr_debug("%lu of %lu THP split\n", split, total);

out:
	return ret;
}

static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
				pgoff_t off_end)
{
	struct filename *file;
	struct file *candidate;
	struct address_space *mapping;
	int ret = -EINVAL;
	pgoff_t index;
	int nr_pages = 1;
	unsigned long total = 0, split = 0;

	file = getname_kernel(file_path);
	if (IS_ERR(file))
		return ret;

	candidate = file_open_name(file, O_RDONLY, 0);
	if (IS_ERR(candidate))
		goto out;

	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
		 file_path, off_start, off_end);

	mapping = candidate->f_mapping;

	for (index = off_start; index < off_end; index += nr_pages) {
		struct folio *folio = filemap_get_folio(mapping, index);

		nr_pages = 1;
		if (IS_ERR(folio))
			continue;

		if (!folio_test_large(folio))
			goto next;

		total++;
		nr_pages = folio_nr_pages(folio);

		if (!folio_trylock(folio))
			goto next;

		if (!split_folio(folio))
			split++;

		folio_unlock(folio);
next:
		folio_put(folio);
		cond_resched();
	}

	filp_close(candidate, NULL);
	ret = 0;

	pr_debug("%lu of %lu file-backed THP split\n", split, total);
out:
	putname(file);
	return ret;
}

#define MAX_INPUT_BUF_SZ 255

static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	static DEFINE_MUTEX(split_debug_mutex);
	ssize_t ret;
	/* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
	char input_buf[MAX_INPUT_BUF_SZ];
	int pid;
	unsigned long vaddr_start, vaddr_end;

	ret = mutex_lock_interruptible(&split_debug_mutex);
	if (ret)
		return ret;

	ret = -EFAULT;

	memset(input_buf, 0, MAX_INPUT_BUF_SZ);
	if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
		goto out;

	input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';

	if (input_buf[0] == '/') {
		char *tok;
		char *buf = input_buf;
		char file_path[MAX_INPUT_BUF_SZ];
		pgoff_t off_start = 0, off_end = 0;
		size_t input_len = strlen(input_buf);

		tok = strsep(&buf, ",");
		if (tok) {
			strcpy(file_path, tok);
		} else {
			ret = -EINVAL;
			goto out;
		}

		ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
		if (ret != 2) {
			ret = -EINVAL;
			goto out;
		}
		ret = split_huge_pages_in_file(file_path, off_start, off_end);
		if (!ret)
			ret = input_len;

		goto out;
	}

	ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
	if (ret == 1 && pid == 1) {
		split_huge_pages_all();
		ret = strlen(input_buf);
		goto out;
	} else if (ret != 3) {
		ret = -EINVAL;
		goto out;
	}

	ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
	if (!ret)
		ret = strlen(input_buf);
out:
	mutex_unlock(&split_debug_mutex);
	return ret;
}
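
/*
 * Usage of the debugfs interface implemented above (assuming debugfs is
 * mounted at the default /sys/kernel/debug):
 *
 *   Split all THPs system-wide:
 *     echo 1 > /sys/kernel/debug/split_huge_pages
 *
 *   Split THPs mapped by <pid> in the virtual address range [start, end),
 *   addresses given in hex with a 0x prefix:
 *     echo "<pid>,0x<start>,0x<end>" > /sys/kernel/debug/split_huge_pages
 *
 *   Split file-backed THPs of <path> in the page offset range [off_start,
 *   off_end), offsets given in hex with a 0x prefix:
 *     echo "<path>,0x<off_start>,0x<off_end>" > /sys/kernel/debug/split_huge_pages
 */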

static const struct file_operations split_huge_pages_fops = {
	.owner	 = THIS_MODULE,
	.write	 = split_huge_pages_write,
	.llseek  = no_llseek,
};

static int __init split_huge_pages_debugfs(void)
{
	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			    &split_huge_pages_fops);
	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct folio *folio = page_folio(page);
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	bool anon_exclusive;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return 0;

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);

	/* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */
	anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
	if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) {
		set_pmd_at(mm, address, pvmw->pmd, pmdval);
		return -EBUSY;
	}

	if (pmd_dirty(pmdval))
		folio_set_dirty(folio);
	if (pmd_write(pmdval))
		entry = make_writable_migration_entry(page_to_pfn(page));
	else if (anon_exclusive)
		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
	else
		entry = make_readable_migration_entry(page_to_pfn(page));
	if (pmd_young(pmdval))
		entry = make_migration_entry_young(entry);
	if (pmd_dirty(pmdval))
		entry = make_migration_entry_dirty(entry);
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	if (pmd_uffd_wp(pmdval))
		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	folio_remove_rmap_pmd(folio, page, vma);
	folio_put(folio);
	trace_set_migration_pmd(address, pmd_val(pmdswp));

	return 0;
}
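
/*
 * Illustrative sketch (not part of the original source): during THP
 * migration the rmap walk replaces each present PMD with a migration entry
 * via set_pmd_migration_entry(), and remove_migration_pmd() below installs
 * the PMD for the new page once the copy is done.  The walk set-up here is
 * schematic; real callers obtain pvmw from page_vma_mapped_walk(), which
 * fills in pvmw->pmd before either function is called.
 */
#if 0	/* example only, never compiled */
static void example_migrate_pmd_mapping(struct vm_area_struct *vma,
					unsigned long address,
					struct page *old, struct page *new)
{
	struct page_vma_mapped_walk pvmw = {
		.vma = vma,
		.address = address & HPAGE_PMD_MASK,
		/* .pmd is normally set up by page_vma_mapped_walk() */
	};

	/* Unmap: turn the present PMD into a PMD migration entry. */
	if (set_pmd_migration_entry(&pvmw, old))
		return;		/* -EBUSY: the page could not be unmapped */

	/* ... copy the old page to the new one and transfer folio state ... */

	/* Map: replace the migration entry with a PMD for the new page. */
	remove_migration_pmd(&pvmw, new);
}
#endif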

void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct folio *folio = page_folio(new);
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	folio_get(folio);
	pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_writable_migration_entry(entry))
		pmde = pmd_mkwrite(pmde, vma);
	if (pmd_swp_uffd_wp(*pvmw->pmd))
		pmde = pmd_mkuffd_wp(pmde);
	if (!is_migration_entry_young(entry))
		pmde = pmd_mkold(pmde);
	/* NOTE: this may contain setting soft-dirty on some archs */
	if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
		pmde = pmd_mkdirty(pmde);

	if (folio_test_anon(folio)) {
		rmap_t rmap_flags = RMAP_NONE;

		if (!is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags);
	} else {
		folio_add_file_rmap_pmd(folio, new, vma);
	}
	VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new));
	set_pmd_at(mm, haddr, pvmw->pmd, pmde);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
	trace_remove_migration_pmd(address, pmd_val(pmde));
}
#endif