// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *              Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *              Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *              (Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <linux/net_mm.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);

/*
 * Return true if the original pte was a uffd-wp pte marker (so the pte was
 * wr-protected).
 */
static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
        if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
                return false;

        return pte_marker_uffd_wp(vmf->orig_pte);
}

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
                                        1;
#else
                                        2;
#endif
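
/*
 * Sketch of the sysctl semantics behind randomize_va_space (documented in
 * Documentation/admin-guide/sysctl/kernel.rst; summarized here, not part
 * of the original file):
 *   0 - randomization disabled (e.g. via the "norandmaps" boot parameter
 *       handled below);
 *   1 - randomize the stack, VDSO and mmap base;
 *   2 - additionally randomize the heap (brk) base.
 * CONFIG_COMPAT_BRK therefore defaults to 1, so libc5-era binaries that
 * assume a fixed brk keep working.
 */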

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
        /*
         * Transitioning a PTE from 'old' to 'young' can be expensive on
         * some architectures, even if it's performed in hardware. By
         * default, "false" means prefaulted entries will be 'young'.
         */
        return false;
}
#endif

static int __init disable_randmaps(char *s)
{
        randomize_va_space = 0;
        return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
        zero_pfn = page_to_pfn(ZERO_PAGE(0));
        return 0;
}
early_initcall(init_zero_pfn);

void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
        trace_rss_stat(mm, member);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
                           unsigned long addr)
{
        pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);
        mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                free_pmd_range(tlb, pud, addr, next, floor, ceiling);
        } while (pud++, addr = next, addr != end);

        start &= P4D_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= P4D_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(p4d, start);
        p4d_clear(p4d);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        p4d_t *p4d;
        unsigned long next;
        unsigned long start;

        start = addr;
        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                free_pud_range(tlb, p4d, addr, next, floor, ceiling);
        } while (p4d++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        p4d = p4d_offset(pgd, start);
        pgd_clear(pgd);
        p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * The next few lines have given us lots of grief...
         *
         * Why are we testing PMD* at this top level?  Because often
         * there will be no work to do at all, and we'd prefer not to
         * go all the way down to the bottom just to discover that.
         *
         * Why all these "- 1"s?  Because 0 represents both the bottom
         * of the address space and the top of it (using -1 for the
         * top wouldn't help much: the masks would do the wrong thing).
         * The rule is that addr 0 and floor 0 refer to the bottom of
         * the address space, but end 0 and ceiling 0 refer to the top.
         * Comparisons need to use "end - 1" and "ceiling - 1" (though
         * that end 0 case should be mythical).
         *
         * Wherever addr is brought up or ceiling brought down, we must
         * be careful to reject "the opposite 0" before it confuses the
         * subsequent tests.  But what about where end is brought down
         * by PMD_SIZE below? no, end can't go down to 0 there.
         *
         * Whereas we round start (addr) and ceiling down, by different
         * masks at different levels, in order to test whether a table
         * now has no other vmas using it, so can be freed, we don't
         * bother to round floor or end up - the tests don't need that.
         */
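
        /*
         * Worked example (illustrative numbers, not from the original
         * source): on x86-64 with PMD_SIZE == 2MB, freeing after a vma at
         * [0x200000, 0x400000) with floor == 0 and ceiling == 0x600000:
         * addr &= PMD_MASK leaves addr == 0x200000, which is >= floor, so
         * nothing is clipped; ceiling &= PMD_MASK keeps 0x600000; and since
         * end - 1 (0x3fffff) <= ceiling - 1, end is not pulled down either.
         * Had ceiling been 0 ("top of address space"), the "if (ceiling)"
         * test below would skip the mask rather than turn it into a bogus
         * 0 bottom address.
         */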

        addr &= PMD_MASK;
        if (addr < floor) {
                addr += PMD_SIZE;
                if (!addr)
                        return;
        }
        if (ceiling) {
                ceiling &= PMD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                end -= PMD_SIZE;
        if (addr > end - 1)
                return;
        /*
         * Page table cache pages are added with PAGE_SIZE granularity
         * (see pte_free_tlb()), so flush the TLB if the page size changed.
         */
        tlb_change_page_size(tlb, PAGE_SIZE);
        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
                   struct vm_area_struct *vma, unsigned long floor,
                   unsigned long ceiling, bool mm_wr_locked)
{
        MA_STATE(mas, mt, vma->vm_end, vma->vm_end);

        do {
                unsigned long addr = vma->vm_start;
                struct vm_area_struct *next;

                /*
                 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
                 * be 0. This will underflow and is okay.
                 */
                next = mas_find(&mas, ceiling - 1);

                /*
                 * Hide vma from rmap and truncate_pagecache before freeing
                 * pgtables
                 */
                if (mm_wr_locked)
                        vma_start_write(vma);
                unlink_anon_vmas(vma);
                unlink_file_vma(vma);

                if (is_vm_hugetlb_page(vma)) {
                        hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next ? next->vm_start : ceiling);
                } else {
                        /*
                         * Optimization: gather nearby vmas into one call down
                         */
                        while (next && next->vm_start <= vma->vm_end + PMD_SIZE
                               && !is_vm_hugetlb_page(next)) {
                                vma = next;
                                next = mas_find(&mas, ceiling - 1);
                                if (mm_wr_locked)
                                        vma_start_write(vma);
                                unlink_anon_vmas(vma);
                                unlink_file_vma(vma);
                        }
                        free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next ? next->vm_start : ceiling);
                }
                vma = next;
        } while (vma);
}

void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
        spinlock_t *ptl = pmd_lock(mm, pmd);

        if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
                mm_inc_nr_ptes(mm);
                /*
                 * Ensure all pte setup (eg. pte page lock and page clearing) are
                 * visible before the pte is made visible to other CPUs by being
                 * put into page tables.
                 *
                 * The other side of the story is the pointer chasing in the page
                 * table walking code (when walking the page table without locking;
                 * ie. most of the time). Fortunately, these data accesses consist
                 * of a chain of data-dependent loads, meaning most CPUs (alpha
                 * being the notable exception) will already guarantee loads are
                 * seen in-order. See the alpha page table accessors for the
                 * smp_rmb() barriers in page table walking code.
                 */
                smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
                pmd_populate(mm, pmd, *pte);
                *pte = NULL;
        }
        spin_unlock(ptl);
}

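/*
 * Sketch of the ordering the smp_wmb() above guards against (an assumed
 * scenario, distilled from the comment in pmd_install()):
 *
 *   CPU0 (pmd_install)                CPU1 (lockless walker)
 *   zero/initialize pte page          pmd = READ_ONCE(*pmdp)
 *   smp_wmb()                         pte = *pte_offset(pmd, addr)
 *   pmd_populate(...)
 *
 * Without the barrier, CPU1 could observe the new pmd while the stores
 * initializing the pte page are still pending on CPU0, and read garbage
 * ptes. The data-dependent load on CPU1 provides the read-side ordering
 * on everything except alpha, which needs smp_rmb().
 */
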
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
        pgtable_t new = pte_alloc_one(mm);
        if (!new)
                return -ENOMEM;

        pmd_install(mm, pmd, &new);
        if (new)
                pte_free(mm, new);
        return 0;
}

int __pte_alloc_kernel(pmd_t *pmd)
{
        pte_t *new = pte_alloc_one_kernel(&init_mm);
        if (!new)
                return -ENOMEM;

        spin_lock(&init_mm.page_table_lock);
        if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
                smp_wmb(); /* See comment in pmd_install() */
                pmd_populate_kernel(&init_mm, pmd, new);
                new = NULL;
        }
        spin_unlock(&init_mm.page_table_lock);
        if (new)
                pte_free_kernel(&init_mm, new);
        return 0;
}

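/*
 * Both allocators above follow the same optimistic pattern: allocate the
 * pte page with no locks held (the allocation may sleep), then take the
 * page-table lock and install it only if the pmd is still empty. If a
 * racing thread populated the pmd first, the fresh page is simply freed
 * again - pmd_install() signals this by leaving *pte non-NULL.
 */
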
static inline void init_rss_vec(int *rss)
{
        memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
        int i;

        if (current->mm == mm)
                sync_mm_rss(mm);
        for (i = 0; i < NR_MM_COUNTERS; i++)
                if (rss[i])
                        add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
                          pte_t pte, struct page *page)
{
        pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        pmd_t *pmd = pmd_offset(pud, addr);
        struct address_space *mapping;
        pgoff_t index;
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        return;
                }
                if (nr_unshown) {
                        pr_alert("BUG: Bad page map: %lu messages suppressed\n",
                                 nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
        index = linear_page_index(vma, addr);

        pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
                 current->comm,
                 (long long)pte_val(pte), (long long)pmd_val(*pmd));
        if (page)
                dump_page(page, "bad pte");
        pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
                 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
        pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
                 vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->fault : NULL,
                 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
                 mapping ? mapping->a_ops->read_folio : NULL);
        dump_stack();
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * and for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                            pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
                if (likely(!pte_special(pte)))
                        goto check_pfn;
                if (vma->vm_ops && vma->vm_ops->find_special_page)
                        return vma->vm_ops->find_special_page(vma, addr);
                if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                        return NULL;
                if (is_zero_pfn(pfn))
                        return NULL;
                if (pte_devmap(pte))
                        /*
                         * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
                         * and will have refcounts incremented on their struct pages
                         * when they are inserted into PTEs, thus they are safe to
                         * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
                         * do not have refcounts. Example of legacy ZONE_DEVICE is
                         * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
                         */
                        return NULL;

                print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }

        /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
                                return NULL;
                        goto out;
                } else {
                        unsigned long off;
                        off = (addr - vma->vm_start) >> PAGE_SHIFT;
                        if (pfn == vma->vm_pgoff + off)
                                return NULL;
                        if (!is_cow_mapping(vma->vm_flags))
                                return NULL;
                }
        }

        if (is_zero_pfn(pfn))
                return NULL;

check_pfn:
        if (unlikely(pfn > highest_memmap_pfn)) {
                print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }

        /*
         * NOTE! We still have PageReserved() pages in the page tables.
         * eg. VDSO mappings can cause them to exist.
         */
out:
        return pfn_to_page(pfn);
}

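/*
 * Worked example of the linearity rule documented above vm_normal_page()
 * (hypothetical numbers, for illustration only): suppose remap_pfn_range()
 * mapped pfn 0x100 at vma->vm_start, so vma->vm_pgoff == 0x100. For
 * addr == vma->vm_start + 2 * PAGE_SIZE, a still-special pte holds
 * pfn 0x102 == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT),
 * so the equality holds and vm_normal_page() returns NULL. If that page
 * has since been COWed, the pte instead points at a freshly allocated
 * anonymous pfn, the equality fails, and the page is treated as normal.
 */
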
struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
                              pte_t pte)
{
        struct page *page = vm_normal_page(vma, addr, pte);

        if (page)
                return page_folio(page);
        return NULL;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
                                pmd_t pmd)
{
        unsigned long pfn = pmd_pfn(pmd);

        /*
         * There is no pmd_special() but there may be special pmds, e.g.
         * in a direct-access (dax) mapping, so let's just replicate the
         * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
         */
        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
                                return NULL;
                        goto out;
                } else {
                        unsigned long off;
                        off = (addr - vma->vm_start) >> PAGE_SHIFT;
                        if (pfn == vma->vm_pgoff + off)
                                return NULL;
                        if (!is_cow_mapping(vma->vm_flags))
                                return NULL;
                }
        }

        if (pmd_devmap(pmd))
                return NULL;
        if (is_huge_zero_pmd(pmd))
                return NULL;
        if (unlikely(pfn > highest_memmap_pfn))
                return NULL;

        /*
         * NOTE! We still have PageReserved() pages in the page tables.
         * eg. VDSO mappings can cause them to exist.
         */
out:
        return pfn_to_page(pfn);
}
#endif

static void restore_exclusive_pte(struct vm_area_struct *vma,
                                  struct page *page, unsigned long address,
                                  pte_t *ptep)
{
        pte_t orig_pte;
        pte_t pte;
        swp_entry_t entry;

        orig_pte = ptep_get(ptep);
        pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
        if (pte_swp_soft_dirty(orig_pte))
                pte = pte_mksoft_dirty(pte);

        entry = pte_to_swp_entry(orig_pte);
        if (pte_swp_uffd_wp(orig_pte))
                pte = pte_mkuffd_wp(pte);
        else if (is_writable_device_exclusive_entry(entry))
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);

        VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));

        /*
         * No need to take a page reference as one was already
         * created when the swap entry was made.
         */
        if (PageAnon(page))
                page_add_anon_rmap(page, vma, address, RMAP_NONE);
        else
                /*
                 * Currently device exclusive access only supports anonymous
                 * memory, so the entry shouldn't point to a file-backed page.
                 */
                WARN_ON_ONCE(1);

        set_pte_at(vma->vm_mm, address, ptep, pte);

        /*
         * No need to invalidate - it was non-present before. However
         * secondary CPUs may have mappings that need invalidating.
         */
        update_mmu_cache(vma, address, ptep);
}

/*
 * Tries to restore an exclusive pte if the page lock can be acquired without
 * sleeping.
 */
static int
try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
                          unsigned long addr)
{
        swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
        struct page *page = pfn_swap_entry_to_page(entry);

        if (trylock_page(page)) {
                restore_exclusive_pte(vma, page, addr, src_pte);
                unlock_page(page);
                return 0;
        }

        return -EBUSY;
}

/*
 * Copy one vm_area from one task to the other. Assumes that the page tables
 * already present in the new task have been cleared in the whole range
 * covered by this vma.
 */
static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
                struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
        unsigned long vm_flags = dst_vma->vm_flags;
        pte_t orig_pte = ptep_get(src_pte);
        pte_t pte = orig_pte;
        struct page *page;
        swp_entry_t entry = pte_to_swp_entry(orig_pte);

        if (likely(!non_swap_entry(entry))) {
                if (swap_duplicate(entry) < 0)
                        return -EIO;

                /* make sure dst_mm is on swapoff's mmlist. */
                if (unlikely(list_empty(&dst_mm->mmlist))) {
                        spin_lock(&mmlist_lock);
                        if (list_empty(&dst_mm->mmlist))
                                list_add(&dst_mm->mmlist,
                                                &src_mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                /* Mark the swap entry as shared. */
                if (pte_swp_exclusive(orig_pte)) {
                        pte = pte_swp_clear_exclusive(orig_pte);
                        set_pte_at(src_mm, addr, src_pte, pte);
                }
                rss[MM_SWAPENTS]++;
        } else if (is_migration_entry(entry)) {
                page = pfn_swap_entry_to_page(entry);

                rss[mm_counter(page)]++;

                if (!is_readable_migration_entry(entry) &&
                                is_cow_mapping(vm_flags)) {
                        /*
                         * COW mappings require pages in both parent and child
                         * to be set to read. A previously exclusive entry is
                         * now shared.
                         */
                        entry = make_readable_migration_entry(
                                                        swp_offset(entry));
                        pte = swp_entry_to_pte(entry);
                        if (pte_swp_soft_dirty(orig_pte))
                                pte = pte_swp_mksoft_dirty(pte);
                        if (pte_swp_uffd_wp(orig_pte))
                                pte = pte_swp_mkuffd_wp(pte);
                        set_pte_at(src_mm, addr, src_pte, pte);
                }
        } else if (is_device_private_entry(entry)) {
                page = pfn_swap_entry_to_page(entry);

                /*
                 * Update the rss count even for unaddressable pages, as
                 * they should be treated just like normal pages in this
                 * respect.
                 *
                 * We will likely want to have some new rss counters
                 * for unaddressable pages, at some point. But for now
                 * keep things as they are.
                 */
                get_page(page);
                rss[mm_counter(page)]++;
                /* Cannot fail as these pages cannot get pinned. */
                BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));

                /*
                 * We do not preserve soft-dirty information, because so
                 * far, checkpoint/restore is the only feature that
                 * requires that. And checkpoint/restore does not work
                 * when a device driver is involved (you cannot easily
                 * save and restore device driver state).
                 */
                if (is_writable_device_private_entry(entry) &&
                                is_cow_mapping(vm_flags)) {
                        entry = make_readable_device_private_entry(
                                                        swp_offset(entry));
                        pte = swp_entry_to_pte(entry);
                        if (pte_swp_uffd_wp(orig_pte))
                                pte = pte_swp_mkuffd_wp(pte);
                        set_pte_at(src_mm, addr, src_pte, pte);
                }
        } else if (is_device_exclusive_entry(entry)) {
                /*
                 * Make device exclusive entries present by restoring the
                 * original entry then copying as for a present pte. Device
                 * exclusive entries currently only support private writable
                 * (ie. COW) mappings.
                 */
                VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
                if (try_restore_exclusive_pte(src_pte, src_vma, addr))
                        return -EBUSY;
                return -ENOENT;
        } else if (is_pte_marker_entry(entry)) {
                if (is_swapin_error_entry(entry) || userfaultfd_wp(dst_vma))
                        set_pte_at(dst_mm, addr, dst_pte, pte);
                return 0;
        }
        if (!userfaultfd_wp(dst_vma))
                pte = pte_swp_clear_uffd_wp(pte);
        set_pte_at(dst_mm, addr, dst_pte, pte);
        return 0;
}

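/*
 * Summary of copy_nonpresent_pte()'s return contract, as consumed by
 * copy_pte_range() below: 0 means the entry was copied; -EIO means
 * swap_duplicate() failed and the caller must allocate a swap-count
 * continuation outside the page-table locks (add_swap_count_continuation()
 * in the retry path); -EBUSY means a device-exclusive page lock could not
 * be taken and the copy is abandoned; -ENOENT means the entry was turned
 * back into a present pte and the caller should fall through to
 * copy_present_pte().
 */
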
/*
 * Copy a present and normal page.
 *
 * NOTE! The usual case is that this isn't required;
 * instead, the caller can just increase the page refcount
 * and re-use the pte the traditional way.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
                  struct folio **prealloc, struct page *page)
{
        struct folio *new_folio;
        pte_t pte;

        new_folio = *prealloc;
        if (!new_folio)
                return -EAGAIN;

        /*
         * We have a prealloc page, all good!  Take it
         * over and copy the page & arm it.
         */
        *prealloc = NULL;
        copy_user_highpage(&new_folio->page, page, addr, src_vma);
        __folio_mark_uptodate(new_folio);
        folio_add_new_anon_rmap(new_folio, dst_vma, addr);
        folio_add_lru_vma(new_folio, dst_vma);
        rss[MM_ANONPAGES]++;

        /* All done, just insert the new page copy in the child */
        pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
        pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
        if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
                /* Uffd-wp needs to be delivered to dest pte as well */
                pte = pte_mkuffd_wp(pte);
        set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
        return 0;
}

/*
 * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
 * is required to copy this pte.
 */
static inline int
copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
                 struct folio **prealloc)
{
        struct mm_struct *src_mm = src_vma->vm_mm;
        unsigned long vm_flags = src_vma->vm_flags;
        pte_t pte = ptep_get(src_pte);
        struct page *page;
        struct folio *folio;

        page = vm_normal_page(src_vma, addr, pte);
        if (page)
                folio = page_folio(page);
        if (page && folio_test_anon(folio)) {
                /*
                 * If this page may have been pinned by the parent process,
                 * copy the page immediately for the child so that we'll always
                 * guarantee the pinned page won't be randomly replaced in the
                 * future.
                 */
                folio_get(folio);
                if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
                        /* Page may be pinned, we have to copy. */
                        folio_put(folio);
                        return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
                                                 addr, rss, prealloc, page);
                }
                rss[MM_ANONPAGES]++;
        } else if (page) {
                folio_get(folio);
                page_dup_file_rmap(page, false);
                rss[mm_counter_file(page)]++;
        }

        /*
         * If it's a COW mapping, write protect it both
         * in the parent and the child
         */
        if (is_cow_mapping(vm_flags) && pte_write(pte)) {
                ptep_set_wrprotect(src_mm, addr, src_pte);
                pte = pte_wrprotect(pte);
        }
        VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));

        /*
         * If it's a shared mapping, mark it clean in
         * the child
         */
        if (vm_flags & VM_SHARED)
                pte = pte_mkclean(pte);
        pte = pte_mkold(pte);

        if (!userfaultfd_wp(dst_vma))
                pte = pte_clear_uffd_wp(pte);

        set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
        return 0;
}

static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm,
                struct vm_area_struct *vma, unsigned long addr)
{
        struct folio *new_folio;

        new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
        if (!new_folio)
                return NULL;

        if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
                folio_put(new_folio);
                return NULL;
        }
        folio_throttle_swaprate(new_folio, GFP_KERNEL);

        return new_folio;
}

static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
               pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
               unsigned long end)
{
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        struct mm_struct *src_mm = src_vma->vm_mm;
        pte_t *orig_src_pte, *orig_dst_pte;
        pte_t *src_pte, *dst_pte;
        pte_t ptent;
        spinlock_t *src_ptl, *dst_ptl;
        int progress, ret = 0;
        int rss[NR_MM_COUNTERS];
        swp_entry_t entry = (swp_entry_t){0};
        struct folio *prealloc = NULL;

again:
        progress = 0;
        init_rss_vec(rss);

        /*
         * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
         * error handling here, assume that exclusive mmap_lock on dst and src
         * protects anon from unexpected THP transitions; with shmem and file
         * protected by mmap_lock-less collapse skipping areas with anon_vma
         * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
         * can remove such assumptions later, but this is good enough for now.
         */
        dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
        if (!dst_pte) {
                ret = -ENOMEM;
                goto out;
        }
        src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl);
        if (!src_pte) {
                pte_unmap_unlock(dst_pte, dst_ptl);
                /* ret == 0 */
                goto out;
        }
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
        orig_src_pte = src_pte;
        orig_dst_pte = dst_pte;
        arch_enter_lazy_mmu_mode();

        do {
                /*
                 * We are holding two locks at this point - either of them
                 * could generate latencies in another task on another CPU.
                 */
                if (progress >= 32) {
                        progress = 0;
                        if (need_resched() ||
                            spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
                                break;
                }
                ptent = ptep_get(src_pte);
                if (pte_none(ptent)) {
                        progress++;
                        continue;
                }
                if (unlikely(!pte_present(ptent))) {
                        ret = copy_nonpresent_pte(dst_mm, src_mm,
                                                  dst_pte, src_pte,
                                                  dst_vma, src_vma,
                                                  addr, rss);
                        if (ret == -EIO) {
                                entry = pte_to_swp_entry(ptep_get(src_pte));
                                break;
                        } else if (ret == -EBUSY) {
                                break;
                        } else if (!ret) {
                                progress += 8;
                                continue;
                        }

                        /*
                         * Device exclusive entry restored, continue by copying
                         * the now present pte.
                         */
                        WARN_ON_ONCE(ret != -ENOENT);
                }
                /* copy_present_pte() will clear `*prealloc' if consumed */
                ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
                                       addr, rss, &prealloc);
                /*
                 * If we need a pre-allocated page for this pte, drop the
                 * locks, allocate, and try again.
                 */
                if (unlikely(ret == -EAGAIN))
                        break;
                if (unlikely(prealloc)) {
                        /*
                         * The pre-allocated page cannot be reused on the next
                         * iteration, so as to strictly follow mempolicy (e.g.,
                         * alloc_page_vma() allocates according to address).
                         * This can only happen if one pinned pte changed.
                         */
                        folio_put(prealloc);
                        prealloc = NULL;
                }
                progress += 8;
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

        arch_leave_lazy_mmu_mode();
        pte_unmap_unlock(orig_src_pte, src_ptl);
        add_mm_rss_vec(dst_mm, rss);
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();

        if (ret == -EIO) {
                VM_WARN_ON_ONCE(!entry.val);
                if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
                        ret = -ENOMEM;
                        goto out;
                }
                entry.val = 0;
        } else if (ret == -EBUSY) {
                goto out;
        } else if (ret == -EAGAIN) {
                prealloc = page_copy_prealloc(src_mm, src_vma, addr);
                if (!prealloc)
                        return -ENOMEM;
        } else if (ret) {
                VM_WARN_ON_ONCE(1);
        }

        /* We've captured and resolved the error. Reset, try again. */
        ret = 0;

        if (addr != end)
                goto again;
out:
        if (unlikely(prealloc))
                folio_put(prealloc);
        return ret;
}

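/*
 * Note on the retry protocol above (summarizing the code, not adding new
 * behaviour): any -EIO/-EAGAIN condition discovered while holding the two
 * pte locks is resolved only after both locks are dropped - a swap-count
 * continuation or a preallocated destination folio may need a sleeping
 * GFP_KERNEL allocation - and then the "again:" loop re-takes the locks
 * and resumes at the pte where it left off.
 */
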
static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
               pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
               unsigned long end)
{
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        struct mm_struct *src_mm = src_vma->vm_mm;
        pmd_t *src_pmd, *dst_pmd;
        unsigned long next;

        dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
        if (!dst_pmd)
                return -ENOMEM;
        src_pmd = pmd_offset(src_pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
                        || pmd_devmap(*src_pmd)) {
                        int err;
                        VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
                        err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
                                            addr, dst_vma, src_vma);
                        if (err == -ENOMEM)
                                return -ENOMEM;
                        if (!err)
                                continue;
                        /* fall through */
                }
                if (pmd_none_or_clear_bad(src_pmd))
                        continue;
                if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
                                   addr, next))
                        return -ENOMEM;
        } while (dst_pmd++, src_pmd++, addr = next, addr != end);
        return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
               p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
               unsigned long end)
{
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        struct mm_struct *src_mm = src_vma->vm_mm;
        pud_t *src_pud, *dst_pud;
        unsigned long next;

        dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
        if (!dst_pud)
                return -ENOMEM;
        src_pud = pud_offset(src_p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
                        int err;

                        VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
                        err = copy_huge_pud(dst_mm, src_mm,
                                            dst_pud, src_pud, addr, src_vma);
                        if (err == -ENOMEM)
                                return -ENOMEM;
                        if (!err)
                                continue;
                        /* fall through */
                }
                if (pud_none_or_clear_bad(src_pud))
                        continue;
                if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
                                   addr, next))
                        return -ENOMEM;
        } while (dst_pud++, src_pud++, addr = next, addr != end);
        return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
               pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
               unsigned long end)
{
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        p4d_t *src_p4d, *dst_p4d;
        unsigned long next;

        dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
        if (!dst_p4d)
                return -ENOMEM;
        src_p4d = p4d_offset(src_pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(src_p4d))
                        continue;
                if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
                                   addr, next))
                        return -ENOMEM;
        } while (dst_p4d++, src_p4d++, addr = next, addr != end);
        return 0;
}

/*
 * Return true if the vma needs to copy the pgtable during this fork().  Return
 * false when we can speed up fork() by allowing lazy page faults later until
 * when the child accesses the memory range.
 */
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
        /*
         * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
         * file-backed (e.g. shmem). Because when uffd-wp is enabled, the
         * pgtable contains uffd-wp protection information that can't be
         * retrieved from the page cache, and skipping the copy would lose it.
         */
        if (userfaultfd_wp(dst_vma))
                return true;

        if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                return true;

        if (src_vma->anon_vma)
                return true;

        /*
         * Don't copy ptes where a page fault will fill them correctly.  Fork
         * becomes much lighter when there are big shared or private readonly
         * mappings. The tradeoff is that copy_page_range is more efficient
         * than faulting.
         */
        return false;
}

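/*
 * Example (illustrative, following directly from the checks above): a large
 * read-only private mapping of a shared library that the parent never wrote
 * to has no anon_vma, no uffd-wp, and no VM_PFNMAP/VM_MIXEDMAP, so fork()
 * skips copying its page tables entirely and the child populates them
 * lazily on first access.
 */
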
int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
        pgd_t *src_pgd, *dst_pgd;
        unsigned long next;
        unsigned long addr = src_vma->vm_start;
        unsigned long end = src_vma->vm_end;
        struct mm_struct *dst_mm = dst_vma->vm_mm;
        struct mm_struct *src_mm = src_vma->vm_mm;
        struct mmu_notifier_range range;
        bool is_cow;
        int ret;

        if (!vma_needs_copy(dst_vma, src_vma))
                return 0;

        if (is_vm_hugetlb_page(src_vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);

        if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
                /*
                 * We do not free on error cases below as remove_vma
                 * gets called on error from higher level routine
                 */
                ret = track_pfn_copy(src_vma);
                if (ret)
                        return ret;
        }

        /*
         * We need to invalidate the secondary MMU mappings only when
         * there could be a permission downgrade on the ptes of the
         * parent mm. And a permission downgrade will only happen if
         * is_cow_mapping() returns true.
         */
        is_cow = is_cow_mapping(src_vma->vm_flags);

        if (is_cow) {
                mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
                                        0, src_mm, addr, end);
                mmu_notifier_invalidate_range_start(&range);
                /*
                 * Disabling preemption is not needed for the write side, as
                 * the read side doesn't spin, but goes to the mmap_lock.
                 *
                 * Use the raw variant of the seqcount_t write API to avoid
                 * lockdep complaining about preemptibility.
                 */
                mmap_assert_write_locked(src_mm);
                raw_write_seqcount_begin(&src_mm->write_protect_seq);
        }

        ret = 0;
        dst_pgd = pgd_offset(dst_mm, addr);
        src_pgd = pgd_offset(src_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(src_pgd))
                        continue;
                if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
                                            addr, next))) {
                        untrack_pfn_clear(dst_vma);
                        ret = -ENOMEM;
                        break;
                }
        } while (dst_pgd++, src_pgd++, addr = next, addr != end);

        if (is_cow) {
                raw_write_seqcount_end(&src_mm->write_protect_seq);
                mmu_notifier_invalidate_range_end(&range);
        }
        return ret;
}

/* Whether we should zap all COWed (private) pages too */
static inline bool should_zap_cows(struct zap_details *details)
{
        /* By default, zap all pages */
        if (!details)
                return true;

        /* Or, we zap COWed pages only if the caller wants to */
        return details->even_cows;
}

/* Decides whether we should zap this page with the page pointer specified */
static inline bool should_zap_page(struct zap_details *details, struct page *page)
{
        /* If we can make a decision without *page.. */
        if (should_zap_cows(details))
                return true;

        /* E.g. the caller passes NULL for the case of a zero page */
        if (!page)
                return true;

        /* Otherwise we should only zap non-anon pages */
        return !PageAnon(page);
}

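/*
 * Putting the two helpers above together (a restatement, not new logic):
 * details == NULL, or details->even_cows == true, zaps every page in the
 * range; a caller that only needs to invalidate shared file mappings
 * (even_cows == false) leaves private COWed anonymous copies in place and
 * zaps only the file-backed pages.
 */
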
static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
{
        if (!details)
                return false;

        return details->zap_flags & ZAP_FLAG_DROP_MARKER;
}

/*
 * This function makes sure that we'll replace the none pte with an uffd-wp
 * swap special pte marker when necessary. Must be called with the pgtable
 * lock held.
 */
static inline void
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *pte,
                              struct zap_details *details, pte_t pteval)
{
        /* Zap on anonymous always means dropping everything */
        if (vma_is_anonymous(vma))
                return;

        if (zap_drop_file_uffd_wp(details))
                return;

        pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
}

static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
                                struct zap_details *details)
{
        struct mm_struct *mm = tlb->mm;
        int force_flush = 0;
        int rss[NR_MM_COUNTERS];
        spinlock_t *ptl;
        pte_t *start_pte;
        pte_t *pte;
        swp_entry_t entry;

        tlb_change_page_size(tlb, PAGE_SIZE);
        init_rss_vec(rss);
        start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return addr;

        flush_tlb_batched_pending(mm);
        arch_enter_lazy_mmu_mode();
        do {
                pte_t ptent = ptep_get(pte);
                struct page *page;

                if (pte_none(ptent))
                        continue;

                if (need_resched())
                        break;

                if (pte_present(ptent)) {
                        unsigned int delay_rmap;

                        page = vm_normal_page(vma, addr, ptent);
                        if (unlikely(!should_zap_page(details, page)))
                                continue;
                        ptent = ptep_get_and_clear_full(mm, addr, pte,
                                                        tlb->fullmm);
                        tlb_remove_tlb_entry(tlb, pte, addr);
                        zap_install_uffd_wp_if_needed(vma, addr, pte, details,
                                                      ptent);
                        if (unlikely(!page))
                                continue;

                        delay_rmap = 0;
                        if (!PageAnon(page)) {
                                if (pte_dirty(ptent)) {
                                        set_page_dirty(page);
                                        if (tlb_delay_rmap(tlb)) {
                                                delay_rmap = 1;
                                                force_flush = 1;
                                        }
                                }
                                if (pte_young(ptent) && likely(vma_has_recency(vma)))
                                        mark_page_accessed(page);
                        }
                        rss[mm_counter(page)]--;
                        if (!delay_rmap) {
                                page_remove_rmap(page, vma, false);
                                if (unlikely(page_mapcount(page) < 0))
                                        print_bad_pte(vma, addr, ptent, page);
                        }
                        if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) {
                                force_flush = 1;
                                addr += PAGE_SIZE;
                                break;
                        }
                        continue;
                }

                entry = pte_to_swp_entry(ptent);
                if (is_device_private_entry(entry) ||
                    is_device_exclusive_entry(entry)) {
                        page = pfn_swap_entry_to_page(entry);
                        if (unlikely(!should_zap_page(details, page)))
                                continue;
                        /*
                         * Both device private/exclusive mappings should only
                         * work with anonymous page so far, so we don't need to
                         * consider uffd-wp bit when zap. For more information,
                         * see zap_install_uffd_wp_if_needed().
                         */
                        WARN_ON_ONCE(!vma_is_anonymous(vma));
                        rss[mm_counter(page)]--;
                        if (is_device_private_entry(entry))
                                page_remove_rmap(page, vma, false);
                        put_page(page);
                } else if (!non_swap_entry(entry)) {
                        /* Genuine swap entry, hence a private anon page */
                        if (!should_zap_cows(details))
                                continue;
                        rss[MM_SWAPENTS]--;
                        if (unlikely(!free_swap_and_cache(entry)))
                                print_bad_pte(vma, addr, ptent, NULL);
                } else if (is_migration_entry(entry)) {
                        page = pfn_swap_entry_to_page(entry);
                        if (!should_zap_page(details, page))
                                continue;
                        rss[mm_counter(page)]--;
                } else if (pte_marker_entry_uffd_wp(entry)) {
                        /*
                         * For anon: always drop the marker; for file: only
                         * drop the marker if explicitly requested.
                         */
                        if (!vma_is_anonymous(vma) &&
                            !zap_drop_file_uffd_wp(details))
                                continue;
                } else if (is_hwpoison_entry(entry) ||
                           is_swapin_error_entry(entry)) {
                        if (!should_zap_cows(details))
                                continue;
                } else {
                        /* We should have covered all the swap entry types */
                        WARN_ON_ONCE(1);
                }
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
                zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
        } while (pte++, addr += PAGE_SIZE, addr != end);

        add_mm_rss_vec(mm, rss);
        arch_leave_lazy_mmu_mode();

        /* Do the actual TLB flush before dropping ptl */
        if (force_flush) {
                tlb_flush_mmu_tlbonly(tlb);
                tlb_flush_rmaps(tlb, vma);
        }
        pte_unmap_unlock(start_pte, ptl);

        /*
         * If we forced a TLB flush (either due to running out of
         * batch buffers or because we needed to flush dirty TLB
         * entries before releasing the ptl), free the batched
         * memory too. Come back again if we didn't do everything.
         */
        if (force_flush)
                tlb_flush_mmu(tlb);

        return addr;
}

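/*
 * Note on force_flush above (a summary of the code, not new behaviour):
 * the TLB must be flushed before the pte lock is dropped whenever a dirty
 * file page's rmap removal was delayed or the mmu_gather batch filled up;
 * otherwise another CPU could keep writing through a stale TLB entry to a
 * page about to be freed. zap_pte_range() then returns early, and the
 * caller (zap_pmd_range()) notices addr != next and re-enters the range.
 */
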
static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pud_t *pud,
                                unsigned long addr, unsigned long end,
                                struct zap_details *details)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE)
                                __split_huge_pmd(vma, pmd, addr, false, NULL);
                        else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
                                addr = next;
                                continue;
                        }
                        /* fall through */
                } else if (details && details->single_folio &&
                           folio_test_pmd_mappable(details->single_folio) &&
                           next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
                        spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
                        /*
                         * Take and drop THP pmd lock so that we cannot return
                         * prematurely, while zap_huge_pmd() has cleared *pmd,
                         * but not yet decremented compound_mapcount().
                         */
                        spin_unlock(ptl);
                }
                if (pmd_none(*pmd)) {
                        addr = next;
                        continue;
                }
                addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
                if (addr != next)
                        pmd--;
        } while (pmd++, cond_resched(), addr != end);

        return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, p4d_t *p4d,
                                unsigned long addr, unsigned long end,
                                struct zap_details *details)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
                        if (next - addr != HPAGE_PUD_SIZE) {
                                mmap_assert_locked(tlb->mm);
                                split_huge_pud(vma, pud, addr);
                        } else if (zap_huge_pud(tlb, vma, pud, addr))
                                goto next;
                        /* fall through */
                }
                if (pud_none_or_clear_bad(pud))
                        continue;
                next = zap_pmd_range(tlb, vma, pud, addr, next, details);
next:
                cond_resched();
        } while (pud++, addr = next, addr != end);

        return addr;
}

static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
                                struct zap_details *details)
{
        p4d_t *p4d;
        unsigned long next;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d))
                        continue;
                next = zap_pud_range(tlb, vma, p4d, addr, next, details);
        } while (p4d++, addr = next, addr != end);

        return addr;
}

void unmap_page_range(struct mmu_gather *tlb,
                             struct vm_area_struct *vma,
                             unsigned long addr, unsigned long end,
                             struct zap_details *details)
{
        pgd_t *pgd;
        unsigned long next;

        BUG_ON(addr >= end);
        tlb_start_vma(tlb, vma);
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
        } while (pgd++, addr = next, addr != end);
        tlb_end_vma(tlb, vma);
}

f5cc4eef
AV
1646
1647static void unmap_single_vma(struct mmu_gather *tlb,
1648 struct vm_area_struct *vma, unsigned long start_addr,
4f74d2c8 1649 unsigned long end_addr,
68f48381 1650 struct zap_details *details, bool mm_wr_locked)
f5cc4eef
AV
1651{
1652 unsigned long start = max(vma->vm_start, start_addr);
1653 unsigned long end;
1654
1655 if (start >= vma->vm_end)
1656 return;
1657 end = min(vma->vm_end, end_addr);
1658 if (end <= vma->vm_start)
1659 return;
1660
cbc91f71
SD
1661 if (vma->vm_file)
1662 uprobe_munmap(vma, start, end);
1663
b3b9c293 1664 if (unlikely(vma->vm_flags & VM_PFNMAP))
68f48381 1665 untrack_pfn(vma, 0, 0, mm_wr_locked);
f5cc4eef
AV
1666
1667 if (start != end) {
1668 if (unlikely(is_vm_hugetlb_page(vma))) {
1669 /*
1670 * It is undesirable to test vma->vm_file as it
 1671			 * should be non-null for a valid hugetlb area.
1672 * However, vm_file will be NULL in the error
7aa6b4ad 1673 * cleanup path of mmap_region. When
f5cc4eef 1674 * hugetlbfs ->mmap method fails,
7aa6b4ad 1675 * mmap_region() nullifies vma->vm_file
f5cc4eef
AV
1676 * before calling this function to clean up.
1677 * Since no pte has actually been setup, it is
1678 * safe to do nothing in this case.
1679 */
24669e58 1680 if (vma->vm_file) {
05e90bd0
PX
1681 zap_flags_t zap_flags = details ?
1682 details->zap_flags : 0;
05e90bd0
PX
1683 __unmap_hugepage_range_final(tlb, vma, start, end,
1684 NULL, zap_flags);
24669e58 1685 }
f5cc4eef
AV
1686 } else
1687 unmap_page_range(tlb, vma, start, end, details);
1688 }
1da177e4
LT
1689}
1690
1da177e4
LT
1691/**
1692 * unmap_vmas - unmap a range of memory covered by a list of vma's
0164f69d 1693 * @tlb: address of the caller's struct mmu_gather
763ecb03 1694 * @mt: the maple tree
1da177e4
LT
1695 * @vma: the starting vma
1696 * @start_addr: virtual address at which to start unmapping
1697 * @end_addr: virtual address at which to end unmapping
1da177e4 1698 *
508034a3 1699 * Unmap all pages in the vma list.
1da177e4 1700 *
1da177e4
LT
1701 * Only addresses between `start' and `end' will be unmapped.
1702 *
1703 * The VMA list must be sorted in ascending virtual address order.
1704 *
1705 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1706 * range after unmap_vmas() returns. So the only responsibility here is to
1707 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1708 * drops the lock and schedules.
1709 */
763ecb03 1710void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
1da177e4 1711 struct vm_area_struct *vma, unsigned long start_addr,
68f48381 1712 unsigned long end_addr, bool mm_wr_locked)
1da177e4 1713{
ac46d4f3 1714 struct mmu_notifier_range range;
999dad82 1715 struct zap_details details = {
04ada095 1716 .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
999dad82
PX
1717 /* Careful - we need to zap private pages too! */
1718 .even_cows = true,
1719 };
763ecb03 1720 MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
1da177e4 1721
7d4a8be0 1722 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
6f4f13e8 1723 start_addr, end_addr);
ac46d4f3 1724 mmu_notifier_invalidate_range_start(&range);
763ecb03 1725 do {
68f48381
SB
1726 unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
1727 mm_wr_locked);
763ecb03 1728 } while ((vma = mas_find(&mas, end_addr - 1)) != NULL);
ac46d4f3 1729 mmu_notifier_invalidate_range_end(&range);
1da177e4
LT
1730}
1731
f5cc4eef
AV
1732/**
1733 * zap_page_range_single - remove user pages in a given range
1734 * @vma: vm_area_struct holding the applicable pages
1735 * @address: starting address of pages to zap
1736 * @size: number of bytes to zap
8a5f14a2 1737 * @details: details of shared cache invalidation
f5cc4eef
AV
1738 *
1739 * The range must fit into one VMA.
1da177e4 1740 */
21b85b09 1741void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1da177e4
LT
1742 unsigned long size, struct zap_details *details)
1743{
21b85b09 1744 const unsigned long end = address + size;
ac46d4f3 1745 struct mmu_notifier_range range;
d16dfc55 1746 struct mmu_gather tlb;
1da177e4 1747
1da177e4 1748 lru_add_drain();
7d4a8be0 1749 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
21b85b09
MK
1750 address, end);
1751 if (is_vm_hugetlb_page(vma))
1752 adjust_range_if_pmd_sharing_possible(vma, &range.start,
1753 &range.end);
a72afd87 1754 tlb_gather_mmu(&tlb, vma->vm_mm);
ac46d4f3
JG
1755 update_hiwater_rss(vma->vm_mm);
1756 mmu_notifier_invalidate_range_start(&range);
21b85b09
MK
1757 /*
1758 * unmap 'address-end' not 'range.start-range.end' as range
1759 * could have been expanded for hugetlb pmd sharing.
1760 */
68f48381 1761 unmap_single_vma(&tlb, vma, address, end, details, false);
ac46d4f3 1762 mmu_notifier_invalidate_range_end(&range);
ae8eba8b 1763 tlb_finish_mmu(&tlb);
1da177e4
LT
1764}
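
/*
 * Illustrative sketch: how a caller inside mm might drop the mapping of a
 * single page within one vma using this helper, in the style of the
 * madvise and unmap_mapping_range users. The function name is hypothetical;
 * only the zap_page_range_single() call is the real interface.
 */
static void example_zap_one_page(struct vm_area_struct *vma, unsigned long addr)
{
	/* NULL details: no file-backed filtering, zap everything in range. */
	zap_page_range_single(vma, addr & PAGE_MASK, PAGE_SIZE, NULL);
}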
1765
c627f9cc
JS
1766/**
1767 * zap_vma_ptes - remove ptes mapping the vma
1768 * @vma: vm_area_struct holding ptes to be zapped
1769 * @address: starting address of pages to zap
1770 * @size: number of bytes to zap
1771 *
1772 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1773 *
1774 * The entire address range must be fully contained within the vma.
1775 *
c627f9cc 1776 */
27d036e3 1777void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
c627f9cc
JS
1778 unsigned long size)
1779{
88a35912 1780 if (!range_in_vma(vma, address, address + size) ||
c627f9cc 1781 !(vma->vm_flags & VM_PFNMAP))
27d036e3
LR
1782 return;
1783
f5cc4eef 1784 zap_page_range_single(vma, address, size, NULL);
c627f9cc
JS
1785}
1786EXPORT_SYMBOL_GPL(zap_vma_ptes);
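
/*
 * Illustrative sketch: a hypothetical driver that earlier populated a
 * VM_PFNMAP vma (e.g. with remap_pfn_range()) revoking those ptes before
 * reusing the backing region. The function name is made up; only the
 * zap_vma_ptes() call is the real interface.
 */
static void example_revoke_mapping(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}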
1787
8cd3984d 1788static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
c9cfcddf 1789{
c2febafc
KS
1790 pgd_t *pgd;
1791 p4d_t *p4d;
1792 pud_t *pud;
1793 pmd_t *pmd;
1794
1795 pgd = pgd_offset(mm, addr);
1796 p4d = p4d_alloc(mm, pgd, addr);
1797 if (!p4d)
1798 return NULL;
1799 pud = pud_alloc(mm, p4d, addr);
1800 if (!pud)
1801 return NULL;
1802 pmd = pmd_alloc(mm, pud, addr);
1803 if (!pmd)
1804 return NULL;
1805
1806 VM_BUG_ON(pmd_trans_huge(*pmd));
8cd3984d
AR
1807 return pmd;
1808}
1809
1810pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1811 spinlock_t **ptl)
1812{
1813 pmd_t *pmd = walk_to_pmd(mm, addr);
1814
1815 if (!pmd)
1816 return NULL;
c2febafc 1817 return pte_alloc_map_lock(mm, pmd, addr, ptl);
c9cfcddf
LT
1818}
1819
8efd6f5b
AR
1820static int validate_page_before_insert(struct page *page)
1821{
1822 if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1823 return -EINVAL;
1824 flush_dcache_page(page);
1825 return 0;
1826}
1827
cea86fe2 1828static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
8efd6f5b
AR
1829 unsigned long addr, struct page *page, pgprot_t prot)
1830{
c33c7948 1831 if (!pte_none(ptep_get(pte)))
8efd6f5b
AR
1832 return -EBUSY;
1833 /* Ok, finally just insert the thing.. */
1834 get_page(page);
f1a79412 1835 inc_mm_counter(vma->vm_mm, mm_counter_file(page));
cea86fe2
HD
1836 page_add_file_rmap(page, vma, false);
1837 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
8efd6f5b
AR
1838 return 0;
1839}
1840
238f58d8
LT
1841/*
1842 * This is the old fallback for page remapping.
1843 *
1844 * For historical reasons, it only allows reserved pages. Only
1845 * old drivers should use this, and they needed to mark their
1846 * pages reserved for the old functions anyway.
1847 */
423bad60
NP
1848static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1849 struct page *page, pgprot_t prot)
238f58d8
LT
1850{
1851 int retval;
c9cfcddf 1852 pte_t *pte;
8a9f3ccd
BS
1853 spinlock_t *ptl;
1854
8efd6f5b
AR
1855 retval = validate_page_before_insert(page);
1856 if (retval)
5b4e655e 1857 goto out;
238f58d8 1858 retval = -ENOMEM;
cea86fe2 1859 pte = get_locked_pte(vma->vm_mm, addr, &ptl);
238f58d8 1860 if (!pte)
5b4e655e 1861 goto out;
cea86fe2 1862 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
238f58d8
LT
1863 pte_unmap_unlock(pte, ptl);
1864out:
1865 return retval;
1866}
1867
8cd3984d 1868#ifdef pte_index
cea86fe2 1869static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
8cd3984d
AR
1870 unsigned long addr, struct page *page, pgprot_t prot)
1871{
1872 int err;
1873
1874 if (!page_count(page))
1875 return -EINVAL;
1876 err = validate_page_before_insert(page);
7f70c2a6
AR
1877 if (err)
1878 return err;
cea86fe2 1879 return insert_page_into_pte_locked(vma, pte, addr, page, prot);
8cd3984d
AR
1880}
1881
1882/* insert_pages() amortizes the cost of spinlock operations
1883 * when inserting pages in a loop. Arch *must* define pte_index.
1884 */
1885static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1886 struct page **pages, unsigned long *num, pgprot_t prot)
1887{
1888 pmd_t *pmd = NULL;
7f70c2a6
AR
1889 pte_t *start_pte, *pte;
1890 spinlock_t *pte_lock;
8cd3984d
AR
1891 struct mm_struct *const mm = vma->vm_mm;
1892 unsigned long curr_page_idx = 0;
1893 unsigned long remaining_pages_total = *num;
1894 unsigned long pages_to_write_in_pmd;
1895 int ret;
1896more:
1897 ret = -EFAULT;
1898 pmd = walk_to_pmd(mm, addr);
1899 if (!pmd)
1900 goto out;
1901
1902 pages_to_write_in_pmd = min_t(unsigned long,
1903 remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1904
1905 /* Allocate the PTE if necessary; takes PMD lock once only. */
1906 ret = -ENOMEM;
1907 if (pte_alloc(mm, pmd))
1908 goto out;
8cd3984d
AR
1909
1910 while (pages_to_write_in_pmd) {
1911 int pte_idx = 0;
1912 const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1913
7f70c2a6 1914 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
3db82b93
HD
1915 if (!start_pte) {
1916 ret = -EFAULT;
1917 goto out;
1918 }
7f70c2a6 1919 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
cea86fe2 1920 int err = insert_page_in_batch_locked(vma, pte,
8cd3984d
AR
1921 addr, pages[curr_page_idx], prot);
1922 if (unlikely(err)) {
7f70c2a6 1923 pte_unmap_unlock(start_pte, pte_lock);
8cd3984d
AR
1924 ret = err;
1925 remaining_pages_total -= pte_idx;
1926 goto out;
1927 }
1928 addr += PAGE_SIZE;
1929 ++curr_page_idx;
1930 }
7f70c2a6 1931 pte_unmap_unlock(start_pte, pte_lock);
8cd3984d
AR
1932 pages_to_write_in_pmd -= batch_size;
1933 remaining_pages_total -= batch_size;
1934 }
1935 if (remaining_pages_total)
1936 goto more;
1937 ret = 0;
1938out:
1939 *num = remaining_pages_total;
1940 return ret;
1941}
1942#endif /* ifdef pte_index */
1943
1944/**
1945 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1946 * @vma: user vma to map to
1947 * @addr: target start user address of these pages
1948 * @pages: source kernel pages
1949 * @num: in: number of pages to map. out: number of pages that were *not*
1950 * mapped. (0 means all pages were successfully mapped).
1951 *
1952 * Preferred over vm_insert_page() when inserting multiple pages.
1953 *
1954 * In case of error, we may have mapped a subset of the provided
1955 * pages. It is the caller's responsibility to account for this case.
1956 *
1957 * The same restrictions apply as in vm_insert_page().
1958 */
1959int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1960 struct page **pages, unsigned long *num)
1961{
1962#ifdef pte_index
1963 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1964
1965 if (addr < vma->vm_start || end_addr >= vma->vm_end)
1966 return -EFAULT;
1967 if (!(vma->vm_flags & VM_MIXEDMAP)) {
d8ed45c5 1968 BUG_ON(mmap_read_trylock(vma->vm_mm));
8cd3984d 1969 BUG_ON(vma->vm_flags & VM_PFNMAP);
1c71222e 1970 vm_flags_set(vma, VM_MIXEDMAP);
8cd3984d
AR
1971 }
1972 /* Defer page refcount checking till we're about to map that page. */
1973 return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1974#else
1975 unsigned long idx = 0, pgcount = *num;
45779b03 1976 int err = -EINVAL;
8cd3984d
AR
1977
1978 for (; idx < pgcount; ++idx) {
1979 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1980 if (err)
1981 break;
1982 }
1983 *num = pgcount - idx;
1984 return err;
1985#endif /* ifdef pte_index */
1986}
1987EXPORT_SYMBOL(vm_insert_pages);
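
/*
 * Illustrative sketch: a hypothetical f_op->mmap handler mapping an array
 * of driver-allocated pages with one vm_insert_pages() call, so the pte
 * lock is taken once per pmd instead of once per page. The private_data
 * layout is an assumption; only vm_insert_pages() itself is the real API.
 */
static int example_mmap_batch(struct file *file, struct vm_area_struct *vma)
{
	struct page **pages = file->private_data;	/* assumed: filled at open() */
	unsigned long num = vma_pages(vma);
	int err;

	err = vm_insert_pages(vma, vma->vm_start, pages, &num);
	/* On failure, 'num' is the count of pages that were *not* mapped. */
	return err;
}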
1988
bfa5bf6d
REB
1989/**
1990 * vm_insert_page - insert single page into user vma
1991 * @vma: user vma to map to
1992 * @addr: target user address of this page
1993 * @page: source kernel page
1994 *
a145dd41
LT
1995 * This allows drivers to insert individual pages they've allocated
1996 * into a user vma.
1997 *
1998 * The page has to be a nice clean _individual_ kernel allocation.
1999 * If you allocate a compound page, you need to have marked it as
2000 * such (__GFP_COMP), or manually just split the page up yourself
8dfcc9ba 2001 * (see split_page()).
a145dd41
LT
2002 *
2003 * NOTE! Traditionally this was done with "remap_pfn_range()" which
2004 * took an arbitrary page protection parameter. This doesn't allow
2005 * that. Your vma protection will have to be set up correctly, which
2006 * means that if you want a shared writable mapping, you'd better
2007 * ask for a shared writable mapping!
2008 *
2009 * The page does not need to be reserved.
4b6e1e37
KK
2010 *
2011 * Usually this function is called from f_op->mmap() handler
c1e8d7c6 2012 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
4b6e1e37
KK
2013 * Caller must set VM_MIXEDMAP on vma if it wants to call this
2014 * function from other places, for example from page-fault handler.
a862f68a
MR
2015 *
2016 * Return: %0 on success, negative error code otherwise.
a145dd41 2017 */
423bad60
NP
2018int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2019 struct page *page)
a145dd41
LT
2020{
2021 if (addr < vma->vm_start || addr >= vma->vm_end)
2022 return -EFAULT;
2023 if (!page_count(page))
2024 return -EINVAL;
4b6e1e37 2025 if (!(vma->vm_flags & VM_MIXEDMAP)) {
d8ed45c5 2026 BUG_ON(mmap_read_trylock(vma->vm_mm));
4b6e1e37 2027 BUG_ON(vma->vm_flags & VM_PFNMAP);
1c71222e 2028 vm_flags_set(vma, VM_MIXEDMAP);
4b6e1e37 2029 }
423bad60 2030 return insert_page(vma, addr, page, vma->vm_page_prot);
a145dd41 2031}
e3c3374f 2032EXPORT_SYMBOL(vm_insert_page);
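
/*
 * Illustrative sketch: the classic single-page use from a hypothetical
 * f_op->mmap handler. The private_data convention is an assumption; the
 * vm_insert_page() call and its restrictions are as documented above.
 */
static int example_mmap_one_page(struct file *file, struct vm_area_struct *vma)
{
	struct page *page = file->private_data;	/* assumed: alloc_page()d at open() */

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	/* Running under mmap_lock write-lock, so VM_MIXEDMAP can be set for us. */
	return vm_insert_page(vma, vma->vm_start, page);
}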
a145dd41 2033
a667d745
SJ
2034/*
2035 * __vm_map_pages - maps range of kernel pages into user vma
2036 * @vma: user vma to map to
2037 * @pages: pointer to array of source kernel pages
2038 * @num: number of pages in page array
2039 * @offset: user's requested vm_pgoff
2040 *
2041 * This allows drivers to map range of kernel pages into a user vma.
2042 *
2043 * Return: 0 on success and error code otherwise.
2044 */
2045static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2046 unsigned long num, unsigned long offset)
2047{
2048 unsigned long count = vma_pages(vma);
2049 unsigned long uaddr = vma->vm_start;
2050 int ret, i;
2051
2052 /* Fail if the user requested offset is beyond the end of the object */
96756fcb 2053 if (offset >= num)
a667d745
SJ
2054 return -ENXIO;
2055
2056 /* Fail if the user requested size exceeds available object size */
2057 if (count > num - offset)
2058 return -ENXIO;
2059
2060 for (i = 0; i < count; i++) {
2061 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2062 if (ret < 0)
2063 return ret;
2064 uaddr += PAGE_SIZE;
2065 }
2066
2067 return 0;
2068}
2069
2070/**
 2071 * vm_map_pages - map a range of kernel pages starting at a non-zero offset
2072 * @vma: user vma to map to
2073 * @pages: pointer to array of source kernel pages
2074 * @num: number of pages in page array
2075 *
2076 * Maps an object consisting of @num pages, catering for the user's
2077 * requested vm_pgoff
2078 *
2079 * If we fail to insert any page into the vma, the function will return
2080 * immediately leaving any previously inserted pages present. Callers
2081 * from the mmap handler may immediately return the error as their caller
2082 * will destroy the vma, removing any successfully inserted pages. Other
2083 * callers should make their own arrangements for calling unmap_region().
2084 *
2085 * Context: Process context. Called by mmap handlers.
2086 * Return: 0 on success and error code otherwise.
2087 */
2088int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2089 unsigned long num)
2090{
2091 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2092}
2093EXPORT_SYMBOL(vm_map_pages);
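
/*
 * Illustrative sketch: the same kind of mmap handler as above, but letting
 * vm_map_pages() walk the page array and honour the user's vm_pgoff rather
 * than open-coding a vm_insert_page() loop. The buffer structure is a
 * hypothetical piece of driver bookkeeping.
 */
struct example_pagebuf {
	struct page **pages;
	unsigned long nr_pages;
};

static int example_mmap_array(struct file *file, struct vm_area_struct *vma)
{
	struct example_pagebuf *buf = file->private_data;	/* assumption */

	return vm_map_pages(vma, buf->pages, buf->nr_pages);
}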
2094
2095/**
 2096 * vm_map_pages_zero - map a range of kernel pages starting at offset zero
2097 * @vma: user vma to map to
2098 * @pages: pointer to array of source kernel pages
2099 * @num: number of pages in page array
2100 *
2101 * Similar to vm_map_pages(), except that it explicitly sets the offset
2102 * to 0. This function is intended for the drivers that did not consider
2103 * vm_pgoff.
2104 *
2105 * Context: Process context. Called by mmap handlers.
2106 * Return: 0 on success and error code otherwise.
2107 */
2108int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2109 unsigned long num)
2110{
2111 return __vm_map_pages(vma, pages, num, 0);
2112}
2113EXPORT_SYMBOL(vm_map_pages_zero);
2114
9b5a8e00 2115static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
b2770da6 2116 pfn_t pfn, pgprot_t prot, bool mkwrite)
423bad60
NP
2117{
2118 struct mm_struct *mm = vma->vm_mm;
423bad60
NP
2119 pte_t *pte, entry;
2120 spinlock_t *ptl;
2121
423bad60
NP
2122 pte = get_locked_pte(mm, addr, &ptl);
2123 if (!pte)
9b5a8e00 2124 return VM_FAULT_OOM;
c33c7948
RR
2125 entry = ptep_get(pte);
2126 if (!pte_none(entry)) {
b2770da6
RZ
2127 if (mkwrite) {
2128 /*
2129 * For read faults on private mappings the PFN passed
2130 * in may not match the PFN we have mapped if the
2131 * mapped PFN is a writeable COW page. In the mkwrite
2132 * case we are creating a writable PTE for a shared
f2c57d91
JK
2133 * mapping and we expect the PFNs to match. If they
2134 * don't match, we are likely racing with block
2135 * allocation and mapping invalidation so just skip the
2136 * update.
b2770da6 2137 */
c33c7948
RR
2138 if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2139 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
b2770da6 2140 goto out_unlock;
f2c57d91 2141 }
c33c7948 2142 entry = pte_mkyoung(entry);
cae85cb8
JK
2143 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2144 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2145 update_mmu_cache(vma, addr, pte);
2146 }
2147 goto out_unlock;
b2770da6 2148 }
423bad60
NP
2149
2150 /* Ok, finally just insert the thing.. */
01c8f1c4
DW
2151 if (pfn_t_devmap(pfn))
2152 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2153 else
2154 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
b2770da6 2155
b2770da6
RZ
2156 if (mkwrite) {
2157 entry = pte_mkyoung(entry);
2158 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2159 }
2160
423bad60 2161 set_pte_at(mm, addr, pte, entry);
4b3073e1 2162 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
423bad60 2163
423bad60
NP
2164out_unlock:
2165 pte_unmap_unlock(pte, ptl);
9b5a8e00 2166 return VM_FAULT_NOPAGE;
423bad60
NP
2167}
2168
f5e6d1d5
MW
2169/**
2170 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2171 * @vma: user vma to map to
2172 * @addr: target user address of this page
2173 * @pfn: source kernel pfn
2174 * @pgprot: pgprot flags for the inserted page
2175 *
a1a0aea5 2176 * This is exactly like vmf_insert_pfn(), except that it allows drivers
f5e6d1d5
MW
2177 * to override pgprot on a per-page basis.
2178 *
2179 * This only makes sense for IO mappings, and it makes no sense for
2180 * COW mappings. In general, using multiple vmas is preferable;
ae2b01f3 2181 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
f5e6d1d5
MW
2182 * impractical.
2183 *
28d8b812
LS
2184 * pgprot typically only differs from @vma->vm_page_prot when drivers set
2185 * caching- and encryption bits different than those of @vma->vm_page_prot,
2186 * because the caching- or encryption mode may not be known at mmap() time.
2187 *
2188 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2189 * to set caching and encryption bits for those vmas (except for COW pages).
2190 * This is ensured by core vm only modifying these page table entries using
2191 * functions that don't touch caching- or encryption bits, using pte_modify()
2192 * if needed. (See for example mprotect()).
2193 *
2194 * Also when new page-table entries are created, this is only done using the
2195 * fault() callback, and never using the value of vma->vm_page_prot,
2196 * except for page-table entries that point to anonymous pages as the result
2197 * of COW.
574c5b3d 2198 *
ae2b01f3 2199 * Context: Process context. May allocate using %GFP_KERNEL.
f5e6d1d5
MW
2200 * Return: vm_fault_t value.
2201 */
2202vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2203 unsigned long pfn, pgprot_t pgprot)
2204{
6d958546
MW
2205 /*
2206 * Technically, architectures with pte_special can avoid all these
2207 * restrictions (same for remap_pfn_range). However we would like
2208 * consistency in testing and feature parity among all, so we should
2209 * try to keep these invariants in place for everybody.
2210 */
2211 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2212 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2213 (VM_PFNMAP|VM_MIXEDMAP));
2214 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2215 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2216
2217 if (addr < vma->vm_start || addr >= vma->vm_end)
2218 return VM_FAULT_SIGBUS;
2219
2220 if (!pfn_modify_allowed(pfn, pgprot))
2221 return VM_FAULT_SIGBUS;
2222
2223 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2224
9b5a8e00 2225 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
6d958546 2226 false);
f5e6d1d5
MW
2227}
2228EXPORT_SYMBOL(vmf_insert_pfn_prot);
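
/*
 * Illustrative sketch: a hypothetical vm_ops->fault handler for a vma set
 * up with VM_PFNMAP at mmap() time, inserting a device pfn with
 * write-combining attributes. 'dev_base_pfn' is a made-up per-device
 * value; pgprot_writecombine() and the struct vm_fault fields are real.
 */
static vm_fault_t example_wc_fault(struct vm_fault *vmf)
{
	unsigned long dev_base_pfn = 0x100000;	/* assumption: device aperture pfn */
	unsigned long pfn = dev_base_pfn + vmf->pgoff;

	return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
				   pgprot_writecombine(vmf->vma->vm_page_prot));
}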
e0dc0d8f 2229
ae2b01f3
MW
2230/**
2231 * vmf_insert_pfn - insert single pfn into user vma
2232 * @vma: user vma to map to
2233 * @addr: target user address of this page
2234 * @pfn: source kernel pfn
2235 *
2236 * Similar to vm_insert_page, this allows drivers to insert individual pages
2237 * they've allocated into a user vma. Same comments apply.
2238 *
2239 * This function should only be called from a vm_ops->fault handler, and
2240 * in that case the handler should return the result of this function.
2241 *
2242 * vma cannot be a COW mapping.
2243 *
2244 * As this is called only for pages that do not currently exist, we
2245 * do not need to flush old virtual caches or the TLB.
2246 *
2247 * Context: Process context. May allocate using %GFP_KERNEL.
2248 * Return: vm_fault_t value.
2249 */
2250vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2251 unsigned long pfn)
2252{
2253 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2254}
2255EXPORT_SYMBOL(vmf_insert_pfn);
2256
785a3fab
DW
2257static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2258{
2259 /* these checks mirror the abort conditions in vm_normal_page */
2260 if (vma->vm_flags & VM_MIXEDMAP)
2261 return true;
2262 if (pfn_t_devmap(pfn))
2263 return true;
2264 if (pfn_t_special(pfn))
2265 return true;
2266 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2267 return true;
2268 return false;
2269}
2270
79f3aa5b 2271static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
28d8b812 2272 unsigned long addr, pfn_t pfn, bool mkwrite)
423bad60 2273{
28d8b812 2274 pgprot_t pgprot = vma->vm_page_prot;
79f3aa5b 2275 int err;
87744ab3 2276
785a3fab 2277 BUG_ON(!vm_mixed_ok(vma, pfn));
e0dc0d8f 2278
423bad60 2279 if (addr < vma->vm_start || addr >= vma->vm_end)
79f3aa5b 2280 return VM_FAULT_SIGBUS;
308a047c
BP
2281
2282 track_pfn_insert(vma, &pgprot, pfn);
e0dc0d8f 2283
42e4089c 2284 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
79f3aa5b 2285 return VM_FAULT_SIGBUS;
42e4089c 2286
423bad60
NP
2287 /*
2288 * If we don't have pte special, then we have to use the pfn_valid()
2289 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2290 * refcount the page if pfn_valid is true (hence insert_page rather
62eede62
HD
2291 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
 2292 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
 2292 * without pte special, it would then be refcounted as a normal page.
423bad60 2293 */
00b3a331
LD
2294 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2295 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
423bad60
NP
2296 struct page *page;
2297
03fc2da6
DW
2298 /*
2299 * At this point we are committed to insert_page()
2300 * regardless of whether the caller specified flags that
2301 * result in pfn_t_has_page() == false.
2302 */
2303 page = pfn_to_page(pfn_t_to_pfn(pfn));
79f3aa5b
MW
2304 err = insert_page(vma, addr, page, pgprot);
2305 } else {
9b5a8e00 2306 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
423bad60 2307 }
b2770da6 2308
5d747637
MW
2309 if (err == -ENOMEM)
2310 return VM_FAULT_OOM;
2311 if (err < 0 && err != -EBUSY)
2312 return VM_FAULT_SIGBUS;
2313
2314 return VM_FAULT_NOPAGE;
e0dc0d8f 2315}
79f3aa5b
MW
2316
2317vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2318 pfn_t pfn)
2319{
28d8b812 2320 return __vm_insert_mixed(vma, addr, pfn, false);
79f3aa5b 2321}
5d747637 2322EXPORT_SYMBOL(vmf_insert_mixed);
e0dc0d8f 2323
ab77dab4
SJ
2324/*
 2325 * If the insertion of the PTE failed because someone else already added a
 2326 * different entry in the meantime, we treat that as success, as we assume
 2327 * the same entry was actually inserted.
2328 */
ab77dab4
SJ
2329vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2330 unsigned long addr, pfn_t pfn)
b2770da6 2331{
28d8b812 2332 return __vm_insert_mixed(vma, addr, pfn, true);
b2770da6 2333}
ab77dab4 2334EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
b2770da6 2335
1da177e4
LT
2336/*
 2337 * Maps a range of physical memory into the requested pages. The old
 2338 * mappings are removed. Any references to nonexistent pages result
 2339 * in null mappings (currently treated as "copy-on-access").
2340 */
2341static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2342 unsigned long addr, unsigned long end,
2343 unsigned long pfn, pgprot_t prot)
2344{
90a3e375 2345 pte_t *pte, *mapped_pte;
c74df32c 2346 spinlock_t *ptl;
42e4089c 2347 int err = 0;
1da177e4 2348
90a3e375 2349 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1da177e4
LT
2350 if (!pte)
2351 return -ENOMEM;
6606c3e0 2352 arch_enter_lazy_mmu_mode();
1da177e4 2353 do {
c33c7948 2354 BUG_ON(!pte_none(ptep_get(pte)));
42e4089c
AK
2355 if (!pfn_modify_allowed(pfn, prot)) {
2356 err = -EACCES;
2357 break;
2358 }
7e675137 2359 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1da177e4
LT
2360 pfn++;
2361 } while (pte++, addr += PAGE_SIZE, addr != end);
6606c3e0 2362 arch_leave_lazy_mmu_mode();
90a3e375 2363 pte_unmap_unlock(mapped_pte, ptl);
42e4089c 2364 return err;
1da177e4
LT
2365}
2366
2367static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2368 unsigned long addr, unsigned long end,
2369 unsigned long pfn, pgprot_t prot)
2370{
2371 pmd_t *pmd;
2372 unsigned long next;
42e4089c 2373 int err;
1da177e4
LT
2374
2375 pfn -= addr >> PAGE_SHIFT;
2376 pmd = pmd_alloc(mm, pud, addr);
2377 if (!pmd)
2378 return -ENOMEM;
f66055ab 2379 VM_BUG_ON(pmd_trans_huge(*pmd));
1da177e4
LT
2380 do {
2381 next = pmd_addr_end(addr, end);
42e4089c
AK
2382 err = remap_pte_range(mm, pmd, addr, next,
2383 pfn + (addr >> PAGE_SHIFT), prot);
2384 if (err)
2385 return err;
1da177e4
LT
2386 } while (pmd++, addr = next, addr != end);
2387 return 0;
2388}
2389
c2febafc 2390static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
1da177e4
LT
2391 unsigned long addr, unsigned long end,
2392 unsigned long pfn, pgprot_t prot)
2393{
2394 pud_t *pud;
2395 unsigned long next;
42e4089c 2396 int err;
1da177e4
LT
2397
2398 pfn -= addr >> PAGE_SHIFT;
c2febafc 2399 pud = pud_alloc(mm, p4d, addr);
1da177e4
LT
2400 if (!pud)
2401 return -ENOMEM;
2402 do {
2403 next = pud_addr_end(addr, end);
42e4089c
AK
2404 err = remap_pmd_range(mm, pud, addr, next,
2405 pfn + (addr >> PAGE_SHIFT), prot);
2406 if (err)
2407 return err;
1da177e4
LT
2408 } while (pud++, addr = next, addr != end);
2409 return 0;
2410}
2411
c2febafc
KS
2412static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2413 unsigned long addr, unsigned long end,
2414 unsigned long pfn, pgprot_t prot)
2415{
2416 p4d_t *p4d;
2417 unsigned long next;
42e4089c 2418 int err;
c2febafc
KS
2419
2420 pfn -= addr >> PAGE_SHIFT;
2421 p4d = p4d_alloc(mm, pgd, addr);
2422 if (!p4d)
2423 return -ENOMEM;
2424 do {
2425 next = p4d_addr_end(addr, end);
42e4089c
AK
2426 err = remap_pud_range(mm, p4d, addr, next,
2427 pfn + (addr >> PAGE_SHIFT), prot);
2428 if (err)
2429 return err;
c2febafc
KS
2430 } while (p4d++, addr = next, addr != end);
2431 return 0;
2432}
2433
74ffa5a3
CH
2434/*
2435 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
2436 * must have pre-validated the caching bits of the pgprot_t.
bfa5bf6d 2437 */
74ffa5a3
CH
2438int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2439 unsigned long pfn, unsigned long size, pgprot_t prot)
1da177e4
LT
2440{
2441 pgd_t *pgd;
2442 unsigned long next;
2d15cab8 2443 unsigned long end = addr + PAGE_ALIGN(size);
1da177e4
LT
2444 struct mm_struct *mm = vma->vm_mm;
2445 int err;
2446
0c4123e3
AZ
2447 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2448 return -EINVAL;
2449
1da177e4
LT
2450 /*
2451 * Physically remapped pages are special. Tell the
2452 * rest of the world about it:
2453 * VM_IO tells people not to look at these pages
2454 * (accesses can have side effects).
6aab341e
LT
2455 * VM_PFNMAP tells the core MM that the base pages are just
2456 * raw PFN mappings, and do not have a "struct page" associated
2457 * with them.
314e51b9
KK
2458 * VM_DONTEXPAND
2459 * Disable vma merging and expanding with mremap().
2460 * VM_DONTDUMP
2461 * Omit vma from core dump, even when VM_IO turned off.
fb155c16
LT
2462 *
2463 * There's a horrible special case to handle copy-on-write
2464 * behaviour that some programs depend on. We mark the "original"
2465 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
b3b9c293 2466 * See vm_normal_page() for details.
1da177e4 2467 */
b3b9c293
KK
2468 if (is_cow_mapping(vma->vm_flags)) {
2469 if (addr != vma->vm_start || end != vma->vm_end)
2470 return -EINVAL;
fb155c16 2471 vma->vm_pgoff = pfn;
b3b9c293
KK
2472 }
2473
1c71222e 2474 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1da177e4
LT
2475
2476 BUG_ON(addr >= end);
2477 pfn -= addr >> PAGE_SHIFT;
2478 pgd = pgd_offset(mm, addr);
2479 flush_cache_range(vma, addr, end);
1da177e4
LT
2480 do {
2481 next = pgd_addr_end(addr, end);
c2febafc 2482 err = remap_p4d_range(mm, pgd, addr, next,
1da177e4
LT
2483 pfn + (addr >> PAGE_SHIFT), prot);
2484 if (err)
74ffa5a3 2485 return err;
1da177e4 2486 } while (pgd++, addr = next, addr != end);
2ab64037 2487
74ffa5a3
CH
2488 return 0;
2489}
2490
2491/**
2492 * remap_pfn_range - remap kernel memory to userspace
2493 * @vma: user vma to map to
2494 * @addr: target page aligned user address to start at
2495 * @pfn: page frame number of kernel physical memory address
2496 * @size: size of mapping area
2497 * @prot: page protection flags for this mapping
2498 *
2499 * Note: this is only safe if the mm semaphore is held when called.
2500 *
2501 * Return: %0 on success, negative error code otherwise.
2502 */
2503int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2504 unsigned long pfn, unsigned long size, pgprot_t prot)
2505{
2506 int err;
2507
2508 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2ab64037 2509 if (err)
74ffa5a3 2510 return -EINVAL;
2ab64037 2511
74ffa5a3
CH
2512 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2513 if (err)
68f48381 2514 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
1da177e4
LT
2515 return err;
2516}
2517EXPORT_SYMBOL(remap_pfn_range);
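
/*
 * Illustrative sketch: the usual remap_pfn_range() call from a hypothetical
 * character-device mmap handler, mapping part of a physical region at the
 * file offset the user asked for. 'dev_phys' and 'dev_size' are made-up
 * per-device values; the helpers used are real kernel interfaces.
 */
static int example_mmap_mmio(struct file *file, struct vm_area_struct *vma)
{
	phys_addr_t dev_phys = 0xfd000000;		/* assumption: device BAR */
	unsigned long dev_size = SZ_1M;			/* assumption */
	unsigned long req = vma->vm_end - vma->vm_start;

	if ((vma->vm_pgoff << PAGE_SHIFT) + req > dev_size)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start,
			       PHYS_PFN(dev_phys) + vma->vm_pgoff,
			       req, pgprot_noncached(vma->vm_page_prot));
}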
2518
b4cbb197
LT
2519/**
2520 * vm_iomap_memory - remap memory to userspace
2521 * @vma: user vma to map to
abd69b9e 2522 * @start: start of the physical memory to be mapped
b4cbb197
LT
2523 * @len: size of area
2524 *
2525 * This is a simplified io_remap_pfn_range() for common driver use. The
2526 * driver just needs to give us the physical memory range to be mapped,
2527 * we'll figure out the rest from the vma information.
2528 *
 2529 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to set up
 2530 * write-combining or similar attributes.
a862f68a
MR
2531 *
2532 * Return: %0 on success, negative error code otherwise.
b4cbb197
LT
2533 */
2534int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2535{
2536 unsigned long vm_len, pfn, pages;
2537
2538 /* Check that the physical memory area passed in looks valid */
2539 if (start + len < start)
2540 return -EINVAL;
2541 /*
2542 * You *really* shouldn't map things that aren't page-aligned,
2543 * but we've historically allowed it because IO memory might
2544 * just have smaller alignment.
2545 */
2546 len += start & ~PAGE_MASK;
2547 pfn = start >> PAGE_SHIFT;
2548 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2549 if (pfn + pages < pfn)
2550 return -EINVAL;
2551
2552 /* We start the mapping 'vm_pgoff' pages into the area */
2553 if (vma->vm_pgoff > pages)
2554 return -EINVAL;
2555 pfn += vma->vm_pgoff;
2556 pages -= vma->vm_pgoff;
2557
2558 /* Can we fit all of the mapping? */
2559 vm_len = vma->vm_end - vma->vm_start;
2560 if (vm_len >> PAGE_SHIFT > pages)
2561 return -EINVAL;
2562
2563 /* Ok, let it rip */
2564 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2565}
2566EXPORT_SYMBOL(vm_iomap_memory);
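
/*
 * Illustrative sketch: the same device mapping as the remap_pfn_range()
 * example above, but letting vm_iomap_memory() derive the pfn, length and
 * offset checks from the vma itself. The physical base and size remain
 * made-up per-device values.
 */
static int example_mmap_mmio_simple(struct file *file, struct vm_area_struct *vma)
{
	phys_addr_t dev_phys = 0xfd000000;	/* assumption: device BAR */
	unsigned long dev_size = SZ_1M;		/* assumption */

	return vm_iomap_memory(vma, dev_phys, dev_size);
}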
2567
aee16b3c
JF
2568static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2569 unsigned long addr, unsigned long end,
e80d3909
JR
2570 pte_fn_t fn, void *data, bool create,
2571 pgtbl_mod_mask *mask)
aee16b3c 2572{
8abb50c7 2573 pte_t *pte, *mapped_pte;
be1db475 2574 int err = 0;
3f649ab7 2575 spinlock_t *ptl;
aee16b3c 2576
be1db475 2577 if (create) {
8abb50c7 2578 mapped_pte = pte = (mm == &init_mm) ?
e80d3909 2579 pte_alloc_kernel_track(pmd, addr, mask) :
be1db475
DA
2580 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2581 if (!pte)
2582 return -ENOMEM;
2583 } else {
8abb50c7 2584 mapped_pte = pte = (mm == &init_mm) ?
be1db475
DA
2585 pte_offset_kernel(pmd, addr) :
2586 pte_offset_map_lock(mm, pmd, addr, &ptl);
3db82b93
HD
2587 if (!pte)
2588 return -EINVAL;
be1db475 2589 }
aee16b3c 2590
38e0edb1
JF
2591 arch_enter_lazy_mmu_mode();
2592
eeb4a05f
CH
2593 if (fn) {
2594 do {
c33c7948 2595 if (create || !pte_none(ptep_get(pte))) {
eeb4a05f
CH
2596 err = fn(pte++, addr, data);
2597 if (err)
2598 break;
2599 }
2600 } while (addr += PAGE_SIZE, addr != end);
2601 }
e80d3909 2602 *mask |= PGTBL_PTE_MODIFIED;
aee16b3c 2603
38e0edb1
JF
2604 arch_leave_lazy_mmu_mode();
2605
aee16b3c 2606 if (mm != &init_mm)
8abb50c7 2607 pte_unmap_unlock(mapped_pte, ptl);
aee16b3c
JF
2608 return err;
2609}
2610
2611static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2612 unsigned long addr, unsigned long end,
e80d3909
JR
2613 pte_fn_t fn, void *data, bool create,
2614 pgtbl_mod_mask *mask)
aee16b3c
JF
2615{
2616 pmd_t *pmd;
2617 unsigned long next;
be1db475 2618 int err = 0;
aee16b3c 2619
ceb86879
AK
2620 BUG_ON(pud_huge(*pud));
2621
be1db475 2622 if (create) {
e80d3909 2623 pmd = pmd_alloc_track(mm, pud, addr, mask);
be1db475
DA
2624 if (!pmd)
2625 return -ENOMEM;
2626 } else {
2627 pmd = pmd_offset(pud, addr);
2628 }
aee16b3c
JF
2629 do {
2630 next = pmd_addr_end(addr, end);
0c95cba4
NP
2631 if (pmd_none(*pmd) && !create)
2632 continue;
2633 if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2634 return -EINVAL;
2635 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2636 if (!create)
2637 continue;
2638 pmd_clear_bad(pmd);
be1db475 2639 }
0c95cba4
NP
2640 err = apply_to_pte_range(mm, pmd, addr, next,
2641 fn, data, create, mask);
2642 if (err)
2643 break;
aee16b3c 2644 } while (pmd++, addr = next, addr != end);
0c95cba4 2645
aee16b3c
JF
2646 return err;
2647}
2648
c2febafc 2649static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
aee16b3c 2650 unsigned long addr, unsigned long end,
e80d3909
JR
2651 pte_fn_t fn, void *data, bool create,
2652 pgtbl_mod_mask *mask)
aee16b3c
JF
2653{
2654 pud_t *pud;
2655 unsigned long next;
be1db475 2656 int err = 0;
aee16b3c 2657
be1db475 2658 if (create) {
e80d3909 2659 pud = pud_alloc_track(mm, p4d, addr, mask);
be1db475
DA
2660 if (!pud)
2661 return -ENOMEM;
2662 } else {
2663 pud = pud_offset(p4d, addr);
2664 }
aee16b3c
JF
2665 do {
2666 next = pud_addr_end(addr, end);
0c95cba4
NP
2667 if (pud_none(*pud) && !create)
2668 continue;
2669 if (WARN_ON_ONCE(pud_leaf(*pud)))
2670 return -EINVAL;
2671 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2672 if (!create)
2673 continue;
2674 pud_clear_bad(pud);
be1db475 2675 }
0c95cba4
NP
2676 err = apply_to_pmd_range(mm, pud, addr, next,
2677 fn, data, create, mask);
2678 if (err)
2679 break;
aee16b3c 2680 } while (pud++, addr = next, addr != end);
0c95cba4 2681
aee16b3c
JF
2682 return err;
2683}
2684
c2febafc
KS
2685static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2686 unsigned long addr, unsigned long end,
e80d3909
JR
2687 pte_fn_t fn, void *data, bool create,
2688 pgtbl_mod_mask *mask)
c2febafc
KS
2689{
2690 p4d_t *p4d;
2691 unsigned long next;
be1db475 2692 int err = 0;
c2febafc 2693
be1db475 2694 if (create) {
e80d3909 2695 p4d = p4d_alloc_track(mm, pgd, addr, mask);
be1db475
DA
2696 if (!p4d)
2697 return -ENOMEM;
2698 } else {
2699 p4d = p4d_offset(pgd, addr);
2700 }
c2febafc
KS
2701 do {
2702 next = p4d_addr_end(addr, end);
0c95cba4
NP
2703 if (p4d_none(*p4d) && !create)
2704 continue;
2705 if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2706 return -EINVAL;
2707 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2708 if (!create)
2709 continue;
2710 p4d_clear_bad(p4d);
be1db475 2711 }
0c95cba4
NP
2712 err = apply_to_pud_range(mm, p4d, addr, next,
2713 fn, data, create, mask);
2714 if (err)
2715 break;
c2febafc 2716 } while (p4d++, addr = next, addr != end);
0c95cba4 2717
c2febafc
KS
2718 return err;
2719}
2720
be1db475
DA
2721static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2722 unsigned long size, pte_fn_t fn,
2723 void *data, bool create)
aee16b3c
JF
2724{
2725 pgd_t *pgd;
e80d3909 2726 unsigned long start = addr, next;
57250a5b 2727 unsigned long end = addr + size;
e80d3909 2728 pgtbl_mod_mask mask = 0;
be1db475 2729 int err = 0;
aee16b3c 2730
9cb65bc3
MP
2731 if (WARN_ON(addr >= end))
2732 return -EINVAL;
2733
aee16b3c
JF
2734 pgd = pgd_offset(mm, addr);
2735 do {
2736 next = pgd_addr_end(addr, end);
0c95cba4 2737 if (pgd_none(*pgd) && !create)
be1db475 2738 continue;
0c95cba4
NP
2739 if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2740 return -EINVAL;
2741 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2742 if (!create)
2743 continue;
2744 pgd_clear_bad(pgd);
2745 }
2746 err = apply_to_p4d_range(mm, pgd, addr, next,
2747 fn, data, create, &mask);
aee16b3c
JF
2748 if (err)
2749 break;
2750 } while (pgd++, addr = next, addr != end);
57250a5b 2751
e80d3909
JR
2752 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2753 arch_sync_kernel_mappings(start, start + size);
2754
aee16b3c
JF
2755 return err;
2756}
be1db475
DA
2757
2758/*
2759 * Scan a region of virtual memory, filling in page tables as necessary
2760 * and calling a provided function on each leaf page table.
2761 */
2762int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2763 unsigned long size, pte_fn_t fn, void *data)
2764{
2765 return __apply_to_page_range(mm, addr, size, fn, data, true);
2766}
aee16b3c
JF
2767EXPORT_SYMBOL_GPL(apply_to_page_range);
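
/*
 * Illustrative sketch: the shape of a pte_fn_t callback as passed to
 * apply_to_page_range(). This one merely counts populated ptes; the name
 * and the 'data' cookie layout are hypothetical. Remember that
 * apply_to_page_range() fills in missing page tables for the whole range
 * before invoking the callback.
 */
static int example_pte_cb(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (!pte_none(ptep_get(pte)))
		(*count)++;
	return 0;		/* returning non-zero aborts the walk */
}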
2768
be1db475
DA
2769/*
2770 * Scan a region of virtual memory, calling a provided function on
2771 * each leaf page table where it exists.
2772 *
2773 * Unlike apply_to_page_range, this does _not_ fill in page tables
2774 * where they are absent.
2775 */
2776int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2777 unsigned long size, pte_fn_t fn, void *data)
2778{
2779 return __apply_to_page_range(mm, addr, size, fn, data, false);
2780}
2781EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
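
/*
 * Illustrative sketch: driving the example_pte_cb callback sketched above
 * over a kernel virtual range. The "existing" variant allocates nothing
 * for holes, which is what a read-only walk like this wants. The wrapper
 * name is hypothetical.
 */
static unsigned long example_count_mapped(unsigned long start, unsigned long size)
{
	unsigned long count = 0;

	apply_to_existing_page_range(&init_mm, start, size, example_pte_cb, &count);
	return count;
}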
2782
8f4e2101 2783/*
9b4bdd2f
KS
2784 * handle_pte_fault chooses page fault handler according to an entry which was
2785 * read non-atomically. Before making any commitment, on those architectures
2786 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2787 * parts, do_swap_page must check under lock before unmapping the pte and
2788 * proceeding (but do_wp_page is only called after already making such a check;
a335b2e1 2789 * and do_anonymous_page can safely check later on).
8f4e2101 2790 */
2ca99358 2791static inline int pte_unmap_same(struct vm_fault *vmf)
8f4e2101
HD
2792{
2793 int same = 1;
923717cb 2794#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
8f4e2101 2795 if (sizeof(pte_t) > sizeof(unsigned long)) {
c7ad0880 2796 spin_lock(vmf->ptl);
c33c7948 2797 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
c7ad0880 2798 spin_unlock(vmf->ptl);
8f4e2101
HD
2799 }
2800#endif
2ca99358
PX
2801 pte_unmap(vmf->pte);
2802 vmf->pte = NULL;
8f4e2101
HD
2803 return same;
2804}
2805
a873dfe1
TL
2806/*
2807 * Return:
2808 * 0: copied succeeded
2809 * -EHWPOISON: copy failed due to hwpoison in source page
2810 * -EAGAIN: copied failed (some other reason)
2811 */
2812static inline int __wp_page_copy_user(struct page *dst, struct page *src,
2813 struct vm_fault *vmf)
6aab341e 2814{
a873dfe1 2815 int ret;
83d116c5
JH
2816 void *kaddr;
2817 void __user *uaddr;
83d116c5
JH
2818 struct vm_area_struct *vma = vmf->vma;
2819 struct mm_struct *mm = vma->vm_mm;
2820 unsigned long addr = vmf->address;
2821
83d116c5 2822 if (likely(src)) {
d302c239
TL
2823 if (copy_mc_user_highpage(dst, src, addr, vma)) {
2824 memory_failure_queue(page_to_pfn(src), 0);
a873dfe1 2825 return -EHWPOISON;
d302c239 2826 }
a873dfe1 2827 return 0;
83d116c5
JH
2828 }
2829
6aab341e
LT
2830 /*
2831 * If the source page was a PFN mapping, we don't have
2832 * a "struct page" for it. We do a best-effort copy by
2833 * just copying from the original user address. If that
2834 * fails, we just zero-fill it. Live with it.
2835 */
83d116c5
JH
2836 kaddr = kmap_atomic(dst);
2837 uaddr = (void __user *)(addr & PAGE_MASK);
2838
2839 /*
2840 * On architectures with software "accessed" bits, we would
2841 * take a double page fault, so mark it accessed here.
2842 */
3db82b93 2843 vmf->pte = NULL;
e1fd09e3 2844 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
83d116c5 2845 pte_t entry;
5d2a2dbb 2846
83d116c5 2847 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
c33c7948 2848 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
83d116c5
JH
2849 /*
 2850			 * Another thread has already handled the fault,
7df67697 2851			 * so just update the local tlb
83d116c5 2852 */
a92cbb82
HD
2853 if (vmf->pte)
2854 update_mmu_tlb(vma, addr, vmf->pte);
a873dfe1 2855 ret = -EAGAIN;
83d116c5
JH
2856 goto pte_unlock;
2857 }
2858
2859 entry = pte_mkyoung(vmf->orig_pte);
2860 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2861 update_mmu_cache(vma, addr, vmf->pte);
2862 }
2863
2864 /*
2865 * This really shouldn't fail, because the page is there
2866 * in the page tables. But it might just be unreadable,
2867 * in which case we just give up and fill the result with
2868 * zeroes.
2869 */
2870 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3db82b93 2871 if (vmf->pte)
c3e5ea6e
KS
2872 goto warn;
2873
2874 /* Re-validate under PTL if the page is still mapped */
2875 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
c33c7948 2876 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
7df67697 2877 /* The PTE changed under us, update local tlb */
a92cbb82
HD
2878 if (vmf->pte)
2879 update_mmu_tlb(vma, addr, vmf->pte);
a873dfe1 2880 ret = -EAGAIN;
c3e5ea6e
KS
2881 goto pte_unlock;
2882 }
2883
5d2a2dbb 2884 /*
985ba004 2885		 * The same page may have been mapped back since the last copy attempt.
c3e5ea6e 2886 * Try to copy again under PTL.
5d2a2dbb 2887 */
c3e5ea6e
KS
2888 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2889 /*
 2890			 * Warn in case there is some obscure
 2891			 * use-case.
2892 */
2893warn:
2894 WARN_ON_ONCE(1);
2895 clear_page(kaddr);
2896 }
83d116c5
JH
2897 }
2898
a873dfe1 2899 ret = 0;
83d116c5
JH
2900
2901pte_unlock:
3db82b93 2902 if (vmf->pte)
83d116c5
JH
2903 pte_unmap_unlock(vmf->pte, vmf->ptl);
2904 kunmap_atomic(kaddr);
2905 flush_dcache_page(dst);
2906
2907 return ret;
6aab341e
LT
2908}
2909
c20cd45e
MH
2910static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2911{
2912 struct file *vm_file = vma->vm_file;
2913
2914 if (vm_file)
2915 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2916
2917 /*
2918 * Special mappings (e.g. VDSO) do not have any file so fake
2919 * a default GFP_KERNEL for them.
2920 */
2921 return GFP_KERNEL;
2922}
2923
fb09a464
KS
2924/*
2925 * Notify the address space that the page is about to become writable so that
2926 * it can prohibit this or wait for the page to get into an appropriate state.
2927 *
2928 * We do this without the lock held, so that it can sleep if it needs to.
2929 */
2b740303 2930static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
fb09a464 2931{
2b740303 2932 vm_fault_t ret;
38b8cb7f
JK
2933 struct page *page = vmf->page;
2934 unsigned int old_flags = vmf->flags;
fb09a464 2935
38b8cb7f 2936 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
fb09a464 2937
dc617f29
DW
2938 if (vmf->vma->vm_file &&
2939 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2940 return VM_FAULT_SIGBUS;
2941
11bac800 2942 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
38b8cb7f
JK
2943 /* Restore original flags so that caller is not surprised */
2944 vmf->flags = old_flags;
fb09a464
KS
2945 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2946 return ret;
2947 if (unlikely(!(ret & VM_FAULT_LOCKED))) {
2948 lock_page(page);
2949 if (!page->mapping) {
2950 unlock_page(page);
2951 return 0; /* retry */
2952 }
2953 ret |= VM_FAULT_LOCKED;
2954 } else
2955 VM_BUG_ON_PAGE(!PageLocked(page), page);
2956 return ret;
2957}
2958
97ba0c2b
JK
2959/*
2960 * Handle dirtying of a page in shared file mapping on a write fault.
2961 *
2962 * The function expects the page to be locked and unlocks it.
2963 */
89b15332 2964static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
97ba0c2b 2965{
89b15332 2966 struct vm_area_struct *vma = vmf->vma;
97ba0c2b 2967 struct address_space *mapping;
89b15332 2968 struct page *page = vmf->page;
97ba0c2b
JK
2969 bool dirtied;
2970 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2971
2972 dirtied = set_page_dirty(page);
2973 VM_BUG_ON_PAGE(PageAnon(page), page);
2974 /*
2975 * Take a local copy of the address_space - page.mapping may be zeroed
2976 * by truncate after unlock_page(). The address_space itself remains
2977 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
2978 * release semantics to prevent the compiler from undoing this copying.
2979 */
2980 mapping = page_rmapping(page);
2981 unlock_page(page);
2982
89b15332
JW
2983 if (!page_mkwrite)
2984 file_update_time(vma->vm_file);
2985
2986 /*
2987 * Throttle page dirtying rate down to writeback speed.
2988 *
2989 * mapping may be NULL here because some device drivers do not
2990 * set page.mapping but still dirty their pages
2991 *
c1e8d7c6 2992 * Drop the mmap_lock before waiting on IO, if we can. The file
89b15332
JW
2993 * is pinning the mapping, as per above.
2994 */
97ba0c2b 2995 if ((dirtied || page_mkwrite) && mapping) {
89b15332
JW
2996 struct file *fpin;
2997
2998 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
97ba0c2b 2999 balance_dirty_pages_ratelimited(mapping);
89b15332
JW
3000 if (fpin) {
3001 fput(fpin);
d9272525 3002 return VM_FAULT_COMPLETED;
89b15332 3003 }
97ba0c2b
JK
3004 }
3005
89b15332 3006 return 0;
97ba0c2b
JK
3007}
3008
4e047f89
SR
3009/*
3010 * Handle write page faults for pages that can be reused in the current vma
3011 *
3012 * This can happen either due to the mapping being with the VM_SHARED flag,
3013 * or due to us being the last reference standing to the page. In either
3014 * case, all we need to do here is to mark the page as writable and update
3015 * any related book-keeping.
3016 */
997dd98d 3017static inline void wp_page_reuse(struct vm_fault *vmf)
82b0f8c3 3018 __releases(vmf->ptl)
4e047f89 3019{
82b0f8c3 3020 struct vm_area_struct *vma = vmf->vma;
a41b70d6 3021 struct page *page = vmf->page;
4e047f89 3022 pte_t entry;
6c287605 3023
c89357e2 3024 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
cdb281e6 3025 VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));
6c287605 3026
4e047f89
SR
3027 /*
 3028	 * Clear the page's cpupid information as the existing
3029 * information potentially belongs to a now completely
3030 * unrelated process.
3031 */
3032 if (page)
3033 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
3034
2994302b
JK
3035 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3036 entry = pte_mkyoung(vmf->orig_pte);
4e047f89 3037 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
82b0f8c3
JK
3038 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3039 update_mmu_cache(vma, vmf->address, vmf->pte);
3040 pte_unmap_unlock(vmf->pte, vmf->ptl);
798a6b87 3041 count_vm_event(PGREUSE);
4e047f89
SR
3042}
3043
2f38ab2c 3044/*
c89357e2
DH
3045 * Handle the case of a page which we actually need to copy to a new page,
3046 * either due to COW or unsharing.
2f38ab2c 3047 *
c1e8d7c6 3048 * Called with mmap_lock locked and the old page referenced, but
2f38ab2c
SR
3049 * without the ptl held.
3050 *
3051 * High level logic flow:
3052 *
3053 * - Allocate a page, copy the content of the old page to the new one.
3054 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3055 * - Take the PTL. If the pte changed, bail out and release the allocated page
3056 * - If the pte is still the way we remember it, update the page table and all
3057 * relevant references. This includes dropping the reference the page-table
3058 * held to the old page, as well as updating the rmap.
3059 * - In any case, unlock the PTL and drop the reference we took to the old page.
3060 */
2b740303 3061static vm_fault_t wp_page_copy(struct vm_fault *vmf)
2f38ab2c 3062{
c89357e2 3063 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
82b0f8c3 3064 struct vm_area_struct *vma = vmf->vma;
bae473a4 3065 struct mm_struct *mm = vma->vm_mm;
28d41a48
MWO
3066 struct folio *old_folio = NULL;
3067 struct folio *new_folio = NULL;
2f38ab2c
SR
3068 pte_t entry;
3069 int page_copied = 0;
ac46d4f3 3070 struct mmu_notifier_range range;
a873dfe1 3071 int ret;
2f38ab2c 3072
662ce1dc
YY
3073 delayacct_wpcopy_start();
3074
28d41a48
MWO
3075 if (vmf->page)
3076 old_folio = page_folio(vmf->page);
2f38ab2c
SR
3077 if (unlikely(anon_vma_prepare(vma)))
3078 goto oom;
3079
2994302b 3080 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
6bc56a4d
MWO
3081 new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
3082 if (!new_folio)
2f38ab2c
SR
3083 goto oom;
3084 } else {
28d41a48
MWO
3085 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
3086 vmf->address, false);
3087 if (!new_folio)
2f38ab2c 3088 goto oom;
83d116c5 3089
28d41a48 3090 ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
a873dfe1 3091 if (ret) {
83d116c5
JH
3092 /*
3093 * COW failed, if the fault was solved by other,
3094 * it's fine. If not, userspace would re-fault on
3095 * the same address and we will handle the fault
3096 * from the second attempt.
a873dfe1 3097 * The -EHWPOISON case will not be retried.
83d116c5 3098 */
28d41a48
MWO
3099 folio_put(new_folio);
3100 if (old_folio)
3101 folio_put(old_folio);
662ce1dc
YY
3102
3103 delayacct_wpcopy_end();
a873dfe1 3104 return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
83d116c5 3105 }
28d41a48 3106 kmsan_copy_page_meta(&new_folio->page, vmf->page);
2f38ab2c 3107 }
2f38ab2c 3108
28d41a48 3109 if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
2f38ab2c 3110 goto oom_free_new;
4d4f75bf 3111 folio_throttle_swaprate(new_folio, GFP_KERNEL);
2f38ab2c 3112
28d41a48 3113 __folio_mark_uptodate(new_folio);
eb3c24f3 3114
7d4a8be0 3115 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
6f4f13e8 3116 vmf->address & PAGE_MASK,
ac46d4f3
JG
3117 (vmf->address & PAGE_MASK) + PAGE_SIZE);
3118 mmu_notifier_invalidate_range_start(&range);
2f38ab2c
SR
3119
3120 /*
3121 * Re-check the pte - we dropped the lock
3122 */
82b0f8c3 3123 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
c33c7948 3124 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
28d41a48
MWO
3125 if (old_folio) {
3126 if (!folio_test_anon(old_folio)) {
3127 dec_mm_counter(mm, mm_counter_file(&old_folio->page));
f1a79412 3128 inc_mm_counter(mm, MM_ANONPAGES);
2f38ab2c
SR
3129 }
3130 } else {
f1a79412 3131 inc_mm_counter(mm, MM_ANONPAGES);
2f38ab2c 3132 }
2994302b 3133 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
28d41a48 3134 entry = mk_pte(&new_folio->page, vma->vm_page_prot);
50c25ee9 3135 entry = pte_sw_mkyoung(entry);
c89357e2
DH
3136 if (unlikely(unshare)) {
3137 if (pte_soft_dirty(vmf->orig_pte))
3138 entry = pte_mksoft_dirty(entry);
3139 if (pte_uffd_wp(vmf->orig_pte))
3140 entry = pte_mkuffd_wp(entry);
3141 } else {
3142 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3143 }
111fe718 3144
2f38ab2c
SR
3145 /*
3146 * Clear the pte entry and flush it first, before updating the
111fe718
NP
3147 * pte with the new entry, to keep TLBs on different CPUs in
3148 * sync. This code used to set the new PTE then flush TLBs, but
3149 * that left a window where the new PTE could be loaded into
3150 * some TLBs while the old PTE remains in others.
2f38ab2c 3151 */
82b0f8c3 3152 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
28d41a48
MWO
3153 folio_add_new_anon_rmap(new_folio, vma, vmf->address);
3154 folio_add_lru_vma(new_folio, vma);
2f38ab2c
SR
3155 /*
3156 * We call the notify macro here because, when using secondary
3157 * mmu page tables (such as kvm shadow page tables), we want the
3158 * new page to be mapped directly into the secondary page table.
3159 */
c89357e2 3160 BUG_ON(unshare && pte_write(entry));
82b0f8c3
JK
3161 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3162 update_mmu_cache(vma, vmf->address, vmf->pte);
28d41a48 3163 if (old_folio) {
2f38ab2c
SR
3164 /*
3165 * Only after switching the pte to the new page may
3166 * we remove the mapcount here. Otherwise another
3167 * process may come and find the rmap count decremented
3168 * before the pte is switched to the new page, and
3169 * "reuse" the old page writing into it while our pte
3170 * here still points into it and can be read by other
3171 * threads.
3172 *
3173 * The critical issue is to order this
 3174		 * page_remove_rmap with the ptep_clear_flush above.
3175 * Those stores are ordered by (if nothing else,)
3176 * the barrier present in the atomic_add_negative
3177 * in page_remove_rmap.
3178 *
3179 * Then the TLB flush in ptep_clear_flush ensures that
3180 * no process can access the old page before the
3181 * decremented mapcount is visible. And the old page
3182 * cannot be reused until after the decremented
3183 * mapcount is visible. So transitively, TLBs to
3184 * old page will be flushed before it can be reused.
3185 */
28d41a48 3186 page_remove_rmap(vmf->page, vma, false);
2f38ab2c
SR
3187 }
3188
3189 /* Free the old page.. */
28d41a48 3190 new_folio = old_folio;
2f38ab2c 3191 page_copied = 1;
3db82b93
HD
3192 pte_unmap_unlock(vmf->pte, vmf->ptl);
3193 } else if (vmf->pte) {
7df67697 3194 update_mmu_tlb(vma, vmf->address, vmf->pte);
3db82b93 3195 pte_unmap_unlock(vmf->pte, vmf->ptl);
2f38ab2c
SR
3196 }
3197
4645b9fe
JG
3198 /*
3199 * No need to double call mmu_notifier->invalidate_range() callback as
3200 * the above ptep_clear_flush_notify() did already call it.
3201 */
ac46d4f3 3202 mmu_notifier_invalidate_range_only_end(&range);
3db82b93
HD
3203
3204 if (new_folio)
3205 folio_put(new_folio);
28d41a48 3206 if (old_folio) {
f4c4a3f4 3207 if (page_copied)
28d41a48
MWO
3208 free_swap_cache(&old_folio->page);
3209 folio_put(old_folio);
2f38ab2c 3210 }
662ce1dc
YY
3211
3212 delayacct_wpcopy_end();
cb8d8633 3213 return 0;
2f38ab2c 3214oom_free_new:
28d41a48 3215 folio_put(new_folio);
2f38ab2c 3216oom:
28d41a48
MWO
3217 if (old_folio)
3218 folio_put(old_folio);
662ce1dc
YY
3219
3220 delayacct_wpcopy_end();
2f38ab2c
SR
3221 return VM_FAULT_OOM;
3222}
3223
66a6197c
JK
3224/**
3225 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3226 * writeable once the page is prepared
3227 *
3228 * @vmf: structure describing the fault
3229 *
3230 * This function handles all that is needed to finish a write page fault in a
3231 * shared mapping due to PTE being read-only once the mapped page is prepared.
a862f68a 3232 * It handles locking of PTE and modifying it.
66a6197c
JK
3233 *
3234 * The function expects the page to be locked or other protection against
3235 * concurrent faults / writeback (such as DAX radix tree locks).
a862f68a 3236 *
2797e79f 3237 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
a862f68a 3238 * we acquired PTE lock.
66a6197c 3239 */
2b740303 3240vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
66a6197c
JK
3241{
3242 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3243 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3244 &vmf->ptl);
3db82b93
HD
3245 if (!vmf->pte)
3246 return VM_FAULT_NOPAGE;
66a6197c
JK
3247 /*
3248 * We might have raced with another page fault while we released the
3249 * pte_offset_map_lock.
3250 */
c33c7948 3251 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
7df67697 3252 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
66a6197c 3253 pte_unmap_unlock(vmf->pte, vmf->ptl);
a19e2553 3254 return VM_FAULT_NOPAGE;
66a6197c
JK
3255 }
3256 wp_page_reuse(vmf);
a19e2553 3257 return 0;
66a6197c
JK
3258}
3259
dd906184
BH
3260/*
3261 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3262 * mapping
3263 */
2b740303 3264static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
dd906184 3265{
82b0f8c3 3266 struct vm_area_struct *vma = vmf->vma;
bae473a4 3267
dd906184 3268 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
2b740303 3269 vm_fault_t ret;
dd906184 3270
82b0f8c3 3271 pte_unmap_unlock(vmf->pte, vmf->ptl);
fe82221f 3272 vmf->flags |= FAULT_FLAG_MKWRITE;
11bac800 3273 ret = vma->vm_ops->pfn_mkwrite(vmf);
2f89dc12 3274 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
dd906184 3275 return ret;
66a6197c 3276 return finish_mkwrite_fault(vmf);
dd906184 3277 }
997dd98d 3278 wp_page_reuse(vmf);
cb8d8633 3279 return 0;
dd906184
BH
3280}
3281
2b740303 3282static vm_fault_t wp_page_shared(struct vm_fault *vmf)
82b0f8c3 3283 __releases(vmf->ptl)
93e478d4 3284{
82b0f8c3 3285 struct vm_area_struct *vma = vmf->vma;
cb8d8633 3286 vm_fault_t ret = 0;
93e478d4 3287
a41b70d6 3288 get_page(vmf->page);
93e478d4 3289
93e478d4 3290 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2b740303 3291 vm_fault_t tmp;
93e478d4 3292
82b0f8c3 3293 pte_unmap_unlock(vmf->pte, vmf->ptl);
38b8cb7f 3294 tmp = do_page_mkwrite(vmf);
93e478d4
SR
3295 if (unlikely(!tmp || (tmp &
3296 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
a41b70d6 3297 put_page(vmf->page);
93e478d4
SR
3298 return tmp;
3299 }
66a6197c 3300 tmp = finish_mkwrite_fault(vmf);
a19e2553 3301 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
a41b70d6 3302 unlock_page(vmf->page);
a41b70d6 3303 put_page(vmf->page);
66a6197c 3304 return tmp;
93e478d4 3305 }
66a6197c
JK
3306 } else {
3307 wp_page_reuse(vmf);
997dd98d 3308 lock_page(vmf->page);
93e478d4 3309 }
89b15332 3310 ret |= fault_dirty_shared_page(vmf);
997dd98d 3311 put_page(vmf->page);
93e478d4 3312
89b15332 3313 return ret;
93e478d4
SR
3314}
3315
1da177e4 3316/*
c89357e2
DH
3317 * This routine handles present pages, when
3318 * * users try to write to a shared page (FAULT_FLAG_WRITE)
3319 * * GUP wants to take a R/O pin on a possibly shared anonymous page
3320 * (FAULT_FLAG_UNSHARE)
3321 *
3322 * It is done by copying the page to a new address and decrementing the
3323 * shared-page counter for the old page.
1da177e4 3324 *
1da177e4
LT
3325 * Note that this routine assumes that the protection checks have been
3326 * done by the caller (the low-level page fault routine in most cases).
c89357e2
DH
3327 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3328 * done any necessary COW.
1da177e4 3329 *
c89357e2
DH
3330 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3331 * though the page will change only once the write actually happens. This
3332 * avoids a few races, and potentially makes it more efficient.
1da177e4 3333 *
c1e8d7c6 3334 * We enter with non-exclusive mmap_lock (to exclude vma changes,
8f4e2101 3335 * but allow concurrent faults), with pte both mapped and locked.
c1e8d7c6 3336 * We return with mmap_lock still held, but pte unmapped and unlocked.
1da177e4 3337 */
2b740303 3338static vm_fault_t do_wp_page(struct vm_fault *vmf)
82b0f8c3 3339 __releases(vmf->ptl)
1da177e4 3340{
c89357e2 3341 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
82b0f8c3 3342 struct vm_area_struct *vma = vmf->vma;
b9086fde 3343 struct folio *folio = NULL;
1da177e4 3344
c89357e2 3345 if (likely(!unshare)) {
c33c7948 3346 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
c89357e2
DH
3347 pte_unmap_unlock(vmf->pte, vmf->ptl);
3348 return handle_userfault(vmf, VM_UFFD_WP);
3349 }
3350
3351 /*
3352 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3353 * is flushed in this case before copying.
3354 */
3355 if (unlikely(userfaultfd_wp(vmf->vma) &&
3356 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3357 flush_tlb_page(vmf->vma, vmf->address);
3358 }
6ce64428 3359
a41b70d6 3360 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
c89357e2 3361
b9086fde
DH
3362 /*
3363 * Shared mapping: we are guaranteed to have VM_WRITE and
3364 * FAULT_FLAG_WRITE set at this point.
3365 */
3366 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
251b97f5 3367 /*
64e45507
PF
3368 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3369 * VM_PFNMAP VMA.
251b97f5
PZ
3370 *
3371 * We should not cow pages in a shared writeable mapping.
dd906184 3372 * Just mark the pages writable and/or call ops->pfn_mkwrite.
251b97f5 3373 */
b9086fde 3374 if (!vmf->page)
2994302b 3375 return wp_pfn_shared(vmf);
b9086fde 3376 return wp_page_shared(vmf);
251b97f5 3377 }
1da177e4 3378
b9086fde
DH
3379 if (vmf->page)
3380 folio = page_folio(vmf->page);
3381
d08b3851 3382 /*
b9086fde
DH
3383 * Private mapping: create an exclusive anonymous page copy if reuse
3384 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
d08b3851 3385 */
b9086fde 3386 if (folio && folio_test_anon(folio)) {
6c287605
DH
3387 /*
3388 * If the page is exclusive to this process we must reuse the
3389 * page without further checks.
3390 */
e4a2ed94 3391 if (PageAnonExclusive(vmf->page))
6c287605
DH
3392 goto reuse;
3393
53a05ad9 3394 /*
e4a2ed94
MWO
3395 * We have to verify under folio lock: these early checks are
3396 * just an optimization to avoid locking the folio and freeing
53a05ad9
DH
3397 * the swapcache if there is little hope that we can reuse.
3398 *
e4a2ed94 3399 * KSM doesn't necessarily raise the folio refcount.
53a05ad9 3400 */
e4a2ed94 3401 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
d4c47097 3402 goto copy;
e4a2ed94 3403 if (!folio_test_lru(folio))
d4c47097 3404 /*
1fec6890
MWO
3405 * We cannot easily detect+handle references from
3406 * remote LRU caches or references to LRU folios.
d4c47097
DH
3407 */
3408 lru_add_drain();
e4a2ed94 3409 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
09854ba9 3410 goto copy;
e4a2ed94 3411 if (!folio_trylock(folio))
09854ba9 3412 goto copy;
e4a2ed94
MWO
3413 if (folio_test_swapcache(folio))
3414 folio_free_swap(folio);
3415 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3416 folio_unlock(folio);
52d1e606 3417 goto copy;
b009c024 3418 }
09854ba9 3419 /*
e4a2ed94
MWO
3420 * Ok, we've got the only folio reference from our mapping
3421 * and the folio is locked, it's dark out, and we're wearing
53a05ad9 3422 * sunglasses. Hit it.
09854ba9 3423 */
e4a2ed94
MWO
3424 page_move_anon_rmap(vmf->page, vma);
3425 folio_unlock(folio);
6c287605 3426reuse:
c89357e2
DH
3427 if (unlikely(unshare)) {
3428 pte_unmap_unlock(vmf->pte, vmf->ptl);
3429 return 0;
3430 }
be068f29 3431 wp_page_reuse(vmf);
cb8d8633 3432 return 0;
1da177e4 3433 }
52d1e606 3434copy:
1da177e4
LT
3435 /*
3436 * Ok, we need to copy. Oh, well..
3437 */
b9086fde
DH
3438 if (folio)
3439 folio_get(folio);
28766805 3440
82b0f8c3 3441 pte_unmap_unlock(vmf->pte, vmf->ptl);
94bfe85b 3442#ifdef CONFIG_KSM
b9086fde 3443 if (folio && folio_test_ksm(folio))
94bfe85b
YY
3444 count_vm_event(COW_KSM);
3445#endif
a41b70d6 3446 return wp_page_copy(vmf);
1da177e4
LT
3447}
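/*
 * Summary of the paths above (descriptive note, not functional code):
 * - shared mappings (VM_SHARED/VM_MAYSHARE) never COW: pfn/mixed mappings
 *   go through wp_pfn_shared(), pagecache-backed ones through
 *   wp_page_shared().
 * - anonymous folios that are provably exclusive to this process (either
 *   PageAnonExclusive, or the sole reference under the folio lock) are
 *   reused in place via wp_page_reuse(), or simply left read-only for
 *   FAULT_FLAG_UNSHARE.
 * - everything else falls through to wp_page_copy() and gets a private
 *   copy (classic copy-on-write).
 */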
3448
97a89413 3449static void unmap_mapping_range_vma(struct vm_area_struct *vma,
1da177e4
LT
3450 unsigned long start_addr, unsigned long end_addr,
3451 struct zap_details *details)
3452{
f5cc4eef 3453 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
1da177e4
LT
3454}
3455
f808c13f 3456static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
232a6a1c
PX
3457 pgoff_t first_index,
3458 pgoff_t last_index,
1da177e4
LT
3459 struct zap_details *details)
3460{
3461 struct vm_area_struct *vma;
1da177e4
LT
3462 pgoff_t vba, vea, zba, zea;
3463
232a6a1c 3464 vma_interval_tree_foreach(vma, root, first_index, last_index) {
1da177e4 3465 vba = vma->vm_pgoff;
d6e93217 3466 vea = vba + vma_pages(vma) - 1;
f9871da9
ML
3467 zba = max(first_index, vba);
3468 zea = min(last_index, vea);
1da177e4 3469
97a89413 3470 unmap_mapping_range_vma(vma,
1da177e4
LT
3471 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3472 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
97a89413 3473 details);
1da177e4
LT
3474 }
3475}
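/*
 * Worked example (illustrative, assuming 4 KiB pages): a VMA with
 * vm_pgoff == 10 and vma_pages() == 10 covers file pages 10..19, so
 * vba == 10 and vea == 19. Zapping first_index == 12, last_index == 100
 * clamps to zba == 12 and zea == 19, i.e. the address range
 * vm_start + 0x2000 .. vm_start + 0xa000 is handed to
 * unmap_mapping_range_vma().
 */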
3476
22061a1f 3477/**
3506659e
MWO
3478 * unmap_mapping_folio() - Unmap single folio from processes.
3479 * @folio: The locked folio to be unmapped.
22061a1f 3480 *
3506659e 3481 * Unmap this folio from any userspace process which still has it mmaped.
22061a1f
HD
3482 * Typically, for efficiency, the range of nearby pages has already been
3483 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
3506659e
MWO
3484 * truncation or invalidation holds the lock on a folio, it may find that
3485 * the page has been remapped again: and then uses unmap_mapping_folio()
22061a1f
HD
3486 * to unmap it finally.
3487 */
3506659e 3488void unmap_mapping_folio(struct folio *folio)
22061a1f 3489{
3506659e 3490 struct address_space *mapping = folio->mapping;
22061a1f 3491 struct zap_details details = { };
232a6a1c
PX
3492 pgoff_t first_index;
3493 pgoff_t last_index;
22061a1f 3494
3506659e 3495 VM_BUG_ON(!folio_test_locked(folio));
22061a1f 3496
3506659e
MWO
3497 first_index = folio->index;
3498 last_index = folio->index + folio_nr_pages(folio) - 1;
232a6a1c 3499
2e148f1e 3500 details.even_cows = false;
3506659e 3501 details.single_folio = folio;
999dad82 3502 details.zap_flags = ZAP_FLAG_DROP_MARKER;
22061a1f 3503
2c865995 3504 i_mmap_lock_read(mapping);
22061a1f 3505 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
232a6a1c
PX
3506 unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3507 last_index, &details);
2c865995 3508 i_mmap_unlock_read(mapping);
22061a1f
HD
3509}
3510
977fbdcd
MW
3511/**
3512 * unmap_mapping_pages() - Unmap pages from processes.
3513 * @mapping: The address space containing pages to be unmapped.
3514 * @start: Index of first page to be unmapped.
3515 * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
3516 * @even_cows: Whether to unmap even private COWed pages.
3517 *
3518 * Unmap the pages in this address space from any userspace process which
3519 * has them mmaped. Generally, you want to remove COWed pages as well when
3520 * a file is being truncated, but not when invalidating pages from the page
3521 * cache.
3522 */
3523void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3524 pgoff_t nr, bool even_cows)
3525{
3526 struct zap_details details = { };
232a6a1c
PX
3527 pgoff_t first_index = start;
3528 pgoff_t last_index = start + nr - 1;
977fbdcd 3529
2e148f1e 3530 details.even_cows = even_cows;
232a6a1c
PX
3531 if (last_index < first_index)
3532 last_index = ULONG_MAX;
977fbdcd 3533
2c865995 3534 i_mmap_lock_read(mapping);
977fbdcd 3535 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
232a6a1c
PX
3536 unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3537 last_index, &details);
2c865995 3538 i_mmap_unlock_read(mapping);
977fbdcd 3539}
6e0e99d5 3540EXPORT_SYMBOL_GPL(unmap_mapping_pages);
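/*
 * Illustrative note: with nr == 0, last_index becomes start - 1, which is
 * below first_index, so it is widened to ULONG_MAX and the call unmaps
 * every page from @start to the end of the file, as documented above.
 */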
977fbdcd 3541
1da177e4 3542/**
8a5f14a2 3543 * unmap_mapping_range - unmap the portion of all mmaps in the specified
977fbdcd 3544 * address_space corresponding to the specified byte range in the underlying
8a5f14a2
KS
3545 * file.
3546 *
3d41088f 3547 * @mapping: the address space containing mmaps to be unmapped.
1da177e4
LT
3548 * @holebegin: byte in first page to unmap, relative to the start of
3549 * the underlying file. This will be rounded down to a PAGE_SIZE
25d9e2d1 3550 * boundary. Note that this is different from truncate_pagecache(), which
1da177e4
LT
3551 * must keep the partial page. In contrast, we must get rid of
3552 * partial pages.
3553 * @holelen: size of prospective hole in bytes. This will be rounded
3554 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
3555 * end of the file.
3556 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3557 * but 0 when invalidating pagecache, don't throw away private data.
3558 */
3559void unmap_mapping_range(struct address_space *mapping,
3560 loff_t const holebegin, loff_t const holelen, int even_cows)
3561{
1da177e4
LT
3562 pgoff_t hba = holebegin >> PAGE_SHIFT;
3563 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3564
3565 /* Check for overflow. */
3566 if (sizeof(holelen) > sizeof(hlen)) {
3567 long long holeend =
3568 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3569 if (holeend & ~(long long)ULONG_MAX)
3570 hlen = ULONG_MAX - hba + 1;
3571 }
3572
977fbdcd 3573 unmap_mapping_pages(mapping, hba, hlen, even_cows);
1da177e4
LT
3574}
3575EXPORT_SYMBOL(unmap_mapping_range);
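/*
 * Worked example (illustrative, assuming 4 KiB pages): holebegin == 5000
 * rounds down to hba == 1, and holelen == 6000 rounds up to
 * hlen == (6000 + 4095) >> 12 == 2, so file pages 1 and 2 are unmapped.
 * A holelen of 0 yields hlen == 0, which unmap_mapping_pages() treats as
 * "unmap to the end of the file".
 */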
3576
b756a3b5
AP
3577/*
3578 * Restore a potential device exclusive pte to a working pte entry
3579 */
3580static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3581{
19672a9e 3582 struct folio *folio = page_folio(vmf->page);
b756a3b5
AP
3583 struct vm_area_struct *vma = vmf->vma;
3584 struct mmu_notifier_range range;
3585
7c7b9629
AP
3586 /*
3587 * We need a reference to lock the folio because we don't hold
3588 * the PTL so a racing thread can remove the device-exclusive
3589 * entry and unmap it. If the folio is free the entry must
3590 * have been removed already. If it happens to have already
3591 * been re-allocated after being freed all we do is lock and
3592 * unlock it.
3593 */
3594 if (!folio_try_get(folio))
3595 return 0;
3596
3597 if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) {
3598 folio_put(folio);
b756a3b5 3599 return VM_FAULT_RETRY;
7c7b9629 3600 }
7d4a8be0 3601 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
b756a3b5
AP
3602 vma->vm_mm, vmf->address & PAGE_MASK,
3603 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3604 mmu_notifier_invalidate_range_start(&range);
3605
3606 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3607 &vmf->ptl);
c33c7948 3608 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
19672a9e 3609 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
b756a3b5 3610
3db82b93
HD
3611 if (vmf->pte)
3612 pte_unmap_unlock(vmf->pte, vmf->ptl);
19672a9e 3613 folio_unlock(folio);
7c7b9629 3614 folio_put(folio);
b756a3b5
AP
3615
3616 mmu_notifier_invalidate_range_end(&range);
3617 return 0;
3618}
3619
a160e537 3620static inline bool should_try_to_free_swap(struct folio *folio,
c145e0b4
DH
3621 struct vm_area_struct *vma,
3622 unsigned int fault_flags)
3623{
a160e537 3624 if (!folio_test_swapcache(folio))
c145e0b4 3625 return false;
9202d527 3626 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
a160e537 3627 folio_test_mlocked(folio))
c145e0b4
DH
3628 return true;
3629 /*
3630 * If we want to map a page that's in the swapcache writable, we
3631 * have to detect via the refcount if we're really the exclusive
3632 * user. Try freeing the swapcache to get rid of the swapcache
3633	 * reference only in case it's likely that we'll be the exclusive user.
3634 */
a160e537
MWO
3635 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
3636 folio_ref_count(folio) == 2;
c145e0b4
DH
3637}
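/*
 * Note on the refcount check above: at this point in do_swap_page() the
 * folio is not yet mapped by this fault, so a reference count of 2 is
 * typically just the swap cache's reference plus the one taken for this
 * fault; there is most likely no other user, and dropping the swapcache
 * leaves us as the sole owner.
 */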
3638
9c28a205
PX
3639static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
3640{
3641 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3642 vmf->address, &vmf->ptl);
3db82b93
HD
3643 if (!vmf->pte)
3644 return 0;
9c28a205
PX
3645 /*
3646 * Be careful so that we will only recover a special uffd-wp pte into a
3647 * none pte. Otherwise it means the pte could have changed, so retry.
7e3ce3f8
PX
3648 *
3649 * This should also cover the case where e.g. the pte changed
3650 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_SWAPIN_ERROR.
3651	 * So the is_pte_marker() check alone is not enough to safely drop the pte.
9c28a205 3652 */
c33c7948 3653 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
9c28a205
PX
3654 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
3655 pte_unmap_unlock(vmf->pte, vmf->ptl);
3656 return 0;
3657}
3658
2bad466c
PX
3659static vm_fault_t do_pte_missing(struct vm_fault *vmf)
3660{
3661 if (vma_is_anonymous(vmf->vma))
3662 return do_anonymous_page(vmf);
3663 else
3664 return do_fault(vmf);
3665}
3666
9c28a205
PX
3667/*
3668 * This is actually a page-missing access, but with uffd-wp special pte
3669 * installed. It means this pte was wr-protected before being unmapped.
3670 */
3671static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
3672{
3673 /*
3674 * Just in case there're leftover special ptes even after the region
7a079ba2 3675 * got unregistered - we can simply clear them.
9c28a205 3676 */
2bad466c 3677 if (unlikely(!userfaultfd_wp(vmf->vma)))
9c28a205
PX
3678 return pte_marker_clear(vmf);
3679
2bad466c 3680 return do_pte_missing(vmf);
9c28a205
PX
3681}
3682
5c041f5d
PX
3683static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
3684{
3685 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
3686 unsigned long marker = pte_marker_get(entry);
3687
3688 /*
ca92ea3d
PX
3689 * PTE markers should never be empty. If anything weird happened,
3690 * the best thing to do is to kill the process along with its mm.
5c041f5d 3691 */
ca92ea3d 3692 if (WARN_ON_ONCE(!marker))
5c041f5d
PX
3693 return VM_FAULT_SIGBUS;
3694
15520a3f
PX
3695 /* Higher priority than uffd-wp when data corrupted */
3696 if (marker & PTE_MARKER_SWAPIN_ERROR)
3697 return VM_FAULT_SIGBUS;
3698
9c28a205
PX
3699 if (pte_marker_entry_uffd_wp(entry))
3700 return pte_marker_handle_uffd_wp(vmf);
3701
3702 /* This is an unknown pte marker */
3703 return VM_FAULT_SIGBUS;
5c041f5d
PX
3704}
3705
1da177e4 3706/*
c1e8d7c6 3707 * We enter with non-exclusive mmap_lock (to exclude vma changes,
8f4e2101 3708 * but allow concurrent faults), and pte mapped but not yet locked.
9a95f3cf
PC
3709 * We return with pte unmapped and unlocked.
3710 *
c1e8d7c6 3711 * We return with the mmap_lock locked or unlocked in the same cases
9a95f3cf 3712 * as does filemap_fault().
1da177e4 3713 */
2b740303 3714vm_fault_t do_swap_page(struct vm_fault *vmf)
1da177e4 3715{
82b0f8c3 3716 struct vm_area_struct *vma = vmf->vma;
d4f9565a
MWO
3717 struct folio *swapcache, *folio = NULL;
3718 struct page *page;
2799e775 3719 struct swap_info_struct *si = NULL;
14f9135d 3720 rmap_t rmap_flags = RMAP_NONE;
1493a191 3721 bool exclusive = false;
65500d23 3722 swp_entry_t entry;
1da177e4 3723 pte_t pte;
d065bd81 3724 int locked;
2b740303 3725 vm_fault_t ret = 0;
aae466b0 3726 void *shadow = NULL;
1da177e4 3727
2ca99358 3728 if (!pte_unmap_same(vmf))
8f4e2101 3729 goto out;
65500d23 3730
17c05f18
SB
3731 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3732 ret = VM_FAULT_RETRY;
3733 goto out;
3734 }
3735
2994302b 3736 entry = pte_to_swp_entry(vmf->orig_pte);
d1737fdb
AK
3737 if (unlikely(non_swap_entry(entry))) {
3738 if (is_migration_entry(entry)) {
82b0f8c3
JK
3739 migration_entry_wait(vma->vm_mm, vmf->pmd,
3740 vmf->address);
b756a3b5
AP
3741 } else if (is_device_exclusive_entry(entry)) {
3742 vmf->page = pfn_swap_entry_to_page(entry);
3743 ret = remove_device_exclusive_entry(vmf);
5042db43 3744 } else if (is_device_private_entry(entry)) {
af5cdaf8 3745 vmf->page = pfn_swap_entry_to_page(entry);
16ce101d
AP
3746 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3747 vmf->address, &vmf->ptl);
3db82b93 3748 if (unlikely(!vmf->pte ||
c33c7948
RR
3749 !pte_same(ptep_get(vmf->pte),
3750 vmf->orig_pte)))
3b65f437 3751 goto unlock;
16ce101d
AP
3752
3753 /*
3754 * Get a page reference while we know the page can't be
3755 * freed.
3756 */
3757 get_page(vmf->page);
3758 pte_unmap_unlock(vmf->pte, vmf->ptl);
4a955bed 3759 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
16ce101d 3760 put_page(vmf->page);
d1737fdb
AK
3761 } else if (is_hwpoison_entry(entry)) {
3762 ret = VM_FAULT_HWPOISON;
5c041f5d
PX
3763 } else if (is_pte_marker_entry(entry)) {
3764 ret = handle_pte_marker(vmf);
d1737fdb 3765 } else {
2994302b 3766 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
d99be1a8 3767 ret = VM_FAULT_SIGBUS;
d1737fdb 3768 }
0697212a
CL
3769 goto out;
3770 }
0bcac06f 3771
2799e775
ML
3772 /* Prevent swapoff from happening to us. */
3773 si = get_swap_device(entry);
3774 if (unlikely(!si))
3775 goto out;
0bcac06f 3776
5a423081
MWO
3777 folio = swap_cache_get_folio(entry, vma, vmf->address);
3778 if (folio)
3779 page = folio_file_page(folio, swp_offset(entry));
d4f9565a 3780 swapcache = folio;
f8020772 3781
d4f9565a 3782 if (!folio) {
a449bf58
QC
3783 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3784 __swap_count(entry) == 1) {
0bcac06f 3785 /* skip swapcache */
63ad4add
MWO
3786 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
3787 vma, vmf->address, false);
3788 page = &folio->page;
3789 if (folio) {
3790 __folio_set_locked(folio);
3791 __folio_set_swapbacked(folio);
4c6355b2 3792
65995918 3793 if (mem_cgroup_swapin_charge_folio(folio,
63ad4add
MWO
3794 vma->vm_mm, GFP_KERNEL,
3795 entry)) {
545b1b07 3796 ret = VM_FAULT_OOM;
4c6355b2 3797 goto out_page;
545b1b07 3798 }
0add0c77 3799 mem_cgroup_swapin_uncharge_swap(entry);
4c6355b2 3800
aae466b0
JK
3801 shadow = get_shadow_from_swap_cache(entry);
3802 if (shadow)
63ad4add 3803 workingset_refault(folio, shadow);
0076f029 3804
63ad4add 3805 folio_add_lru(folio);
0add0c77
SB
3806
3807 /* To provide entry to swap_readpage() */
63ad4add 3808 folio_set_swap_entry(folio, entry);
5169b844 3809 swap_readpage(page, true, NULL);
63ad4add 3810 folio->private = NULL;
0bcac06f 3811 }
aa8d22a1 3812 } else {
e9e9b7ec
MK
3813 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3814 vmf);
63ad4add
MWO
3815 if (page)
3816 folio = page_folio(page);
d4f9565a 3817 swapcache = folio;
0bcac06f
MK
3818 }
3819
d4f9565a 3820 if (!folio) {
1da177e4 3821 /*
8f4e2101
HD
3822 * Back out if somebody else faulted in this pte
3823 * while we released the pte lock.
1da177e4 3824 */
82b0f8c3
JK
3825 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3826 vmf->address, &vmf->ptl);
c33c7948
RR
3827 if (likely(vmf->pte &&
3828 pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
1da177e4 3829 ret = VM_FAULT_OOM;
65500d23 3830 goto unlock;
1da177e4
LT
3831 }
3832
3833 /* Had to read the page from swap area: Major fault */
3834 ret = VM_FAULT_MAJOR;
f8891e5e 3835 count_vm_event(PGMAJFAULT);
2262185c 3836 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
d1737fdb 3837 } else if (PageHWPoison(page)) {
71f72525
WF
3838 /*
3839 * hwpoisoned dirty swapcache pages are kept for killing
3840 * owner processes (which may be unknown at hwpoison time)
3841 */
d1737fdb 3842 ret = VM_FAULT_HWPOISON;
4779cb31 3843 goto out_release;
1da177e4
LT
3844 }
3845
19672a9e 3846 locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags);
e709ffd6 3847
d065bd81
ML
3848 if (!locked) {
3849 ret |= VM_FAULT_RETRY;
3850 goto out_release;
3851 }
073e587e 3852
84d60fdd
DH
3853 if (swapcache) {
3854 /*
3b344157 3855 * Make sure folio_free_swap() or swapoff did not release the
84d60fdd
DH
3856 * swapcache from under us. The page pin, and pte_same test
3857 * below, are not enough to exclude that. Even if it is still
3858 * swapcache, we need to check that the page's swap has not
3859 * changed.
3860 */
63ad4add 3861 if (unlikely(!folio_test_swapcache(folio) ||
84d60fdd
DH
3862 page_private(page) != entry.val))
3863 goto out_page;
3864
3865 /*
3866 * KSM sometimes has to copy on read faults, for example, if
3867 * page->index of !PageKSM() pages would be nonlinear inside the
3868 * anon VMA -- PageKSM() is lost on actual swapout.
3869 */
3870 page = ksm_might_need_to_copy(page, vma, vmf->address);
3871 if (unlikely(!page)) {
3872 ret = VM_FAULT_OOM;
84d60fdd 3873 goto out_page;
6b970599
KW
3874 } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) {
3875 ret = VM_FAULT_HWPOISON;
3876 goto out_page;
84d60fdd 3877 }
63ad4add 3878 folio = page_folio(page);
c145e0b4
DH
3879
3880 /*
3881 * If we want to map a page that's in the swapcache writable, we
3882 * have to detect via the refcount if we're really the exclusive
3883 * owner. Try removing the extra reference from the local LRU
1fec6890 3884 * caches if required.
c145e0b4 3885 */
d4f9565a 3886 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
63ad4add 3887 !folio_test_ksm(folio) && !folio_test_lru(folio))
c145e0b4 3888 lru_add_drain();
5ad64688
HD
3889 }
3890
4231f842 3891 folio_throttle_swaprate(folio, GFP_KERNEL);
8a9f3ccd 3892
1da177e4 3893 /*
8f4e2101 3894 * Back out if somebody else already faulted in this pte.
1da177e4 3895 */
82b0f8c3
JK
3896 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3897 &vmf->ptl);
c33c7948 3898 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
b8107480 3899 goto out_nomap;
b8107480 3900
63ad4add 3901 if (unlikely(!folio_test_uptodate(folio))) {
b8107480
KK
3902 ret = VM_FAULT_SIGBUS;
3903 goto out_nomap;
1da177e4
LT
3904 }
3905
78fbe906
DH
3906 /*
3907 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
3908 * must never point at an anonymous page in the swapcache that is
3909 * PG_anon_exclusive. Sanity check that this holds and especially, that
3910 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
3911 * check after taking the PT lock and making sure that nobody
3912 * concurrently faulted in this page and set PG_anon_exclusive.
3913 */
63ad4add
MWO
3914 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
3915 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
78fbe906 3916
1493a191
DH
3917 /*
3918 * Check under PT lock (to protect against concurrent fork() sharing
3919 * the swap entry concurrently) for certainly exclusive pages.
3920 */
63ad4add 3921 if (!folio_test_ksm(folio)) {
1493a191 3922 exclusive = pte_swp_exclusive(vmf->orig_pte);
d4f9565a 3923 if (folio != swapcache) {
1493a191
DH
3924 /*
3925 * We have a fresh page that is not exposed to the
3926 * swapcache -> certainly exclusive.
3927 */
3928 exclusive = true;
63ad4add 3929 } else if (exclusive && folio_test_writeback(folio) &&
eacde327 3930 data_race(si->flags & SWP_STABLE_WRITES)) {
1493a191
DH
3931 /*
3932 * This is tricky: not all swap backends support
3933 * concurrent page modifications while under writeback.
3934 *
3935 * So if we stumble over such a page in the swapcache
3936 * we must not set the page exclusive, otherwise we can
3937 * map it writable without further checks and modify it
3938 * while still under writeback.
3939 *
3940 * For these problematic swap backends, simply drop the
3941 * exclusive marker: this is perfectly fine as we start
3942 * writeback only if we fully unmapped the page and
3943 * there are no unexpected references on the page after
3944 * unmapping succeeded. After fully unmapped, no
3945 * further GUP references (FOLL_GET and FOLL_PIN) can
3946 * appear, so dropping the exclusive marker and mapping
3947 * it only R/O is fine.
3948 */
3949 exclusive = false;
3950 }
3951 }
3952
8c7c6e34 3953 /*
c145e0b4
DH
3954 * Remove the swap entry and conditionally try to free up the swapcache.
3955 * We're already holding a reference on the page but haven't mapped it
3956 * yet.
8c7c6e34 3957 */
c145e0b4 3958 swap_free(entry);
a160e537
MWO
3959 if (should_try_to_free_swap(folio, vma, vmf->flags))
3960 folio_free_swap(folio);
1da177e4 3961
f1a79412
SB
3962 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
3963 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1da177e4 3964 pte = mk_pte(page, vma->vm_page_prot);
c145e0b4
DH
3965
3966 /*
1493a191
DH
3967 * Same logic as in do_wp_page(); however, optimize for pages that are
3968 * certainly not shared either because we just allocated them without
3969 * exposing them to the swapcache or because the swap entry indicates
3970 * exclusivity.
c145e0b4 3971 */
63ad4add
MWO
3972 if (!folio_test_ksm(folio) &&
3973 (exclusive || folio_ref_count(folio) == 1)) {
6c287605
DH
3974 if (vmf->flags & FAULT_FLAG_WRITE) {
3975 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
3976 vmf->flags &= ~FAULT_FLAG_WRITE;
6c287605 3977 }
14f9135d 3978 rmap_flags |= RMAP_EXCLUSIVE;
1da177e4 3979 }
1da177e4 3980 flush_icache_page(vma, page);
2994302b 3981 if (pte_swp_soft_dirty(vmf->orig_pte))
179ef71c 3982 pte = pte_mksoft_dirty(pte);
f1eb1bac 3983 if (pte_swp_uffd_wp(vmf->orig_pte))
f45ec5ff 3984 pte = pte_mkuffd_wp(pte);
2994302b 3985 vmf->orig_pte = pte;
0bcac06f
MK
3986
3987 /* ksm created a completely new copy */
d4f9565a 3988 if (unlikely(folio != swapcache && swapcache)) {
40f2bbf7 3989 page_add_new_anon_rmap(page, vma, vmf->address);
63ad4add 3990 folio_add_lru_vma(folio, vma);
0bcac06f 3991 } else {
f1e2db12 3992 page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
00501b53 3993 }
1da177e4 3994
63ad4add
MWO
3995 VM_BUG_ON(!folio_test_anon(folio) ||
3996 (pte_write(pte) && !PageAnonExclusive(page)));
1eba86c0
PT
3997 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3998 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3999
63ad4add 4000 folio_unlock(folio);
d4f9565a 4001 if (folio != swapcache && swapcache) {
4969c119
AA
4002 /*
4003 * Hold the lock to avoid the swap entry to be reused
4004 * until we take the PT lock for the pte_same() check
4005 * (to avoid false positives from pte_same). For
4006 * further safety release the lock after the swap_free
4007 * so that the swap count won't change under a
4008 * parallel locked swapcache.
4009 */
d4f9565a
MWO
4010 folio_unlock(swapcache);
4011 folio_put(swapcache);
4969c119 4012 }
c475a8ab 4013
82b0f8c3 4014 if (vmf->flags & FAULT_FLAG_WRITE) {
2994302b 4015 ret |= do_wp_page(vmf);
61469f1d
HD
4016 if (ret & VM_FAULT_ERROR)
4017 ret &= VM_FAULT_ERROR;
1da177e4
LT
4018 goto out;
4019 }
4020
4021 /* No need to invalidate - it was non-present before */
82b0f8c3 4022 update_mmu_cache(vma, vmf->address, vmf->pte);
65500d23 4023unlock:
3db82b93
HD
4024 if (vmf->pte)
4025 pte_unmap_unlock(vmf->pte, vmf->ptl);
1da177e4 4026out:
2799e775
ML
4027 if (si)
4028 put_swap_device(si);
1da177e4 4029 return ret;
b8107480 4030out_nomap:
3db82b93
HD
4031 if (vmf->pte)
4032 pte_unmap_unlock(vmf->pte, vmf->ptl);
bc43f75c 4033out_page:
63ad4add 4034 folio_unlock(folio);
4779cb31 4035out_release:
63ad4add 4036 folio_put(folio);
d4f9565a
MWO
4037 if (folio != swapcache && swapcache) {
4038 folio_unlock(swapcache);
4039 folio_put(swapcache);
4969c119 4040 }
2799e775
ML
4041 if (si)
4042 put_swap_device(si);
65500d23 4043 return ret;
1da177e4
LT
4044}
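/*
 * Rough flow of do_swap_page() above (descriptive note): decode special
 * (non-swap) entries first; otherwise pin the swap device, look the folio
 * up in the swap cache or read it in (skipping the swap cache entirely for
 * SWP_SYNCHRONOUS_IO devices with a single swap count), lock the folio,
 * re-check the PTE under the PT lock, decide whether the page is exclusive
 * to this process, install the new PTE, and finally fall through to
 * do_wp_page() when the fault was a write.
 */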
4045
4046/*
c1e8d7c6 4047 * We enter with non-exclusive mmap_lock (to exclude vma changes,
8f4e2101 4048 * but allow concurrent faults), and pte mapped but not yet locked.
c1e8d7c6 4049 * We return with mmap_lock still held, but pte unmapped and unlocked.
1da177e4 4050 */
2b740303 4051static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
1da177e4 4052{
2bad466c 4053 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
82b0f8c3 4054 struct vm_area_struct *vma = vmf->vma;
6bc56a4d 4055 struct folio *folio;
2b740303 4056 vm_fault_t ret = 0;
1da177e4 4057 pte_t entry;
1da177e4 4058
6b7339f4
KS
4059 /* File mapping without ->vm_ops ? */
4060 if (vma->vm_flags & VM_SHARED)
4061 return VM_FAULT_SIGBUS;
4062
7267ec00 4063 /*
3db82b93
HD
4064 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
4065 * be distinguished from a transient failure of pte_offset_map().
7267ec00 4066 */
4cf58924 4067 if (pte_alloc(vma->vm_mm, vmf->pmd))
7267ec00
KS
4068 return VM_FAULT_OOM;
4069
11ac5524 4070 /* Use the zero-page for reads */
82b0f8c3 4071 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
bae473a4 4072 !mm_forbids_zeropage(vma->vm_mm)) {
82b0f8c3 4073 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
62eede62 4074 vma->vm_page_prot));
82b0f8c3
JK
4075 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4076 vmf->address, &vmf->ptl);
3db82b93
HD
4077 if (!vmf->pte)
4078 goto unlock;
2bad466c 4079 if (vmf_pte_changed(vmf)) {
7df67697 4080 update_mmu_tlb(vma, vmf->address, vmf->pte);
a13ea5b7 4081 goto unlock;
7df67697 4082 }
6b31d595
MH
4083 ret = check_stable_address_space(vma->vm_mm);
4084 if (ret)
4085 goto unlock;
6b251fc9
AA
4086 /* Deliver the page fault to userland, check inside PT lock */
4087 if (userfaultfd_missing(vma)) {
82b0f8c3
JK
4088 pte_unmap_unlock(vmf->pte, vmf->ptl);
4089 return handle_userfault(vmf, VM_UFFD_MISSING);
6b251fc9 4090 }
a13ea5b7
HD
4091 goto setpte;
4092 }
4093
557ed1fa 4094 /* Allocate our own private page. */
557ed1fa
NP
4095 if (unlikely(anon_vma_prepare(vma)))
4096 goto oom;
6bc56a4d
MWO
4097 folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
4098 if (!folio)
557ed1fa 4099 goto oom;
eb3c24f3 4100
6bc56a4d 4101 if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
eb3c24f3 4102 goto oom_free_page;
e2bf3e2c 4103 folio_throttle_swaprate(folio, GFP_KERNEL);
eb3c24f3 4104
52f37629 4105 /*
cb3184de 4106 * The memory barrier inside __folio_mark_uptodate makes sure that
f4f5329d 4107 * preceding stores to the page contents become visible before
52f37629
MK
4108 * the set_pte_at() write.
4109 */
cb3184de 4110 __folio_mark_uptodate(folio);
8f4e2101 4111
cb3184de 4112 entry = mk_pte(&folio->page, vma->vm_page_prot);
50c25ee9 4113 entry = pte_sw_mkyoung(entry);
1ac0cb5d
HD
4114 if (vma->vm_flags & VM_WRITE)
4115 entry = pte_mkwrite(pte_mkdirty(entry));
1da177e4 4116
82b0f8c3
JK
4117 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4118 &vmf->ptl);
3db82b93
HD
4119 if (!vmf->pte)
4120 goto release;
2bad466c 4121 if (vmf_pte_changed(vmf)) {
bce8cb3c 4122 update_mmu_tlb(vma, vmf->address, vmf->pte);
557ed1fa 4123 goto release;
7df67697 4124 }
9ba69294 4125
6b31d595
MH
4126 ret = check_stable_address_space(vma->vm_mm);
4127 if (ret)
4128 goto release;
4129
6b251fc9
AA
4130 /* Deliver the page fault to userland, check inside PT lock */
4131 if (userfaultfd_missing(vma)) {
82b0f8c3 4132 pte_unmap_unlock(vmf->pte, vmf->ptl);
cb3184de 4133 folio_put(folio);
82b0f8c3 4134 return handle_userfault(vmf, VM_UFFD_MISSING);
6b251fc9
AA
4135 }
4136
f1a79412 4137 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
cb3184de
MWO
4138 folio_add_new_anon_rmap(folio, vma, vmf->address);
4139 folio_add_lru_vma(folio, vma);
a13ea5b7 4140setpte:
2bad466c
PX
4141 if (uffd_wp)
4142 entry = pte_mkuffd_wp(entry);
82b0f8c3 4143 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
1da177e4
LT
4144
4145 /* No need to invalidate - it was non-present before */
82b0f8c3 4146 update_mmu_cache(vma, vmf->address, vmf->pte);
65500d23 4147unlock:
3db82b93
HD
4148 if (vmf->pte)
4149 pte_unmap_unlock(vmf->pte, vmf->ptl);
6b31d595 4150 return ret;
8f4e2101 4151release:
cb3184de 4152 folio_put(folio);
8f4e2101 4153 goto unlock;
8a9f3ccd 4154oom_free_page:
cb3184de 4155 folio_put(folio);
65500d23 4156oom:
1da177e4
LT
4157 return VM_FAULT_OOM;
4158}
4159
9a95f3cf 4160/*
c1e8d7c6 4161 * The mmap_lock must have been held on entry, and may have been
9a95f3cf
PC
4162 * released depending on flags and vma->vm_ops->fault() return value.
4163 * See filemap_fault() and __lock_page_retry().
4164 */
2b740303 4165static vm_fault_t __do_fault(struct vm_fault *vmf)
7eae74af 4166{
82b0f8c3 4167 struct vm_area_struct *vma = vmf->vma;
2b740303 4168 vm_fault_t ret;
7eae74af 4169
63f3655f
MH
4170 /*
4171 * Preallocate pte before we take page_lock because this might lead to
4172 * deadlocks for memcg reclaim which waits for pages under writeback:
4173 * lock_page(A)
4174 * SetPageWriteback(A)
4175 * unlock_page(A)
4176 * lock_page(B)
4177 * lock_page(B)
d383807a 4178 * pte_alloc_one
63f3655f
MH
4179 * shrink_page_list
4180 * wait_on_page_writeback(A)
4181 * SetPageWriteback(B)
4182 * unlock_page(B)
4183 * # flush A, B to clear the writeback
4184 */
4185 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
a7069ee3 4186 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
63f3655f
MH
4187 if (!vmf->prealloc_pte)
4188 return VM_FAULT_OOM;
63f3655f
MH
4189 }
4190
11bac800 4191 ret = vma->vm_ops->fault(vmf);
3917048d 4192 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
b1aa812b 4193 VM_FAULT_DONE_COW)))
bc2466e4 4194 return ret;
7eae74af 4195
667240e0 4196 if (unlikely(PageHWPoison(vmf->page))) {
3149c79f 4197 struct page *page = vmf->page;
e53ac737
RR
4198 vm_fault_t poisonret = VM_FAULT_HWPOISON;
4199 if (ret & VM_FAULT_LOCKED) {
3149c79f
RR
4200 if (page_mapped(page))
4201 unmap_mapping_pages(page_mapping(page),
4202 page->index, 1, false);
e53ac737 4203 /* Retry if a clean page was removed from the cache. */
3149c79f
RR
4204 if (invalidate_inode_page(page))
4205 poisonret = VM_FAULT_NOPAGE;
4206 unlock_page(page);
e53ac737 4207 }
3149c79f 4208 put_page(page);
936ca80d 4209 vmf->page = NULL;
e53ac737 4210 return poisonret;
7eae74af
KS
4211 }
4212
4213 if (unlikely(!(ret & VM_FAULT_LOCKED)))
667240e0 4214 lock_page(vmf->page);
7eae74af 4215 else
667240e0 4216 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
7eae74af 4217
7eae74af
KS
4218 return ret;
4219}
4220
396bcc52 4221#ifdef CONFIG_TRANSPARENT_HUGEPAGE
82b0f8c3 4222static void deposit_prealloc_pte(struct vm_fault *vmf)
953c66c2 4223{
82b0f8c3 4224 struct vm_area_struct *vma = vmf->vma;
953c66c2 4225
82b0f8c3 4226 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
953c66c2
AK
4227 /*
4228 * We are going to consume the prealloc table,
4229 * count that as nr_ptes.
4230 */
c4812909 4231 mm_inc_nr_ptes(vma->vm_mm);
7f2b6ce8 4232 vmf->prealloc_pte = NULL;
953c66c2
AK
4233}
4234
f9ce0be7 4235vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
10102459 4236{
82b0f8c3
JK
4237 struct vm_area_struct *vma = vmf->vma;
4238 bool write = vmf->flags & FAULT_FLAG_WRITE;
4239 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
10102459 4240 pmd_t entry;
2b740303 4241 int i;
d01ac3c3 4242 vm_fault_t ret = VM_FAULT_FALLBACK;
10102459
KS
4243
4244 if (!transhuge_vma_suitable(vma, haddr))
d01ac3c3 4245 return ret;
10102459 4246
10102459 4247 page = compound_head(page);
d01ac3c3
MWO
4248 if (compound_order(page) != HPAGE_PMD_ORDER)
4249 return ret;
10102459 4250
eac96c3e
YS
4251 /*
4252 * Just backoff if any subpage of a THP is corrupted otherwise
4253 * the corrupted page may mapped by PMD silently to escape the
4254 * check. This kind of THP just can be PTE mapped. Access to
4255 * the corrupted subpage should trigger SIGBUS as expected.
4256 */
4257 if (unlikely(PageHasHWPoisoned(page)))
4258 return ret;
4259
953c66c2 4260 /*
f0953a1b 4261 * Archs like ppc64 need additional space to store information
953c66c2
AK
4262 * related to pte entry. Use the preallocated table for that.
4263 */
82b0f8c3 4264 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4cf58924 4265 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
82b0f8c3 4266 if (!vmf->prealloc_pte)
953c66c2 4267 return VM_FAULT_OOM;
953c66c2
AK
4268 }
4269
82b0f8c3
JK
4270 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4271 if (unlikely(!pmd_none(*vmf->pmd)))
10102459
KS
4272 goto out;
4273
4274 for (i = 0; i < HPAGE_PMD_NR; i++)
4275 flush_icache_page(vma, page + i);
4276
4277 entry = mk_huge_pmd(page, vma->vm_page_prot);
4278 if (write)
f55e1014 4279 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
10102459 4280
fadae295 4281 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
cea86fe2
HD
4282 page_add_file_rmap(page, vma, true);
4283
953c66c2
AK
4284 /*
4285 * deposit and withdraw with pmd lock held
4286 */
4287 if (arch_needs_pgtable_deposit())
82b0f8c3 4288 deposit_prealloc_pte(vmf);
10102459 4289
82b0f8c3 4290 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
10102459 4291
82b0f8c3 4292 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
10102459
KS
4293
4294 /* fault is handled */
4295 ret = 0;
95ecedcd 4296 count_vm_event(THP_FILE_MAPPED);
10102459 4297out:
82b0f8c3 4298 spin_unlock(vmf->ptl);
10102459
KS
4299 return ret;
4300}
4301#else
f9ce0be7 4302vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
10102459 4303{
f9ce0be7 4304 return VM_FAULT_FALLBACK;
10102459
KS
4305}
4306#endif
4307
9d3af4b4 4308void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
3bb97794 4309{
82b0f8c3 4310 struct vm_area_struct *vma = vmf->vma;
2bad466c 4311 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
82b0f8c3 4312 bool write = vmf->flags & FAULT_FLAG_WRITE;
9d3af4b4 4313 bool prefault = vmf->address != addr;
3bb97794 4314 pte_t entry;
7267ec00 4315
3bb97794
KS
4316 flush_icache_page(vma, page);
4317 entry = mk_pte(page, vma->vm_page_prot);
46bdb427
WD
4318
4319 if (prefault && arch_wants_old_prefaulted_pte())
4320 entry = pte_mkold(entry);
50c25ee9
TB
4321 else
4322 entry = pte_sw_mkyoung(entry);
46bdb427 4323
3bb97794
KS
4324 if (write)
4325 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
9c28a205 4326 if (unlikely(uffd_wp))
f1eb1bac 4327 entry = pte_mkuffd_wp(entry);
bae473a4
KS
4328 /* copy-on-write page */
4329 if (write && !(vma->vm_flags & VM_SHARED)) {
f1a79412 4330 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
40f2bbf7 4331 page_add_new_anon_rmap(page, vma, addr);
b518154e 4332 lru_cache_add_inactive_or_unevictable(page, vma);
3bb97794 4333 } else {
f1a79412 4334 inc_mm_counter(vma->vm_mm, mm_counter_file(page));
cea86fe2 4335 page_add_file_rmap(page, vma, false);
3bb97794 4336 }
9d3af4b4 4337 set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
3bb97794
KS
4338}
4339
f46f2ade
PX
4340static bool vmf_pte_changed(struct vm_fault *vmf)
4341{
4342 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
c33c7948 4343 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
f46f2ade 4344
c33c7948 4345 return !pte_none(ptep_get(vmf->pte));
f46f2ade
PX
4346}
4347
9118c0cb
JK
4348/**
4349 * finish_fault - finish page fault once we have prepared the page to fault
4350 *
4351 * @vmf: structure describing the fault
4352 *
4353 * This function handles all that is needed to finish a page fault once the
4354 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
4355 * given page, adds reverse page mapping, handles memcg charges and LRU
a862f68a 4356 * addition.
9118c0cb
JK
4357 *
4358 * The function expects the page to be locked and on success it consumes a
4359 * reference of a page being mapped (for the PTE which maps it).
a862f68a
MR
4360 *
4361 * Return: %0 on success, %VM_FAULT_ code in case of error.
9118c0cb 4362 */
2b740303 4363vm_fault_t finish_fault(struct vm_fault *vmf)
9118c0cb 4364{
f9ce0be7 4365 struct vm_area_struct *vma = vmf->vma;
9118c0cb 4366 struct page *page;
f9ce0be7 4367 vm_fault_t ret;
9118c0cb
JK
4368
4369 /* Did we COW the page? */
f9ce0be7 4370 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
9118c0cb
JK
4371 page = vmf->cow_page;
4372 else
4373 page = vmf->page;
6b31d595
MH
4374
4375 /*
4376 * check even for read faults because we might have lost our CoWed
4377 * page
4378 */
f9ce0be7
KS
4379 if (!(vma->vm_flags & VM_SHARED)) {
4380 ret = check_stable_address_space(vma->vm_mm);
4381 if (ret)
4382 return ret;
4383 }
4384
4385 if (pmd_none(*vmf->pmd)) {
4386 if (PageTransCompound(page)) {
4387 ret = do_set_pmd(vmf, page);
4388 if (ret != VM_FAULT_FALLBACK)
4389 return ret;
4390 }
4391
03c4f204
QZ
4392 if (vmf->prealloc_pte)
4393 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4394 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
f9ce0be7
KS
4395 return VM_FAULT_OOM;
4396 }
4397
f9ce0be7
KS
4398 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4399 vmf->address, &vmf->ptl);
3db82b93
HD
4400 if (!vmf->pte)
4401 return VM_FAULT_NOPAGE;
70427f6e 4402
f9ce0be7 4403 /* Re-check under ptl */
70427f6e 4404 if (likely(!vmf_pte_changed(vmf))) {
9d3af4b4 4405 do_set_pte(vmf, page, vmf->address);
70427f6e
SA
4406
4407 /* no need to invalidate: a not-present page won't be cached */
4408 update_mmu_cache(vma, vmf->address, vmf->pte);
4409
4410 ret = 0;
4411 } else {
4412 update_mmu_tlb(vma, vmf->address, vmf->pte);
f9ce0be7 4413 ret = VM_FAULT_NOPAGE;
70427f6e 4414 }
f9ce0be7 4415
f9ce0be7 4416 pte_unmap_unlock(vmf->pte, vmf->ptl);
9118c0cb
JK
4417 return ret;
4418}
4419
53d36a56
LS
4420static unsigned long fault_around_pages __read_mostly =
4421 65536 >> PAGE_SHIFT;
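/* With 4 KiB pages (PAGE_SHIFT == 12) the default above is a 16-page window. */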
a9b0f861 4422
a9b0f861
KS
4423#ifdef CONFIG_DEBUG_FS
4424static int fault_around_bytes_get(void *data, u64 *val)
1592eef0 4425{
53d36a56 4426 *val = fault_around_pages << PAGE_SHIFT;
1592eef0
KS
4427 return 0;
4428}
4429
b4903d6e 4430/*
da391d64
WK
4431 * fault_around_bytes must be rounded down to the nearest page order as it's
4432 * what do_fault_around() expects to see.
b4903d6e 4433 */
a9b0f861 4434static int fault_around_bytes_set(void *data, u64 val)
1592eef0 4435{
a9b0f861 4436 if (val / PAGE_SIZE > PTRS_PER_PTE)
1592eef0 4437 return -EINVAL;
53d36a56
LS
4438
4439 /*
4440 * The minimum value is 1 page, however this results in no fault-around
4441 * at all. See should_fault_around().
4442 */
4443 fault_around_pages = max(rounddown_pow_of_two(val) >> PAGE_SHIFT, 1UL);
4444
1592eef0
KS
4445 return 0;
4446}
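/*
 * Illustrative examples (assuming 4 KiB pages, PTRS_PER_PTE == 512):
 * writing 65537 rounds down to 65536 bytes, i.e. 16 pages; writing 100
 * rounds down to 64 bytes, i.e. 0 pages, which is clamped up to the
 * 1-page minimum (effectively no fault-around); a val with
 * val / PAGE_SIZE greater than 512 is rejected with -EINVAL by the
 * check at the top.
 */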
0a1345f8 4447DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
a9b0f861 4448 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
1592eef0
KS
4449
4450static int __init fault_around_debugfs(void)
4451{
d9f7979c
GKH
4452 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4453 &fault_around_bytes_fops);
1592eef0
KS
4454 return 0;
4455}
4456late_initcall(fault_around_debugfs);
1592eef0 4457#endif
8c6e50b0 4458
1fdb412b
KS
4459/*
4460 * do_fault_around() tries to map few pages around the fault address. The hope
4461 * is that the pages will be needed soon and this will lower the number of
4462 * faults to handle.
4463 *
4464 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4465 * not ready to be mapped: not up-to-date, locked, etc.
4466 *
9042599e
LS
4467 * This function doesn't cross VMA or page table boundaries, in order to call
4468 * map_pages() and acquire a PTE lock only once.
1fdb412b 4469 *
53d36a56 4470 * fault_around_pages defines how many pages we'll try to map.
da391d64
WK
4471 * do_fault_around() expects it to be set to a power of two less than or equal
4472 * to PTRS_PER_PTE.
1fdb412b 4473 *
da391d64 4474 * The virtual address of the area that we map is naturally aligned to
53d36a56 4475 * fault_around_pages * PAGE_SIZE rounded down to the machine page size
da391d64
WK
4476 * (and therefore to page order). This way it's easier to guarantee
4477 * that we don't cross page table boundaries.
1fdb412b 4478 */
2b740303 4479static vm_fault_t do_fault_around(struct vm_fault *vmf)
8c6e50b0 4480{
53d36a56 4481 pgoff_t nr_pages = READ_ONCE(fault_around_pages);
9042599e
LS
4482 pgoff_t pte_off = pte_index(vmf->address);
4483 /* The page offset of vmf->address within the VMA. */
4484 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
4485 pgoff_t from_pte, to_pte;
58ef47ef 4486 vm_fault_t ret;
8c6e50b0 4487
9042599e
LS
4488 /* The PTE offset of the start address, clamped to the VMA. */
4489 from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
4490 pte_off - min(pte_off, vma_off));
aecd6f44 4491
9042599e
LS
4492 /* The PTE offset of the end address, clamped to the VMA and PTE. */
4493 to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
4494 pte_off + vma_pages(vmf->vma) - vma_off) - 1;
8c6e50b0 4495
82b0f8c3 4496 if (pmd_none(*vmf->pmd)) {
4cf58924 4497 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
82b0f8c3 4498 if (!vmf->prealloc_pte)
f9ce0be7 4499 return VM_FAULT_OOM;
8c6e50b0
KS
4500 }
4501
58ef47ef
MWO
4502 rcu_read_lock();
4503 ret = vmf->vma->vm_ops->map_pages(vmf,
4504 vmf->pgoff + from_pte - pte_off,
4505 vmf->pgoff + to_pte - pte_off);
4506 rcu_read_unlock();
4507
4508 return ret;
8c6e50b0
KS
4509}
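/*
 * Worked example (illustrative): with fault_around_pages == 16 and a fault
 * at pte_off == 35 in a VMA that spans the whole PTE table, from_pte is
 * ALIGN_DOWN(35, 16) == 32 and to_pte is 47, so ->map_pages() is asked for
 * the 16 PTEs 32..47. If the VMA only starts two pages before the fault
 * (vma_off == 2), from_pte is clamped to 33 instead.
 */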
4510
9c28a205
PX
4511/* Return true if we should do read fault-around, false otherwise */
4512static inline bool should_fault_around(struct vm_fault *vmf)
4513{
4514 /* No ->map_pages? No way to fault around... */
4515 if (!vmf->vma->vm_ops->map_pages)
4516 return false;
4517
4518 if (uffd_disable_fault_around(vmf->vma))
4519 return false;
4520
53d36a56
LS
4521 /* A single page implies no faulting 'around' at all. */
4522 return fault_around_pages > 1;
9c28a205
PX
4523}
4524
2b740303 4525static vm_fault_t do_read_fault(struct vm_fault *vmf)
e655fb29 4526{
2b740303 4527 vm_fault_t ret = 0;
8c6e50b0
KS
4528
4529 /*
4530 * Let's call ->map_pages() first and use ->fault() as fallback
4531 * if page by the offset is not ready to be mapped (cold cache or
4532 * something).
4533 */
9c28a205
PX
4534 if (should_fault_around(vmf)) {
4535 ret = do_fault_around(vmf);
4536 if (ret)
4537 return ret;
8c6e50b0 4538 }
e655fb29 4539
936ca80d 4540 ret = __do_fault(vmf);
e655fb29
KS
4541 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4542 return ret;
4543
9118c0cb 4544 ret |= finish_fault(vmf);
936ca80d 4545 unlock_page(vmf->page);
7267ec00 4546 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
936ca80d 4547 put_page(vmf->page);
e655fb29
KS
4548 return ret;
4549}
4550
2b740303 4551static vm_fault_t do_cow_fault(struct vm_fault *vmf)
ec47c3b9 4552{
82b0f8c3 4553 struct vm_area_struct *vma = vmf->vma;
2b740303 4554 vm_fault_t ret;
ec47c3b9
KS
4555
4556 if (unlikely(anon_vma_prepare(vma)))
4557 return VM_FAULT_OOM;
4558
936ca80d
JK
4559 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4560 if (!vmf->cow_page)
ec47c3b9
KS
4561 return VM_FAULT_OOM;
4562
8f425e4e
MWO
4563 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
4564 GFP_KERNEL)) {
936ca80d 4565 put_page(vmf->cow_page);
ec47c3b9
KS
4566 return VM_FAULT_OOM;
4567 }
68fa572b 4568 folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL);
ec47c3b9 4569
936ca80d 4570 ret = __do_fault(vmf);
ec47c3b9
KS
4571 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4572 goto uncharge_out;
3917048d
JK
4573 if (ret & VM_FAULT_DONE_COW)
4574 return ret;
ec47c3b9 4575
b1aa812b 4576 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
936ca80d 4577 __SetPageUptodate(vmf->cow_page);
ec47c3b9 4578
9118c0cb 4579 ret |= finish_fault(vmf);
b1aa812b
JK
4580 unlock_page(vmf->page);
4581 put_page(vmf->page);
7267ec00
KS
4582 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4583 goto uncharge_out;
ec47c3b9
KS
4584 return ret;
4585uncharge_out:
936ca80d 4586 put_page(vmf->cow_page);
ec47c3b9
KS
4587 return ret;
4588}
4589
2b740303 4590static vm_fault_t do_shared_fault(struct vm_fault *vmf)
1da177e4 4591{
82b0f8c3 4592 struct vm_area_struct *vma = vmf->vma;
2b740303 4593 vm_fault_t ret, tmp;
1d65f86d 4594
936ca80d 4595 ret = __do_fault(vmf);
7eae74af 4596 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
f0c6d4d2 4597 return ret;
1da177e4
LT
4598
4599 /*
f0c6d4d2
KS
4600 * Check if the backing address space wants to know that the page is
4601 * about to become writable
1da177e4 4602 */
fb09a464 4603 if (vma->vm_ops->page_mkwrite) {
936ca80d 4604 unlock_page(vmf->page);
38b8cb7f 4605 tmp = do_page_mkwrite(vmf);
fb09a464
KS
4606 if (unlikely(!tmp ||
4607 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
936ca80d 4608 put_page(vmf->page);
fb09a464 4609 return tmp;
4294621f 4610 }
fb09a464
KS
4611 }
4612
9118c0cb 4613 ret |= finish_fault(vmf);
7267ec00
KS
4614 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4615 VM_FAULT_RETRY))) {
936ca80d
JK
4616 unlock_page(vmf->page);
4617 put_page(vmf->page);
f0c6d4d2 4618 return ret;
1da177e4 4619 }
b827e496 4620
89b15332 4621 ret |= fault_dirty_shared_page(vmf);
1d65f86d 4622 return ret;
54cb8821 4623}
d00806b1 4624
9a95f3cf 4625/*
c1e8d7c6 4626 * We enter with non-exclusive mmap_lock (to exclude vma changes,
9a95f3cf 4627 * but allow concurrent faults).
c1e8d7c6 4628 * The mmap_lock may have been released depending on flags and our
9138e47e 4629 * return value. See filemap_fault() and __folio_lock_or_retry().
c1e8d7c6 4630 * If mmap_lock is released, vma may become invalid (for example
fc8efd2d 4631 * by other thread calling munmap()).
9a95f3cf 4632 */
2b740303 4633static vm_fault_t do_fault(struct vm_fault *vmf)
54cb8821 4634{
82b0f8c3 4635 struct vm_area_struct *vma = vmf->vma;
fc8efd2d 4636 struct mm_struct *vm_mm = vma->vm_mm;
2b740303 4637 vm_fault_t ret;
54cb8821 4638
ff09d7ec
AK
4639 /*
4640 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4641 */
4642 if (!vma->vm_ops->fault) {
3db82b93
HD
4643 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4644 vmf->address, &vmf->ptl);
4645 if (unlikely(!vmf->pte))
ff09d7ec
AK
4646 ret = VM_FAULT_SIGBUS;
4647 else {
ff09d7ec
AK
4648 /*
4649 * Make sure this is not a temporary clearing of pte
4650 * by holding ptl and checking again. A R/M/W update
4651 * of pte involves: take ptl, clearing the pte so that
4652 * we don't have concurrent modification by hardware
4653 * followed by an update.
4654 */
c33c7948 4655 if (unlikely(pte_none(ptep_get(vmf->pte))))
ff09d7ec
AK
4656 ret = VM_FAULT_SIGBUS;
4657 else
4658 ret = VM_FAULT_NOPAGE;
4659
4660 pte_unmap_unlock(vmf->pte, vmf->ptl);
4661 }
4662 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
b0b9b3df
HD
4663 ret = do_read_fault(vmf);
4664 else if (!(vma->vm_flags & VM_SHARED))
4665 ret = do_cow_fault(vmf);
4666 else
4667 ret = do_shared_fault(vmf);
4668
4669 /* preallocated pagetable is unused: free it */
4670 if (vmf->prealloc_pte) {
fc8efd2d 4671 pte_free(vm_mm, vmf->prealloc_pte);
7f2b6ce8 4672 vmf->prealloc_pte = NULL;
b0b9b3df
HD
4673 }
4674 return ret;
54cb8821
NP
4675}
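/*
 * Dispatch summary (descriptive note): VMAs without a ->fault handler get
 * SIGBUS (unless the PTE was populated concurrently, which yields
 * VM_FAULT_NOPAGE), read faults go to do_read_fault(), writes to private
 * mappings to do_cow_fault(), and writes to shared mappings to
 * do_shared_fault(); any leftover preallocated page table is freed before
 * returning.
 */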
4676
f4c0d836
YS
4677int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4678 unsigned long addr, int page_nid, int *flags)
9532fec1
MG
4679{
4680 get_page(page);
4681
fc137c0d
R
4682	/* Record the current PID accessing the VMA */
4683 vma_set_access_pid_bit(vma);
4684
9532fec1 4685 count_vm_numa_event(NUMA_HINT_FAULTS);
04bb2f94 4686 if (page_nid == numa_node_id()) {
9532fec1 4687 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
04bb2f94
RR
4688 *flags |= TNF_FAULT_LOCAL;
4689 }
9532fec1
MG
4690
4691 return mpol_misplaced(page, vma, addr);
4692}
4693
2b740303 4694static vm_fault_t do_numa_page(struct vm_fault *vmf)
d10e63f2 4695{
82b0f8c3 4696 struct vm_area_struct *vma = vmf->vma;
4daae3b4 4697 struct page *page = NULL;
98fa15f3 4698 int page_nid = NUMA_NO_NODE;
6a56ccbc 4699 bool writable = false;
90572890 4700 int last_cpupid;
cbee9f88 4701 int target_nid;
04a86453 4702 pte_t pte, old_pte;
6688cc05 4703 int flags = 0;
d10e63f2
MG
4704
4705 /*
166f61b9
TH
4706 * The "pte" at this point cannot be used safely without
4707 * validation through pte_unmap_same(). It's of NUMA type but
4708 * the pfn may be screwed if the read is non atomic.
166f61b9 4709 */
82b0f8c3 4710 spin_lock(vmf->ptl);
c33c7948 4711 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
82b0f8c3 4712 pte_unmap_unlock(vmf->pte, vmf->ptl);
4daae3b4
MG
4713 goto out;
4714 }
4715
b99a342d
HY
4716 /* Get the normal PTE */
4717 old_pte = ptep_get(vmf->pte);
04a86453 4718 pte = pte_modify(old_pte, vma->vm_page_prot);
d10e63f2 4719
6a56ccbc
DH
4720 /*
4721 * Detect now whether the PTE could be writable; this information
4722 * is only valid while holding the PT lock.
4723 */
4724 writable = pte_write(pte);
4725 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
4726 can_change_pte_writable(vma, vmf->address, pte))
4727 writable = true;
4728
82b0f8c3 4729 page = vm_normal_page(vma, vmf->address, pte);
3218f871 4730 if (!page || is_zone_device_page(page))
b99a342d 4731 goto out_map;
d10e63f2 4732
e81c4802 4733 /* TODO: handle PTE-mapped THP */
b99a342d
HY
4734 if (PageCompound(page))
4735 goto out_map;
e81c4802 4736
6688cc05 4737 /*
bea66fbd
MG
4738 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4739 * much anyway since they can be in shared cache state. This misses
4740 * the case where a mapping is writable but the process never writes
4741 * to it but pte_write gets cleared during protection updates and
4742 * pte_dirty has unpredictable behaviour between PTE scan updates,
4743 * background writeback, dirty balancing and application behaviour.
6688cc05 4744 */
6a56ccbc 4745 if (!writable)
6688cc05
PZ
4746 flags |= TNF_NO_GROUP;
4747
dabe1d99
RR
4748 /*
4749 * Flag if the page is shared between multiple address spaces. This
4750 * is later used when determining whether to group tasks together.
4751 */
4752 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4753 flags |= TNF_SHARED;
4754
8191acbd 4755 page_nid = page_to_nid(page);
33024536
HY
4756 /*
4757 * In memory tiering mode, the cpupid field of a slow-memory page is
4758 * used to record the page access time, so use the default value here.
4759 */
4760 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
4761 !node_is_toptier(page_nid))
4762 last_cpupid = (-1 & LAST_CPUPID_MASK);
4763 else
4764 last_cpupid = page_cpupid_last(page);
82b0f8c3 4765 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
bae473a4 4766 &flags);
98fa15f3 4767 if (target_nid == NUMA_NO_NODE) {
4daae3b4 4768 put_page(page);
b99a342d 4769 goto out_map;
4daae3b4 4770 }
b99a342d 4771 pte_unmap_unlock(vmf->pte, vmf->ptl);
6a56ccbc 4772 writable = false;
4daae3b4
MG
4773
4774 /* Migrate to the requested node */
bf90ac19 4775 if (migrate_misplaced_page(page, vma, target_nid)) {
8191acbd 4776 page_nid = target_nid;
6688cc05 4777 flags |= TNF_MIGRATED;
b99a342d 4778 } else {
074c2381 4779 flags |= TNF_MIGRATE_FAIL;
c7ad0880
HD
4780 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4781 vmf->address, &vmf->ptl);
4782 if (unlikely(!vmf->pte))
4783 goto out;
c33c7948 4784 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
b99a342d
HY
4785 pte_unmap_unlock(vmf->pte, vmf->ptl);
4786 goto out;
4787 }
4788 goto out_map;
4789 }
4daae3b4
MG
4790
4791out:
98fa15f3 4792 if (page_nid != NUMA_NO_NODE)
6688cc05 4793 task_numa_fault(last_cpupid, page_nid, 1, flags);
d10e63f2 4794 return 0;
b99a342d
HY
4795out_map:
4796 /*
4797 * Make it present again. Depending on how the arch implements
4798 * non-accessible ptes, some can allow access by kernel mode.
4799 */
4800 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4801 pte = pte_modify(old_pte, vma->vm_page_prot);
4802 pte = pte_mkyoung(pte);
6a56ccbc 4803 if (writable)
b99a342d
HY
4804 pte = pte_mkwrite(pte);
4805 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4806 update_mmu_cache(vma, vmf->address, vmf->pte);
4807 pte_unmap_unlock(vmf->pte, vmf->ptl);
4808 goto out;
d10e63f2
MG
4809}
4810
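/*
 * Fault in a huge pmd: anonymous VMAs get a transparent huge page, file
 * mappings go through the vma's ->huge_fault() handler. VM_FAULT_FALLBACK
 * tells the caller to retry the fault at the pte level instead.
 */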
2b740303 4811static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
b96375f7 4812{
f4200391 4813 if (vma_is_anonymous(vmf->vma))
82b0f8c3 4814 return do_huge_pmd_anonymous_page(vmf);
a2d58167 4815 if (vmf->vma->vm_ops->huge_fault)
c791ace1 4816 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
b96375f7
MW
4817 return VM_FAULT_FALLBACK;
4818}
4819
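/*
 * Write or unshare fault on a huge pmd. Anonymous THPs are handled by
 * userfaultfd-wp or do_huge_pmd_wp_page(); shared mappings try the vma's
 * ->huge_fault() handler. Otherwise the pmd is split and the fault falls
 * back to the pte level.
 */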
183f24aa 4820/* `inline' is required to avoid gcc 4.1.2 build error */
5db4f15c 4821static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
b96375f7 4822{
c89357e2 4823 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
aea06577 4824 vm_fault_t ret;
c89357e2 4825
529b930b 4826 if (vma_is_anonymous(vmf->vma)) {
c89357e2
DH
4827 if (likely(!unshare) &&
4828 userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
529b930b 4829 return handle_userfault(vmf, VM_UFFD_WP);
5db4f15c 4830 return do_huge_pmd_wp_page(vmf);
529b930b 4831 }
327e9fd4 4832
aea06577
DH
4833 if (vmf->vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
4834 if (vmf->vma->vm_ops->huge_fault) {
4835 ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4836 if (!(ret & VM_FAULT_FALLBACK))
4837 return ret;
4838 }
327e9fd4 4839 }
af9e4d5f 4840
327e9fd4 4841 /* COW or write-notify handled on pte level: split pmd. */
82b0f8c3 4842 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
af9e4d5f 4843
b96375f7
MW
4844 return VM_FAULT_FALLBACK;
4845}
4846
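/*
 * Fault in a huge pud: only file mappings with a ->huge_fault() handler are
 * supported, since anonymous PUD-sized THPs do not exist yet.
 */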
2b740303 4847static vm_fault_t create_huge_pud(struct vm_fault *vmf)
a00cc7d9 4848{
14c99d65
GJ
4849#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
4850 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4851 /* No support for anonymous transparent PUD pages yet */
4852 if (vma_is_anonymous(vmf->vma))
4853 return VM_FAULT_FALLBACK;
4854 if (vmf->vma->vm_ops->huge_fault)
4855 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4856#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4857 return VM_FAULT_FALLBACK;
4858}
4859
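/*
 * Write fault on a huge pud: try the vma's ->huge_fault() handler for shared
 * mappings, otherwise split the pud and retry the fault at a lower level.
 */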
4860static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4861{
327e9fd4
THV
4862#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
4863 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
aea06577
DH
4864 vm_fault_t ret;
4865
a00cc7d9
MW
4866 /* No support for anonymous transparent PUD pages yet */
4867 if (vma_is_anonymous(vmf->vma))
327e9fd4 4868 goto split;
aea06577
DH
4869 if (vmf->vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
4870 if (vmf->vma->vm_ops->huge_fault) {
4871 ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4872 if (!(ret & VM_FAULT_FALLBACK))
4873 return ret;
4874 }
327e9fd4
THV
4875 }
4876split:
4877 /* COW or write-notify not handled on PUD level: split pud. */
4878 __split_huge_pud(vmf->vma, vmf->pud, vmf->address);
14c99d65 4879#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
a00cc7d9
MW
4880 return VM_FAULT_FALLBACK;
4881}
4882
1da177e4
LT
4883/*
4884 * These routines also need to handle stuff like marking pages dirty
4885 * and/or accessed for architectures that don't do it in hardware (most
4886 * RISC architectures). The early dirtying is also good on the i386.
4887 *
4888 * There is also a hook called "update_mmu_cache()" that architectures
4889 * with external mmu caches can use to update those (i.e. the Sparc or
4890 * PowerPC hashed page tables that act as extended TLBs).
4891 *
c1e8d7c6 4892 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
7267ec00 4893 * concurrent faults).
9a95f3cf 4894 *
c1e8d7c6 4895 * The mmap_lock may have been released depending on flags and our return value.
9138e47e 4896 * See filemap_fault() and __folio_lock_or_retry().
1da177e4 4897 */
2b740303 4898static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
1da177e4
LT
4899{
4900 pte_t entry;
4901
82b0f8c3 4902 if (unlikely(pmd_none(*vmf->pmd))) {
7267ec00
KS
4903 /*
4904 * Leave __pte_alloc() until later: because vm_ops->fault may
4905 * want to allocate a huge page, and if we expose the page table
4906 * for an instant, it will be difficult to retract from
4907 * concurrent faults and from rmap lookups.
4908 */
82b0f8c3 4909 vmf->pte = NULL;
f46f2ade 4910 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
7267ec00 4911 } else {
7267ec00
KS
4912 /*
4913 * A regular pmd is established and it can't morph into a huge
c7ad0880
HD
4914 * pmd by anon khugepaged, since that takes mmap_lock in write
4915 * mode; but shmem or file collapse to THP could still morph
4916 * it into a huge pmd: just retry later if so.
7267ec00 4917 */
c7ad0880
HD
4918 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
4919 vmf->address, &vmf->ptl);
4920 if (unlikely(!vmf->pte))
4921 return 0;
26e1a0c3 4922 vmf->orig_pte = ptep_get_lockless(vmf->pte);
f46f2ade 4923 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
7267ec00 4924
2994302b 4925 if (pte_none(vmf->orig_pte)) {
82b0f8c3
JK
4926 pte_unmap(vmf->pte);
4927 vmf->pte = NULL;
65500d23 4928 }
1da177e4
LT
4929 }
4930
2bad466c
PX
4931 if (!vmf->pte)
4932 return do_pte_missing(vmf);
7267ec00 4933
2994302b
JK
4934 if (!pte_present(vmf->orig_pte))
4935 return do_swap_page(vmf);
7267ec00 4936
2994302b
JK
4937 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4938 return do_numa_page(vmf);
d10e63f2 4939
82b0f8c3 4940 spin_lock(vmf->ptl);
2994302b 4941 entry = vmf->orig_pte;
c33c7948 4942 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
7df67697 4943 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
8f4e2101 4944 goto unlock;
7df67697 4945 }
c89357e2 4946 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
f6f37321 4947 if (!pte_write(entry))
2994302b 4948 return do_wp_page(vmf);
c89357e2
DH
4949 else if (likely(vmf->flags & FAULT_FLAG_WRITE))
4950 entry = pte_mkdirty(entry);
1da177e4
LT
4951 }
4952 entry = pte_mkyoung(entry);
82b0f8c3
JK
4953 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4954 vmf->flags & FAULT_FLAG_WRITE)) {
4955 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
1a44e149 4956 } else {
b7333b58
YS
4957 /* Skip spurious TLB flush for retried page fault */
4958 if (vmf->flags & FAULT_FLAG_TRIED)
4959 goto unlock;
1a44e149
AA
4960 /*
4961 * This is needed only for protection faults but the arch code
4962 * is not yet telling us if this is a protection fault or not.
4963 * This still avoids useless tlb flushes for .text page faults
4964 * with threads.
4965 */
82b0f8c3 4966 if (vmf->flags & FAULT_FLAG_WRITE)
99c29133
GS
4967 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
4968 vmf->pte);
1a44e149 4969 }
8f4e2101 4970unlock:
82b0f8c3 4971 pte_unmap_unlock(vmf->pte, vmf->ptl);
83c54070 4972 return 0;
1da177e4
LT
4973}
4974
4975/*
4976 * By the time we get here, we already hold the mm semaphore
9a95f3cf 4977 *
c1e8d7c6 4978 * The mmap_lock may have been released depending on flags and our
9138e47e 4979 * return value. See filemap_fault() and __folio_lock_or_retry().
1da177e4 4980 */
2b740303
SJ
4981static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4982 unsigned long address, unsigned int flags)
1da177e4 4983{
82b0f8c3 4984 struct vm_fault vmf = {
bae473a4 4985 .vma = vma,
1a29d85e 4986 .address = address & PAGE_MASK,
824ddc60 4987 .real_address = address,
bae473a4 4988 .flags = flags,
0721ec8b 4989 .pgoff = linear_page_index(vma, address),
667240e0 4990 .gfp_mask = __get_fault_gfp_mask(vma),
bae473a4 4991 };
dcddffd4 4992 struct mm_struct *mm = vma->vm_mm;
7da4e2cb 4993 unsigned long vm_flags = vma->vm_flags;
1da177e4 4994 pgd_t *pgd;
c2febafc 4995 p4d_t *p4d;
2b740303 4996 vm_fault_t ret;
1da177e4 4997
1da177e4 4998 pgd = pgd_offset(mm, address);
c2febafc
KS
4999 p4d = p4d_alloc(mm, pgd, address);
5000 if (!p4d)
5001 return VM_FAULT_OOM;
a00cc7d9 5002
c2febafc 5003 vmf.pud = pud_alloc(mm, p4d, address);
a00cc7d9 5004 if (!vmf.pud)
c74df32c 5005 return VM_FAULT_OOM;
625110b5 5006retry_pud:
7da4e2cb 5007 if (pud_none(*vmf.pud) &&
a7f4e6e4 5008 hugepage_vma_check(vma, vm_flags, false, true, true)) {
a00cc7d9
MW
5009 ret = create_huge_pud(&vmf);
5010 if (!(ret & VM_FAULT_FALLBACK))
5011 return ret;
5012 } else {
5013 pud_t orig_pud = *vmf.pud;
5014
5015 barrier();
5016 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
a00cc7d9 5017
c89357e2
DH
5018 /*
5019 * TODO once we support anonymous PUDs: NUMA case and
5020 * FAULT_FLAG_UNSHARE handling.
5021 */
5022 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
a00cc7d9
MW
5023 ret = wp_huge_pud(&vmf, orig_pud);
5024 if (!(ret & VM_FAULT_FALLBACK))
5025 return ret;
5026 } else {
5027 huge_pud_set_accessed(&vmf, orig_pud);
5028 return 0;
5029 }
5030 }
5031 }
5032
5033 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
82b0f8c3 5034 if (!vmf.pmd)
c74df32c 5035 return VM_FAULT_OOM;
625110b5
TH
5036
5037 /* Huge pud page fault raced with pmd_alloc? */
5038 if (pud_trans_unstable(vmf.pud))
5039 goto retry_pud;
5040
7da4e2cb 5041 if (pmd_none(*vmf.pmd) &&
a7f4e6e4 5042 hugepage_vma_check(vma, vm_flags, false, true, true)) {
a2d58167 5043 ret = create_huge_pmd(&vmf);
c0292554
KS
5044 if (!(ret & VM_FAULT_FALLBACK))
5045 return ret;
71e3aac0 5046 } else {
26e1a0c3 5047 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
1f1d06c3 5048
5db4f15c 5049 if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
84c3fc4e 5050 VM_BUG_ON(thp_migration_supported() &&
5db4f15c
YS
5051 !is_pmd_migration_entry(vmf.orig_pmd));
5052 if (is_pmd_migration_entry(vmf.orig_pmd))
84c3fc4e
ZY
5053 pmd_migration_entry_wait(mm, vmf.pmd);
5054 return 0;
5055 }
5db4f15c
YS
5056 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
5057 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
5058 return do_huge_pmd_numa_page(&vmf);
d10e63f2 5059
c89357e2
DH
5060 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
5061 !pmd_write(vmf.orig_pmd)) {
5db4f15c 5062 ret = wp_huge_pmd(&vmf);
9845cbbd
KS
5063 if (!(ret & VM_FAULT_FALLBACK))
5064 return ret;
a1dd450b 5065 } else {
5db4f15c 5066 huge_pmd_set_accessed(&vmf);
9845cbbd 5067 return 0;
1f1d06c3 5068 }
71e3aac0
AA
5069 }
5070 }
5071
82b0f8c3 5072 return handle_pte_fault(&vmf);
1da177e4
LT
5073}
5074
bce617ed 5075/**
f0953a1b 5076 * mm_account_fault - Do page fault accounting
bce617ed
PX
5077 *
5078 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting
5079 * of perf event counters, but we'll still do the per-task accounting to
5080 * the task who triggered this page fault.
5081 * @address: the faulted address.
5082 * @flags: the fault flags.
5083 * @ret: the fault retcode.
5084 *
f0953a1b 5085 * This will take care of most of the page fault accounting. Meanwhile, it
bce617ed 5086 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
f0953a1b 5087 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
bce617ed
PX
5088 * still be in per-arch page fault handlers at the entry of page fault.
5089 */
53156443 5090static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
bce617ed
PX
5091 unsigned long address, unsigned int flags,
5092 vm_fault_t ret)
5093{
5094 bool major;
5095
53156443
SB
5096 /* Incomplete faults will be accounted upon completion. */
5097 if (ret & VM_FAULT_RETRY)
5098 return;
5099
bce617ed 5100 /*
53156443
SB
5101 * To preserve the behavior of older kernels, PGFAULT counters record
5102 * both successful and failed faults, as opposed to perf counters,
5103 * which ignore failed cases.
bce617ed 5104 */
53156443
SB
5105 count_vm_event(PGFAULT);
5106 count_memcg_event_mm(mm, PGFAULT);
5107
5108 /*
5109 * Do not account for unsuccessful faults (e.g. when the address wasn't
5110 * valid). That includes arch_vma_access_permitted() failing before
5111 * reaching here. So this is not a "this many hardware page faults"
5112 * counter. We should use the hw profiling for that.
5113 */
5114 if (ret & VM_FAULT_ERROR)
bce617ed
PX
5115 return;
5116
5117 /*
5118 * We define the fault as a major fault when the final successful fault
5119 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
5120 * handle it immediately previously).
5121 */
5122 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
5123
a2beb5f1
PX
5124 if (major)
5125 current->maj_flt++;
5126 else
5127 current->min_flt++;
5128
bce617ed 5129 /*
a2beb5f1
PX
5130 * If the fault is done for GUP, regs will be NULL. We only do the
5131 * accounting for the per thread fault counters who triggered the
5132 * fault, and we skip the perf event updates.
bce617ed
PX
5133 */
5134 if (!regs)
5135 return;
5136
a2beb5f1 5137 if (major)
bce617ed 5138 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
a2beb5f1 5139 else
bce617ed 5140 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
bce617ed
PX
5141}
5142
ec1c86b2
YZ
5143#ifdef CONFIG_LRU_GEN
5144static void lru_gen_enter_fault(struct vm_area_struct *vma)
5145{
8788f678
YZ
5146 /* the LRU algorithm only applies to accesses with recency */
5147 current->in_lru_fault = vma_has_recency(vma);
ec1c86b2
YZ
5148}
5149
5150static void lru_gen_exit_fault(void)
5151{
5152 current->in_lru_fault = false;
5153}
5154#else
5155static void lru_gen_enter_fault(struct vm_area_struct *vma)
5156{
5157}
5158
5159static void lru_gen_exit_fault(void)
5160{
5161}
5162#endif /* CONFIG_LRU_GEN */
5163
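/*
 * Validate and normalise the fault flags: FAULT_FLAG_UNSHARE is mutually
 * exclusive with FAULT_FLAG_WRITE and only meaningful for COW mappings,
 * and write faults are rejected on mappings that can never become writable.
 */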
cdc5021c
DH
5164static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
5165 unsigned int *flags)
5166{
5167 if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
5168 if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
5169 return VM_FAULT_SIGSEGV;
5170 /*
5171 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
5172 * just treat it like an ordinary read-fault otherwise.
5173 */
5174 if (!is_cow_mapping(vma->vm_flags))
5175 *flags &= ~FAULT_FLAG_UNSHARE;
79881fed
DH
5176 } else if (*flags & FAULT_FLAG_WRITE) {
5177 /* Write faults on read-only mappings are impossible ... */
5178 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
5179 return VM_FAULT_SIGSEGV;
5180 /* ... and FOLL_FORCE only applies to COW mappings. */
5181 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
5182 !is_cow_mapping(vma->vm_flags)))
5183 return VM_FAULT_SIGSEGV;
cdc5021c
DH
5184 }
5185 return 0;
5186}
5187
9a95f3cf
PC
5188/*
5189 * By the time we get here, we already hold the mm semaphore
5190 *
c1e8d7c6 5191 * The mmap_lock may have been released depending on flags and our
9138e47e 5192 * return value. See filemap_fault() and __folio_lock_or_retry().
9a95f3cf 5193 */
2b740303 5194vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
bce617ed 5195 unsigned int flags, struct pt_regs *regs)
519e5247 5196{
53156443
SB
5197 /* If the fault handler drops the mmap_lock, vma may be freed */
5198 struct mm_struct *mm = vma->vm_mm;
2b740303 5199 vm_fault_t ret;
519e5247
JW
5200
5201 __set_current_state(TASK_RUNNING);
5202
cdc5021c
DH
5203 ret = sanitize_fault_flags(vma, &flags);
5204 if (ret)
53156443 5205 goto out;
cdc5021c 5206
de0c799b
LD
5207 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
5208 flags & FAULT_FLAG_INSTRUCTION,
53156443
SB
5209 flags & FAULT_FLAG_REMOTE)) {
5210 ret = VM_FAULT_SIGSEGV;
5211 goto out;
5212 }
de0c799b 5213
519e5247
JW
5214 /*
5215 * Enable the memcg OOM handling for faults triggered in user
5216 * space. Kernel faults are handled more gracefully.
5217 */
5218 if (flags & FAULT_FLAG_USER)
29ef680a 5219 mem_cgroup_enter_user_fault();
519e5247 5220
ec1c86b2
YZ
5221 lru_gen_enter_fault(vma);
5222
bae473a4
KS
5223 if (unlikely(is_vm_hugetlb_page(vma)))
5224 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5225 else
5226 ret = __handle_mm_fault(vma, address, flags);
519e5247 5227
ec1c86b2
YZ
5228 lru_gen_exit_fault();
5229
49426420 5230 if (flags & FAULT_FLAG_USER) {
29ef680a 5231 mem_cgroup_exit_user_fault();
166f61b9
TH
5232 /*
5233 * The task may have entered a memcg OOM situation but
5234 * if the allocation error was handled gracefully (no
5235 * VM_FAULT_OOM), there is no need to kill anything.
5236 * Just clean up the OOM state peacefully.
5237 */
5238 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5239 mem_cgroup_oom_synchronize(false);
49426420 5240 }
53156443
SB
5241out:
5242 mm_account_fault(mm, regs, address, flags, ret);
bce617ed 5243
519e5247
JW
5244 return ret;
5245}
e1d6d01a 5246EXPORT_SYMBOL_GPL(handle_mm_fault);
519e5247 5247
50ee3253
SB
5248#ifdef CONFIG_PER_VMA_LOCK
5249/*
5250 * Look up and lock a VMA under RCU protection. The returned VMA is guaranteed
5251 * to be stable and not isolated. If the VMA is not found or is being modified,
5252 * the function returns NULL.
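 * On success the VMA has been read-locked; the caller is responsible for
 * releasing it with vma_end_read().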
5253 */
5254struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
5255 unsigned long address)
5256{
5257 MA_STATE(mas, &mm->mm_mt, address, address);
5258 struct vm_area_struct *vma;
5259
5260 rcu_read_lock();
5261retry:
5262 vma = mas_walk(&mas);
5263 if (!vma)
5264 goto inval;
5265
7a7f0946
AR
5266 /* Only anonymous and tcp vmas are supported for now */
5267 if (!vma_is_anonymous(vma) && !vma_is_tcp(vma))
50ee3253
SB
5268 goto inval;
5269
2ac0af1b 5270 /* find_mergeable_anon_vma uses adjacent vmas which are not locked */
7a7f0946 5271 if (!vma->anon_vma && !vma_is_tcp(vma))
2ac0af1b
SB
5272 goto inval;
5273
50ee3253
SB
5274 if (!vma_start_read(vma))
5275 goto inval;
5276
444eeb17
SB
5277 /*
5278 * Due to the possibility of userfault handler dropping mmap_lock, avoid
5279 * it for now and fall back to page fault handling under mmap_lock.
5280 */
5281 if (userfaultfd_armed(vma)) {
5282 vma_end_read(vma);
5283 goto inval;
5284 }
5285
50ee3253
SB
5286 /* Check since vm_start/vm_end might change before we lock the VMA */
5287 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
5288 vma_end_read(vma);
5289 goto inval;
5290 }
5291
5292 /* Check if the VMA got isolated after we found it */
5293 if (vma->detached) {
5294 vma_end_read(vma);
52f23865 5295 count_vm_vma_lock_event(VMA_LOCK_MISS);
50ee3253
SB
5296 /* The area was replaced with another one */
5297 goto retry;
5298 }
5299
5300 rcu_read_unlock();
5301 return vma;
5302inval:
5303 rcu_read_unlock();
52f23865 5304 count_vm_vma_lock_event(VMA_LOCK_ABORT);
50ee3253
SB
5305 return NULL;
5306}
5307#endif /* CONFIG_PER_VMA_LOCK */
5308
90eceff1
KS
5309#ifndef __PAGETABLE_P4D_FOLDED
5310/*
5311 * Allocate p4d page table.
5312 * We've already handled the fast-path in-line.
5313 */
5314int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
5315{
5316 p4d_t *new = p4d_alloc_one(mm, address);
5317 if (!new)
5318 return -ENOMEM;
5319
90eceff1 5320 spin_lock(&mm->page_table_lock);
ed33b5a6 5321 if (pgd_present(*pgd)) { /* Another has populated it */
90eceff1 5322 p4d_free(mm, new);
ed33b5a6
QZ
5323 } else {
5324 smp_wmb(); /* See comment in pmd_install() */
90eceff1 5325 pgd_populate(mm, pgd, new);
ed33b5a6 5326 }
90eceff1
KS
5327 spin_unlock(&mm->page_table_lock);
5328 return 0;
5329}
5330#endif /* __PAGETABLE_P4D_FOLDED */
5331
1da177e4
LT
5332#ifndef __PAGETABLE_PUD_FOLDED
5333/*
5334 * Allocate page upper directory.
872fec16 5335 * We've already handled the fast-path in-line.
1da177e4 5336 */
c2febafc 5337int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
1da177e4 5338{
c74df32c
HD
5339 pud_t *new = pud_alloc_one(mm, address);
5340 if (!new)
1bb3630e 5341 return -ENOMEM;
1da177e4 5342
872fec16 5343 spin_lock(&mm->page_table_lock);
b4e98d9a
KS
5344 if (!p4d_present(*p4d)) {
5345 mm_inc_nr_puds(mm);
ed33b5a6 5346 smp_wmb(); /* See comment in pmd_install() */
c2febafc 5347 p4d_populate(mm, p4d, new);
b4e98d9a 5348 } else /* Another has populated it */
5e541973 5349 pud_free(mm, new);
c74df32c 5350 spin_unlock(&mm->page_table_lock);
1bb3630e 5351 return 0;
1da177e4
LT
5352}
5353#endif /* __PAGETABLE_PUD_FOLDED */
5354
5355#ifndef __PAGETABLE_PMD_FOLDED
5356/*
5357 * Allocate page middle directory.
872fec16 5358 * We've already handled the fast-path in-line.
1da177e4 5359 */
1bb3630e 5360int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1da177e4 5361{
a00cc7d9 5362 spinlock_t *ptl;
c74df32c
HD
5363 pmd_t *new = pmd_alloc_one(mm, address);
5364 if (!new)
1bb3630e 5365 return -ENOMEM;
1da177e4 5366
a00cc7d9 5367 ptl = pud_lock(mm, pud);
dc6c9a35
KS
5368 if (!pud_present(*pud)) {
5369 mm_inc_nr_pmds(mm);
ed33b5a6 5370 smp_wmb(); /* See comment in pmd_install() */
1bb3630e 5371 pud_populate(mm, pud, new);
ed33b5a6 5372 } else { /* Another has populated it */
5e541973 5373 pmd_free(mm, new);
ed33b5a6 5374 }
a00cc7d9 5375 spin_unlock(ptl);
1bb3630e 5376 return 0;
e0f39591 5377}
1da177e4
LT
5378#endif /* __PAGETABLE_PMD_FOLDED */
5379
0e5e64c0
MS
5380/**
5381 * follow_pte - look up PTE at a user virtual address
5382 * @mm: the mm_struct of the target address space
5383 * @address: user virtual address
5384 * @ptepp: location to store found PTE
5385 * @ptlp: location to store the lock for the PTE
5386 *
5387 * On a successful return, the pointer to the PTE is stored in @ptepp;
5388 * the corresponding lock is taken and its location is stored in @ptlp.
5389 * The contents of the PTE are only stable until @ptlp is released;
5390 * any further use, if any, must be protected against invalidation
5391 * with MMU notifiers.
5392 *
5393 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
5394 * should be taken for read.
5395 *
5396 * KVM uses this function. While it is arguably less bad than ``follow_pfn``,
5397 * it is not a good general-purpose API.
5398 *
5399 * Return: zero on success, -ve otherwise.
5400 */
5401int follow_pte(struct mm_struct *mm, unsigned long address,
5402 pte_t **ptepp, spinlock_t **ptlp)
f8ad0f49
JW
5403{
5404 pgd_t *pgd;
c2febafc 5405 p4d_t *p4d;
f8ad0f49
JW
5406 pud_t *pud;
5407 pmd_t *pmd;
5408 pte_t *ptep;
5409
5410 pgd = pgd_offset(mm, address);
5411 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
5412 goto out;
5413
c2febafc
KS
5414 p4d = p4d_offset(pgd, address);
5415 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
5416 goto out;
5417
5418 pud = pud_offset(p4d, address);
f8ad0f49
JW
5419 if (pud_none(*pud) || unlikely(pud_bad(*pud)))
5420 goto out;
5421
5422 pmd = pmd_offset(pud, address);
f66055ab 5423 VM_BUG_ON(pmd_trans_huge(*pmd));
f8ad0f49 5424
f8ad0f49 5425 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
3db82b93
HD
5426 if (!ptep)
5427 goto out;
c33c7948 5428 if (!pte_present(ptep_get(ptep)))
f8ad0f49
JW
5429 goto unlock;
5430 *ptepp = ptep;
5431 return 0;
5432unlock:
5433 pte_unmap_unlock(ptep, *ptlp);
5434out:
5435 return -EINVAL;
5436}
9fd6dad1
PB
5437EXPORT_SYMBOL_GPL(follow_pte);
5438
3b6748e2
JW
5439/**
5440 * follow_pfn - look up PFN at a user virtual address
5441 * @vma: memory mapping
5442 * @address: user virtual address
5443 * @pfn: location to store found PFN
5444 *
5445 * Only IO mappings and raw PFN mappings are allowed.
5446 *
9fd6dad1
PB
5447 * This function does not allow the caller to read the permissions
5448 * of the PTE. Do not use it.
5449 *
a862f68a 5450 * Return: zero and the pfn at @pfn on success, -ve otherwise.
3b6748e2
JW
5451 */
5452int follow_pfn(struct vm_area_struct *vma, unsigned long address,
5453 unsigned long *pfn)
5454{
5455 int ret = -EINVAL;
5456 spinlock_t *ptl;
5457 pte_t *ptep;
5458
5459 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5460 return ret;
5461
9fd6dad1 5462 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
3b6748e2
JW
5463 if (ret)
5464 return ret;
c33c7948 5465 *pfn = pte_pfn(ptep_get(ptep));
3b6748e2
JW
5466 pte_unmap_unlock(ptep, ptl);
5467 return 0;
5468}
5469EXPORT_SYMBOL(follow_pfn);
5470
28b2ee20 5471#ifdef CONFIG_HAVE_IOREMAP_PROT
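/*
 * Resolve the physical address and protection bits behind a VM_IO or
 * VM_PFNMAP mapping. The values are only a snapshot: the PTE lock is
 * dropped again before returning.
 */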
d87fe660 5472int follow_phys(struct vm_area_struct *vma,
5473 unsigned long address, unsigned int flags,
5474 unsigned long *prot, resource_size_t *phys)
28b2ee20 5475{
03668a4d 5476 int ret = -EINVAL;
28b2ee20
RR
5477 pte_t *ptep, pte;
5478 spinlock_t *ptl;
28b2ee20 5479
d87fe660 5480 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5481 goto out;
28b2ee20 5482
9fd6dad1 5483 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
d87fe660 5484 goto out;
c33c7948 5485 pte = ptep_get(ptep);
03668a4d 5486
f6f37321 5487 if ((flags & FOLL_WRITE) && !pte_write(pte))
28b2ee20 5488 goto unlock;
28b2ee20
RR
5489
5490 *prot = pgprot_val(pte_pgprot(pte));
03668a4d 5491 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
28b2ee20 5492
03668a4d 5493 ret = 0;
28b2ee20
RR
5494unlock:
5495 pte_unmap_unlock(ptep, ptl);
5496out:
d87fe660 5497 return ret;
28b2ee20
RR
5498}
5499
96667f8a
DV
5500/**
5501 * generic_access_phys - generic implementation for iomem mmap access
5502 * @vma: the vma to access
f0953a1b 5503 * @addr: userspace address, not relative offset within @vma
96667f8a
DV
5504 * @buf: buffer to read/write
5505 * @len: length of transfer
5506 * @write: set to FOLL_WRITE when writing, otherwise reading
5507 *
5508 * This is a generic implementation for &vm_operations_struct.access for an
5509 * iomem mapping. This callback is used by access_process_vm() when the @vma is
5510 * not page based.
5511 */
28b2ee20
RR
5512int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5513 void *buf, int len, int write)
5514{
5515 resource_size_t phys_addr;
5516 unsigned long prot = 0;
2bc7273b 5517 void __iomem *maddr;
96667f8a
DV
5518 pte_t *ptep, pte;
5519 spinlock_t *ptl;
5520 int offset = offset_in_page(addr);
5521 int ret = -EINVAL;
5522
5523 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5524 return -EINVAL;
5525
5526retry:
e913a8cd 5527 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
96667f8a 5528 return -EINVAL;
c33c7948 5529 pte = ptep_get(ptep);
96667f8a 5530 pte_unmap_unlock(ptep, ptl);
28b2ee20 5531
96667f8a
DV
5532 prot = pgprot_val(pte_pgprot(pte));
5533 phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5534
5535 if ((write & FOLL_WRITE) && !pte_write(pte))
28b2ee20
RR
5536 return -EINVAL;
5537
9cb12d7b 5538 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
24eee1e4 5539 if (!maddr)
5540 return -ENOMEM;
5541
e913a8cd 5542 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
96667f8a
DV
5543 goto out_unmap;
5544
c33c7948 5545 if (!pte_same(pte, ptep_get(ptep))) {
96667f8a
DV
5546 pte_unmap_unlock(ptep, ptl);
5547 iounmap(maddr);
5548
5549 goto retry;
5550 }
5551
28b2ee20
RR
5552 if (write)
5553 memcpy_toio(maddr + offset, buf, len);
5554 else
5555 memcpy_fromio(buf, maddr + offset, len);
96667f8a
DV
5556 ret = len;
5557 pte_unmap_unlock(ptep, ptl);
5558out_unmap:
28b2ee20
RR
5559 iounmap(maddr);
5560
96667f8a 5561 return ret;
28b2ee20 5562}
5a73633e 5563EXPORT_SYMBOL_GPL(generic_access_phys);
28b2ee20
RR
5564#endif
5565
0ec76a11 5566/*
d3f5ffca 5567 * Access another process' address space as given in mm.
0ec76a11 5568 */
d3f5ffca
JH
5569int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
5570 int len, unsigned int gup_flags)
0ec76a11 5571{
0ec76a11 5572 void *old_buf = buf;
442486ec 5573 int write = gup_flags & FOLL_WRITE;
0ec76a11 5574
d8ed45c5 5575 if (mmap_read_lock_killable(mm))
1e426fe2
KK
5576 return 0;
5577
183ff22b 5578 /* ignore errors, just check how much was successfully transferred */
0ec76a11 5579 while (len) {
ca5e8632 5580 int bytes, offset;
0ec76a11 5581 void *maddr;
ca5e8632
LS
5582 struct vm_area_struct *vma = NULL;
5583 struct page *page = get_user_page_vma_remote(mm, addr,
5584 gup_flags, &vma);
0ec76a11 5585
ca5e8632 5586 if (IS_ERR_OR_NULL(page)) {
dbffcd03
RR
5587#ifndef CONFIG_HAVE_IOREMAP_PROT
5588 break;
5589#else
ca5e8632
LS
5590 int res = 0;
5591
28b2ee20
RR
5592 /*
5593 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5594 * we can access using slightly different code.
5595 */
6581ccf0 5596 vma = vma_lookup(mm, addr);
3e418f98 5597 if (!vma)
28b2ee20
RR
5598 break;
5599 if (vma->vm_ops && vma->vm_ops->access)
ca5e8632 5600 res = vma->vm_ops->access(vma, addr, buf,
28b2ee20 5601 len, write);
ca5e8632 5602 if (res <= 0)
28b2ee20 5603 break;
ca5e8632 5604 bytes = res;
dbffcd03 5605#endif
0ec76a11 5606 } else {
28b2ee20
RR
5607 bytes = len;
5608 offset = addr & (PAGE_SIZE-1);
5609 if (bytes > PAGE_SIZE-offset)
5610 bytes = PAGE_SIZE-offset;
5611
5612 maddr = kmap(page);
5613 if (write) {
5614 copy_to_user_page(vma, page, addr,
5615 maddr + offset, buf, bytes);
5616 set_page_dirty_lock(page);
5617 } else {
5618 copy_from_user_page(vma, page, addr,
5619 buf, maddr + offset, bytes);
5620 }
5621 kunmap(page);
09cbfeaf 5622 put_page(page);
0ec76a11 5623 }
0ec76a11
DH
5624 len -= bytes;
5625 buf += bytes;
5626 addr += bytes;
5627 }
d8ed45c5 5628 mmap_read_unlock(mm);
0ec76a11
DH
5629
5630 return buf - old_buf;
5631}
03252919 5632
5ddd36b9 5633/**
ae91dbfc 5634 * access_remote_vm - access another process' address space
5ddd36b9
SW
5635 * @mm: the mm_struct of the target address space
5636 * @addr: start address to access
5637 * @buf: source or destination buffer
5638 * @len: number of bytes to transfer
6347e8d5 5639 * @gup_flags: flags modifying lookup behaviour
5ddd36b9
SW
5640 *
5641 * The caller must hold a reference on @mm.
a862f68a
MR
5642 *
5643 * Return: number of bytes copied from source to destination.
5ddd36b9
SW
5644 */
5645int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6347e8d5 5646 void *buf, int len, unsigned int gup_flags)
5ddd36b9 5647{
d3f5ffca 5648 return __access_remote_vm(mm, addr, buf, len, gup_flags);
5ddd36b9
SW
5649}
5650
206cb636
SW
5651/*
5652 * Access another process' address space.
5653 * Source/target buffer must be kernel space;
5654 * do not walk the page table directly, use get_user_pages().
5655 */
5656int access_process_vm(struct task_struct *tsk, unsigned long addr,
f307ab6d 5657 void *buf, int len, unsigned int gup_flags)
206cb636
SW
5658{
5659 struct mm_struct *mm;
5660 int ret;
5661
5662 mm = get_task_mm(tsk);
5663 if (!mm)
5664 return 0;
5665
d3f5ffca 5666 ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
442486ec 5667
206cb636
SW
5668 mmput(mm);
5669
5670 return ret;
5671}
fcd35857 5672EXPORT_SYMBOL_GPL(access_process_vm);
206cb636 5673
03252919
AK
5674/*
5675 * Print the name of a VMA.
5676 */
5677void print_vma_addr(char *prefix, unsigned long ip)
5678{
5679 struct mm_struct *mm = current->mm;
5680 struct vm_area_struct *vma;
5681
e8bff74a 5682 /*
0a7f682d 5683 * We might be running from an atomic context, so we cannot sleep.
e8bff74a 5684 */
d8ed45c5 5685 if (!mmap_read_trylock(mm))
e8bff74a
IM
5686 return;
5687
03252919
AK
5688 vma = find_vma(mm, ip);
5689 if (vma && vma->vm_file) {
5690 struct file *f = vma->vm_file;
0a7f682d 5691 char *buf = (char *)__get_free_page(GFP_NOWAIT);
03252919 5692 if (buf) {
2fbc57c5 5693 char *p;
03252919 5694
9bf39ab2 5695 p = file_path(f, buf, PAGE_SIZE);
03252919
AK
5696 if (IS_ERR(p))
5697 p = "?";
2fbc57c5 5698 printk("%s%s[%lx+%lx]", prefix, kbasename(p),
03252919
AK
5699 vma->vm_start,
5700 vma->vm_end - vma->vm_start);
5701 free_page((unsigned long)buf);
5702 }
5703 }
d8ed45c5 5704 mmap_read_unlock(mm);
03252919 5705}
3ee1afa3 5706
662bbcb2 5707#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
9ec23531 5708void __might_fault(const char *file, int line)
3ee1afa3 5709{
9ec23531 5710 if (pagefault_disabled())
662bbcb2 5711 return;
42a38756 5712 __might_sleep(file, line);
9ec23531 5713#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
662bbcb2 5714 if (current->mm)
da1c55f1 5715 might_lock_read(&current->mm->mmap_lock);
9ec23531 5716#endif
3ee1afa3 5717}
9ec23531 5718EXPORT_SYMBOL(__might_fault);
3ee1afa3 5719#endif
47ad8475
AA
5720
5721#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
c6ddfb6c
HY
5722/*
5723 * Process all subpages of the specified huge page with the specified
5724 * operation. The target subpage will be processed last to keep its
5725 * cache lines hot.
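 * Subpages are processed from the outer ends of the huge page towards the
 * target subpage, and the callback's error code, if any, is returned as
 * soon as a subpage fails.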
5726 */
1cb9dc4b 5727static inline int process_huge_page(
c6ddfb6c 5728 unsigned long addr_hint, unsigned int pages_per_huge_page,
1cb9dc4b 5729 int (*process_subpage)(unsigned long addr, int idx, void *arg),
c6ddfb6c 5730 void *arg)
47ad8475 5731{
1cb9dc4b 5732 int i, n, base, l, ret;
c79b57e4
HY
5733 unsigned long addr = addr_hint &
5734 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
47ad8475 5735
c6ddfb6c 5736 /* Process target subpage last to keep its cache lines hot */
47ad8475 5737 might_sleep();
c79b57e4
HY
5738 n = (addr_hint - addr) / PAGE_SIZE;
5739 if (2 * n <= pages_per_huge_page) {
c6ddfb6c 5740 /* If target subpage in first half of huge page */
c79b57e4
HY
5741 base = 0;
5742 l = n;
c6ddfb6c 5743 /* Process subpages at the end of huge page */
c79b57e4
HY
5744 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5745 cond_resched();
1cb9dc4b
LS
5746 ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
5747 if (ret)
5748 return ret;
c79b57e4
HY
5749 }
5750 } else {
c6ddfb6c 5751 /* If target subpage in second half of huge page */
c79b57e4
HY
5752 base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5753 l = pages_per_huge_page - n;
c6ddfb6c 5754 /* Process subpages at the begin of huge page */
c79b57e4
HY
5755 for (i = 0; i < base; i++) {
5756 cond_resched();
1cb9dc4b
LS
5757 ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
5758 if (ret)
5759 return ret;
c79b57e4
HY
5760 }
5761 }
5762 /*
c6ddfb6c
HY
5763 * Process remaining subpages in left-right-left-right pattern
5764 * towards the target subpage
c79b57e4
HY
5765 */
5766 for (i = 0; i < l; i++) {
5767 int left_idx = base + i;
5768 int right_idx = base + 2 * l - 1 - i;
5769
5770 cond_resched();
1cb9dc4b
LS
5771 ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5772 if (ret)
5773 return ret;
47ad8475 5774 cond_resched();
1cb9dc4b
LS
5775 ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5776 if (ret)
5777 return ret;
47ad8475 5778 }
1cb9dc4b 5779 return 0;
47ad8475
AA
5780}
5781
c6ddfb6c
HY
5782static void clear_gigantic_page(struct page *page,
5783 unsigned long addr,
5784 unsigned int pages_per_huge_page)
5785{
5786 int i;
14455eab 5787 struct page *p;
c6ddfb6c
HY
5788
5789 might_sleep();
14455eab
CL
5790 for (i = 0; i < pages_per_huge_page; i++) {
5791 p = nth_page(page, i);
c6ddfb6c
HY
5792 cond_resched();
5793 clear_user_highpage(p, addr + i * PAGE_SIZE);
5794 }
5795}
5796
1cb9dc4b 5797static int clear_subpage(unsigned long addr, int idx, void *arg)
c6ddfb6c
HY
5798{
5799 struct page *page = arg;
5800
5801 clear_user_highpage(page + idx, addr);
1cb9dc4b 5802 return 0;
c6ddfb6c
HY
5803}
5804
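/*
 * Clear a huge page, touching the target subpage last so that its cache
 * lines are still hot when the faulting task resumes. Gigantic pages are
 * simply cleared front to back.
 */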
5805void clear_huge_page(struct page *page,
5806 unsigned long addr_hint, unsigned int pages_per_huge_page)
5807{
5808 unsigned long addr = addr_hint &
5809 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5810
5811 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5812 clear_gigantic_page(page, addr, pages_per_huge_page);
5813 return;
5814 }
5815
5816 process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5817}
5818
1cb9dc4b 5819static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
c0e8150e
Z
5820 unsigned long addr,
5821 struct vm_area_struct *vma,
5822 unsigned int pages_per_huge_page)
47ad8475
AA
5823{
5824 int i;
c0e8150e
Z
5825 struct page *dst_page;
5826 struct page *src_page;
47ad8475 5827
14455eab 5828 for (i = 0; i < pages_per_huge_page; i++) {
c0e8150e
Z
5829 dst_page = folio_page(dst, i);
5830 src_page = folio_page(src, i);
14455eab 5831
47ad8475 5832 cond_resched();
1cb9dc4b
LS
5833 if (copy_mc_user_highpage(dst_page, src_page,
5834 addr + i*PAGE_SIZE, vma)) {
5835 memory_failure_queue(page_to_pfn(src_page), 0);
5836 return -EHWPOISON;
5837 }
47ad8475 5838 }
1cb9dc4b 5839 return 0;
47ad8475
AA
5840}
5841
c9f4cd71
HY
5842struct copy_subpage_arg {
5843 struct page *dst;
5844 struct page *src;
5845 struct vm_area_struct *vma;
5846};
5847
1cb9dc4b 5848static int copy_subpage(unsigned long addr, int idx, void *arg)
c9f4cd71
HY
5849{
5850 struct copy_subpage_arg *copy_arg = arg;
5851
1cb9dc4b
LS
5852 if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5853 addr, copy_arg->vma)) {
5854 memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0);
5855 return -EHWPOISON;
5856 }
5857 return 0;
c9f4cd71
HY
5858}
5859
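/*
 * Copy a large folio subpage by subpage with machine-check recovery: if a
 * source subpage is poisoned, the copy stops and -EHWPOISON is returned
 * instead of crashing the kernel.
 */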
1cb9dc4b
LS
5860int copy_user_large_folio(struct folio *dst, struct folio *src,
5861 unsigned long addr_hint, struct vm_area_struct *vma)
47ad8475 5862{
c0e8150e 5863 unsigned int pages_per_huge_page = folio_nr_pages(dst);
c9f4cd71
HY
5864 unsigned long addr = addr_hint &
5865 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5866 struct copy_subpage_arg arg = {
c0e8150e
Z
5867 .dst = &dst->page,
5868 .src = &src->page,
c9f4cd71
HY
5869 .vma = vma,
5870 };
47ad8475 5871
1cb9dc4b
LS
5872 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
5873 return copy_user_gigantic_page(dst, src, addr, vma,
5874 pages_per_huge_page);
47ad8475 5875
1cb9dc4b 5876 return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
47ad8475 5877}
fa4d75c1 5878
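/*
 * Copy user data into a large folio one subpage at a time. Like
 * copy_from_user(), the return value is the number of bytes that could not
 * be copied, i.e. zero on success.
 */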
e87340ca
Z
5879long copy_folio_from_user(struct folio *dst_folio,
5880 const void __user *usr_src,
5881 bool allow_pagefault)
fa4d75c1 5882{
e87340ca 5883 void *kaddr;
fa4d75c1 5884 unsigned long i, rc = 0;
e87340ca
Z
5885 unsigned int nr_pages = folio_nr_pages(dst_folio);
5886 unsigned long ret_val = nr_pages * PAGE_SIZE;
14455eab 5887 struct page *subpage;
fa4d75c1 5888
e87340ca
Z
5889 for (i = 0; i < nr_pages; i++) {
5890 subpage = folio_page(dst_folio, i);
5891 kaddr = kmap_local_page(subpage);
0d508c1f
Z
5892 if (!allow_pagefault)
5893 pagefault_disable();
e87340ca 5894 rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
0d508c1f
Z
5895 if (!allow_pagefault)
5896 pagefault_enable();
e87340ca 5897 kunmap_local(kaddr);
fa4d75c1
MK
5898
5899 ret_val -= (PAGE_SIZE - rc);
5900 if (rc)
5901 break;
5902
e763243c
MS
5903 flush_dcache_page(subpage);
5904
fa4d75c1
MK
5905 cond_resched();
5906 }
5907 return ret_val;
5908}
47ad8475 5909#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
49076ec2 5910
40b64acd 5911#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
b35f1819
KS
5912
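/* PTE page-table locks that do not fit in struct page come from this slab. */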
5913static struct kmem_cache *page_ptl_cachep;
5914
5915void __init ptlock_cache_init(void)
5916{
5917 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5918 SLAB_PANIC, NULL);
5919}
5920
539edb58 5921bool ptlock_alloc(struct page *page)
49076ec2
KS
5922{
5923 spinlock_t *ptl;
5924
b35f1819 5925 ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
49076ec2
KS
5926 if (!ptl)
5927 return false;
539edb58 5928 page->ptl = ptl;
49076ec2
KS
5929 return true;
5930}
5931
539edb58 5932void ptlock_free(struct page *page)
49076ec2 5933{
b35f1819 5934 kmem_cache_free(page_ptl_cachep, page->ptl);
49076ec2
KS
5935}
5936#endif