// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);
static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
static bool vmf_pte_changed(struct vm_fault *vmf);

/*
 * Return true if the original pte was a uffd-wp pte marker (so the pte was
 * wr-protected).
 */
static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
{
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
		return false;

	return pte_marker_uffd_wp(vmf->orig_pte);
}

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, then end
 * of ZONE_NORMAL.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
	/*
	 * Transitioning a PTE from 'old' to 'young' can be expensive on
	 * some architectures, even if it's performed in hardware. By
	 * default, "false" means prefaulted entries will be 'young'.
	 */
	return false;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

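/*
 * Editor's note - usage sketch, not part of the upstream source: besides the
 * "norandmaps" boot parameter handled above, randomize_va_space is exposed
 * as a sysctl of the same name, so an administrator can toggle it at run
 * time, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/randomize_va_space	(disable)
 *	echo 2 > /proc/sys/kernel/randomize_va_space	(stack/mmap/brk randomization)
 */
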
unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
early_initcall(init_zero_pfn);

void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
	trace_rss_stat(mm, member);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE,
	 * (see pte_free_tlb()), flush the tlb if we need
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling, bool mm_wr_locked)
{
	do {
		unsigned long addr = vma->vm_start;
		struct vm_area_struct *next;

		/*
		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
		 * be 0.  This will underflow and is okay.
		 */
		next = mas_find(mas, ceiling - 1);
		if (unlikely(xa_is_zero(next)))
			next = NULL;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		if (mm_wr_locked)
			vma_start_write(vma);
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = mas_find(mas, ceiling - 1);
				if (unlikely(xa_is_zero(next)))
					next = NULL;
				if (mm_wr_locked)
					vma_start_write(vma);
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	} while (vma);
}

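/*
 * Editor's note - illustrative sketch, not part of the upstream source: a
 * caller of free_pgd_range() bounds the walk with the neighbouring VMAs so
 * that page tables still shared with them are left alone.  Conceptually
 * (prev/next are hypothetical neighbours of the VMA being torn down):
 *
 *	free_pgd_range(tlb, vma->vm_start, vma->vm_end,
 *		       prev ? prev->vm_end : FIRST_USER_ADDRESS,
 *		       next ? next->vm_start : USER_PGTABLES_CEILING);
 *
 * free_pgtables() above performs exactly this bounding, using the maple tree
 * iterator to look up the next VMA.
 */
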
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		/*
		 * Ensure all pte setup (eg. pte page lock and page clearing) are
		 * visible before the pte is made visible to other CPUs by being
		 * put into page tables.
		 *
		 * The other side of the story is the pointer chasing in the page
		 * table walking code (when walking the page table without locking;
		 * ie. most of the time). Fortunately, these data accesses consist
		 * of a chain of data-dependent loads, meaning most CPUs (alpha
		 * being the notable exception) will already guarantee loads are
		 * seen in-order. See the alpha page table accessors for the
		 * smp_rmb() barriers in page table walking code.
		 */
		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
		pmd_populate(mm, pmd, *pte);
		*pte = NULL;
	}
	spin_unlock(ptl);
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	pmd_install(mm, pmd, &new);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		smp_wmb(); /* See comment in pmd_install() */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

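/*
 * Editor's note - illustrative sketch, not part of the upstream source:
 * callers normally reach __pte_alloc() through the pte_alloc() wrapper, which
 * only takes the allocation slow path while the pmd is still empty, e.g. in
 * a fault handler:
 *
 *	if (pte_alloc(vmf->vma->vm_mm, vmf->pmd))
 *		return VM_FAULT_OOM;
 *
 * The smp_wmb() in pmd_install() pairs with the data-dependent loads done by
 * lockless page-table walkers, as the comment there explains.
 */
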
static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->read_folio : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		if (pte_devmap(pte))
			/*
			 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
			 * and will have refcounts incremented on their struct pages
			 * when they are inserted into PTEs, thus they are safe to
			 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
			 * do not have refcounts. Example of legacy ZONE_DEVICE is
			 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
			 */
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

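/*
 * Editor's note - worked example, not part of the upstream source: for a
 * !CONFIG_ARCH_HAS_PTE_SPECIAL architecture, the remap_pfn_range() linearity
 * rule documented above is what lets vm_normal_page() spot a COWed page
 * inside a VM_PFNMAP mapping:
 *
 *	off = (addr - vma->vm_start) >> PAGE_SHIFT;
 *	if (pfn == vma->vm_pgoff + off)
 *		return NULL;	// still the raw linear mapping: special
 *	// otherwise the pfn was replaced by a private copy: a normal page
 *
 * which is exactly the check in the !VM_MIXEDMAP branch above.
 */
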
struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
			      pte_t pte)
{
	struct page *page = vm_normal_page(vma, addr, pte);

	if (page)
		return page_folio(page);
	return NULL;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/*
	 * There is no pmd_special() but there may be special pmds, e.g.
	 * in a direct-access (dax) mapping, so let's just replicate the
	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
	 */
	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_huge_zero_pmd(pmd))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
				  unsigned long addr, pmd_t pmd)
{
	struct page *page = vm_normal_page_pmd(vma, addr, pmd);

	if (page)
		return page_folio(page);
	return NULL;
}
#endif

static void restore_exclusive_pte(struct vm_area_struct *vma,
				  struct page *page, unsigned long address,
				  pte_t *ptep)
{
	struct folio *folio = page_folio(page);
	pte_t orig_pte;
	pte_t pte;
	swp_entry_t entry;

	orig_pte = ptep_get(ptep);
	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
	if (pte_swp_soft_dirty(orig_pte))
		pte = pte_mksoft_dirty(pte);

	entry = pte_to_swp_entry(orig_pte);
	if (pte_swp_uffd_wp(orig_pte))
		pte = pte_mkuffd_wp(pte);
	else if (is_writable_device_exclusive_entry(entry))
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);

	VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
					   PageAnonExclusive(page)), folio);

	/*
	 * No need to take a page reference as one was already
	 * created when the swap entry was made.
	 */
	if (folio_test_anon(folio))
		folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
	else
		/*
		 * Currently device exclusive access only supports anonymous
		 * memory so the entry shouldn't point to a filebacked page.
		 */
		WARN_ON_ONCE(1);

	set_pte_at(vma->vm_mm, address, ptep, pte);

	/*
	 * No need to invalidate - it was non-present before. However
	 * secondary CPUs may have mappings that need invalidating.
	 */
	update_mmu_cache(vma, address, ptep);
}

/*
 * Tries to restore an exclusive pte if the page lock can be acquired without
 * sleeping.
 */
static int
try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
			  unsigned long addr)
{
	swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte));
	struct page *page = pfn_swap_entry_to_page(entry);

	if (trylock_page(page)) {
		restore_exclusive_pte(vma, page, addr, src_pte);
		unlock_page(page);
		return 0;
	}

	return -EBUSY;
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
	unsigned long vm_flags = dst_vma->vm_flags;
	pte_t orig_pte = ptep_get(src_pte);
	pte_t pte = orig_pte;
	struct folio *folio;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(orig_pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return -EIO;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		/* Mark the swap entry as shared. */
		if (pte_swp_exclusive(orig_pte)) {
			pte = pte_swp_clear_exclusive(orig_pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);

		rss[mm_counter(page)]++;

		if (!is_readable_migration_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both parent and child
			 * to be set to read. A previously exclusive entry is
			 * now shared.
			 */
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(orig_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);
		folio = page_folio(page);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		folio_get(folio);
		rss[mm_counter(page)]++;
		/* Cannot fail as these pages cannot get pinned. */
		folio_try_dup_anon_rmap_pte(folio, page, src_vma);

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_writable_device_private_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
			entry = make_readable_device_private_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(orig_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_exclusive_entry(entry)) {
		/*
		 * Make device exclusive entries present by restoring the
		 * original entry then copying as for a present pte. Device
		 * exclusive entries currently only support private writable
		 * (ie. COW) mappings.
		 */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
			return -EBUSY;
		return -ENOENT;
	} else if (is_pte_marker_entry(entry)) {
		pte_marker marker = copy_pte_marker(entry, dst_vma);

		if (marker)
			set_pte_at(dst_mm, addr, dst_pte,
				   make_pte_marker(marker));
		return 0;
	}
	if (!userfaultfd_wp(dst_vma))
		pte = pte_swp_clear_uffd_wp(pte);
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page.
 *
 * NOTE! The usual case is that this isn't required;
 * instead, the caller can just increase the page refcount
 * and re-use the pte the traditional way.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct folio **prealloc, struct page *page)
{
	struct folio *new_folio;
	pte_t pte;

	new_folio = *prealloc;
	if (!new_folio)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good!  Take it
	 * over and copy the page & arm it.
	 */
	*prealloc = NULL;
	copy_user_highpage(&new_folio->page, page, addr, src_vma);
	__folio_mark_uptodate(new_folio);
	folio_add_new_anon_rmap(new_folio, dst_vma, addr);
	folio_add_lru_vma(new_folio, dst_vma);
	rss[MM_ANONPAGES]++;

	/* All done, just insert the new page copy in the child */
	pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte)))
		/* Uffd-wp needs to be delivered to dest pte as well */
		pte = pte_mkuffd_wp(pte);
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
 * is required to copy this pte.
 */
static inline int
copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		 struct folio **prealloc)
{
	struct mm_struct *src_mm = src_vma->vm_mm;
	unsigned long vm_flags = src_vma->vm_flags;
	pte_t pte = ptep_get(src_pte);
	struct page *page;
	struct folio *folio;

	page = vm_normal_page(src_vma, addr, pte);
	if (page)
		folio = page_folio(page);
	if (page && folio_test_anon(folio)) {
		/*
		 * If this page may have been pinned by the parent process,
		 * copy the page immediately for the child so that we'll always
		 * guarantee the pinned page won't be randomly replaced in the
		 * future.
		 */
		folio_get(folio);
		if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
			/* Page may be pinned, we have to copy. */
			folio_put(folio);
			return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
						 addr, rss, prealloc, page);
		}
		rss[MM_ANONPAGES]++;
	} else if (page) {
		folio_get(folio);
		folio_dup_file_rmap_pte(folio, page);
		rss[mm_counter_file(page)]++;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}
	VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	if (!userfaultfd_wp(dst_vma))
		pte = pte_clear_uffd_wp(pte);

	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

static inline struct folio *folio_prealloc(struct mm_struct *src_mm,
		struct vm_area_struct *vma, unsigned long addr, bool need_zero)
{
	struct folio *new_folio;

	if (need_zero)
		new_folio = vma_alloc_zeroed_movable_folio(vma, addr);
	else
		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
					    addr, false);

	if (!new_folio)
		return NULL;

	if (mem_cgroup_charge(new_folio, src_mm, GFP_KERNEL)) {
		folio_put(new_folio);
		return NULL;
	}
	folio_throttle_swaprate(new_folio, GFP_KERNEL);

	return new_folio;
}

static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	pte_t ptent;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, ret = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};
	struct folio *prealloc = NULL;

again:
	progress = 0;
	init_rss_vec(rss);

	/*
	 * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the
	 * error handling here, assume that exclusive mmap_lock on dst and src
	 * protects anon from unexpected THP transitions; with shmem and file
	 * protected by mmap_lock-less collapse skipping areas with anon_vma
	 * (whereas vma_needs_copy() skips areas without anon_vma).  A rework
	 * can remove such assumptions later, but this is good enough for now.
	 */
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}
	src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl);
	if (!src_pte) {
		pte_unmap_unlock(dst_pte, dst_ptl);
		/* ret == 0 */
		goto out;
	}
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		ptent = ptep_get(src_pte);
		if (pte_none(ptent)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(ptent))) {
			ret = copy_nonpresent_pte(dst_mm, src_mm,
						  dst_pte, src_pte,
						  dst_vma, src_vma,
						  addr, rss);
			if (ret == -EIO) {
				entry = pte_to_swp_entry(ptep_get(src_pte));
				break;
			} else if (ret == -EBUSY) {
				break;
			} else if (!ret) {
				progress += 8;
				continue;
			}

			/*
			 * Device exclusive entry restored, continue by copying
			 * the now present pte.
			 */
			WARN_ON_ONCE(ret != -ENOENT);
		}
		/* copy_present_pte() will clear `*prealloc' if consumed */
		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
				       addr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 */
		if (unlikely(ret == -EAGAIN))
			break;
		if (unlikely(prealloc)) {
			/*
			 * pre-alloc page cannot be reused by next time so as
			 * to strictly follow mempolicy (e.g., alloc_page_vma()
			 * will allocate page according to address).  This
			 * could only happen if one pinned pte changed.
			 */
			folio_put(prealloc);
			prealloc = NULL;
		}
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_src_pte, src_ptl);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (ret == -EIO) {
		VM_WARN_ON_ONCE(!entry.val);
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret == -EBUSY) {
		goto out;
	} else if (ret == -EAGAIN) {
		prealloc = folio_prealloc(src_mm, src_vma, addr, false);
		if (!prealloc)
			return -ENOMEM;
	} else if (ret) {
		VM_WARN_ON_ONCE(1);
	}

	/* We've captured and resolved the error. Reset, try again. */
	ret = 0;

	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		folio_put(prealloc);
	return ret;
}

static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
			|| pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
					    addr, dst_vma, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Return true if the vma needs to copy the pgtable during this fork().  Return
 * false when we can speed up fork() by allowing lazy page faults later until
 * when the child accesses the memory range.
 */
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	/*
	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
	 * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable
	 * contains uffd-wp protection information, that's something we can't
	 * retrieve from page cache, and skip copying will lose those info.
	 */
	if (userfaultfd_wp(dst_vma))
		return true;

	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
		return true;

	if (src_vma->anon_vma)
		return true;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.  Fork
	 * becomes much lighter when there are big shared or private readonly
	 * mappings. The tradeoff is that copy_page_range is more efficient
	 * than faulting.
	 */
	return false;
}

int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = src_vma->vm_start;
	unsigned long end = src_vma->vm_end;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct mmu_notifier_range range;
	bool is_cow;
	int ret;

	if (!vma_needs_copy(dst_vma, src_vma))
		return 0;

	if (is_vm_hugetlb_page(src_vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_copy(src_vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	is_cow = is_cow_mapping(src_vma->vm_flags);

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
		/*
		 * Disabling preemption is not needed for the write side, as
		 * the read side doesn't spin, but goes to the mmap_lock.
		 *
		 * Use the raw variant of the seqcount_t write API to avoid
		 * lockdep complaining about preemptibility.
		 */
		vma_assert_write_locked(src_vma);
		raw_write_seqcount_begin(&src_mm->write_protect_seq);
	}

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
					    addr, next))) {
			untrack_pfn_clear(dst_vma);
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow) {
		raw_write_seqcount_end(&src_mm->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	}
	return ret;
}

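/*
 * Editor's note - illustrative sketch, not part of the upstream source:
 * copy_page_range() is driven from the fork() path, where dup_mmap() in
 * kernel/fork.c walks the parent's VMAs and duplicates the page tables into
 * the child; copy_page_range() then uses vma_needs_copy() above to skip
 * ranges that can simply be faulted in later:
 *
 *	for_each_vma(vmi, mpnt) {
 *		...
 *		if (!(tmp->vm_flags & VM_WIPEONFORK))
 *			retval = copy_page_range(tmp, mpnt);
 *		...
 *	}
 *
 * (tmp is the child's copy of the parent VMA mpnt.)
 */
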
/* Whether we should zap all COWed (private) pages too */
static inline bool should_zap_cows(struct zap_details *details)
{
	/* By default, zap all pages */
	if (!details)
		return true;

	/* Or, we zap COWed pages only if the caller wants to */
	return details->even_cows;
}

/* Decides whether we should zap this page with the page pointer specified */
static inline bool should_zap_page(struct zap_details *details, struct page *page)
{
	/* If we can make a decision without *page.. */
	if (should_zap_cows(details))
		return true;

	/* E.g. the caller passes NULL for the case of a zero page */
	if (!page)
		return true;

	/* Otherwise we should only zap non-anon pages */
	return !PageAnon(page);
}

static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
{
	if (!details)
		return false;

	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
}

/*
 * This function makes sure that we'll replace the none pte with an uffd-wp
 * swap special pte marker when necessary. Must be with the pgtable lock held.
 */
static inline void
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *pte,
			      struct zap_details *details, pte_t pteval)
{
	/* Zap on anonymous always means dropping everything */
	if (vma_is_anonymous(vma))
		return;

	if (zap_drop_file_uffd_wp(details))
		return;

	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
}

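/*
 * Editor's note - illustrative sketch, not part of the upstream source: the
 * zap_details argument lets callers restrict what the zap_*_range() helpers
 * below actually drop.  A NULL details pointer, as used by the ordinary
 * unmap paths, means "zap everything".  A pagecache-truncation style caller
 * that must leave COWed anonymous copies in place builds something roughly
 * like:
 *
 *	struct zap_details details = {
 *		.even_cows = false,		(keep private COWed pages)
 *		.single_folio = folio,		(only this folio is of interest)
 *	};
 *
 * which is the shape of what unmap_mapping_folio() hands down to
 * unmap_page_range().
 */
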
51c6f666 1414static unsigned long zap_pte_range(struct mmu_gather *tlb,
b5810039 1415 struct vm_area_struct *vma, pmd_t *pmd,
1da177e4 1416 unsigned long addr, unsigned long end,
97a89413 1417 struct zap_details *details)
1da177e4 1418{
b5810039 1419 struct mm_struct *mm = tlb->mm;
d16dfc55 1420 int force_flush = 0;
d559db08 1421 int rss[NR_MM_COUNTERS];
97a89413 1422 spinlock_t *ptl;
5f1a1907 1423 pte_t *start_pte;
97a89413 1424 pte_t *pte;
8a5f14a2 1425 swp_entry_t entry;
d559db08 1426
ed6a7935 1427 tlb_change_page_size(tlb, PAGE_SIZE);
e303297e 1428 init_rss_vec(rss);
3db82b93
HD
1429 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1430 if (!pte)
1431 return addr;
1432
3ea27719 1433 flush_tlb_batched_pending(mm);
6606c3e0 1434 arch_enter_lazy_mmu_mode();
1da177e4 1435 do {
c33c7948 1436 pte_t ptent = ptep_get(pte);
c4626503 1437 struct folio *folio;
8018db85
PX
1438 struct page *page;
1439
166f61b9 1440 if (pte_none(ptent))
1da177e4 1441 continue;
6f5e6b9e 1442
7b167b68
MK
1443 if (need_resched())
1444 break;
1445
1da177e4 1446 if (pte_present(ptent)) {
5df397de
LT
1447 unsigned int delay_rmap;
1448
25b2995a 1449 page = vm_normal_page(vma, addr, ptent);
254ab940 1450 if (unlikely(!should_zap_page(details, page)))
91b61ef3 1451 continue;
b5810039 1452 ptent = ptep_get_and_clear_full(mm, addr, pte,
a600388d 1453 tlb->fullmm);
e5136e87 1454 arch_check_zapped_pte(vma, ptent);
1da177e4 1455 tlb_remove_tlb_entry(tlb, pte, addr);
999dad82
PX
1456 zap_install_uffd_wp_if_needed(vma, addr, pte, details,
1457 ptent);
e2942062 1458 if (unlikely(!page)) {
6080d19f 1459 ksm_might_unmap_zero_page(mm, ptent);
1da177e4 1460 continue;
e2942062 1461 }
eca56ff9 1462
c4626503 1463 folio = page_folio(page);
5df397de 1464 delay_rmap = 0;
c4626503 1465 if (!folio_test_anon(folio)) {
1cf35d47 1466 if (pte_dirty(ptent)) {
e4e3df29 1467 folio_mark_dirty(folio);
5df397de
LT
1468 if (tlb_delay_rmap(tlb)) {
1469 delay_rmap = 1;
1470 force_flush = 1;
1471 }
1cf35d47 1472 }
8788f678 1473 if (pte_young(ptent) && likely(vma_has_recency(vma)))
c4626503 1474 folio_mark_accessed(folio);
6237bcd9 1475 }
eca56ff9 1476 rss[mm_counter(page)]--;
5df397de 1477 if (!delay_rmap) {
c4626503 1478 folio_remove_rmap_pte(folio, page, vma);
5df397de
LT
1479 if (unlikely(page_mapcount(page) < 0))
1480 print_bad_pte(vma, addr, ptent, page);
1481 }
1482 if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) {
1cf35d47 1483 force_flush = 1;
ce9ec37b 1484 addr += PAGE_SIZE;
d16dfc55 1485 break;
1cf35d47 1486 }
1da177e4
LT
1487 continue;
1488 }
5042db43
JG
1489
1490 entry = pte_to_swp_entry(ptent);
b756a3b5
AP
1491 if (is_device_private_entry(entry) ||
1492 is_device_exclusive_entry(entry)) {
8018db85 1493 page = pfn_swap_entry_to_page(entry);
c4626503 1494 folio = page_folio(page);
254ab940 1495 if (unlikely(!should_zap_page(details, page)))
91b61ef3 1496 continue;
999dad82
PX
1497 /*
1498 * Both device private/exclusive mappings should only
1499 * work with anonymous page so far, so we don't need to
1500 * consider uffd-wp bit when zap. For more information,
1501 * see zap_install_uffd_wp_if_needed().
1502 */
1503 WARN_ON_ONCE(!vma_is_anonymous(vma));
5042db43 1504 rss[mm_counter(page)]--;
b756a3b5 1505 if (is_device_private_entry(entry))
c4626503
DH
1506 folio_remove_rmap_pte(folio, page, vma);
1507 folio_put(folio);
8018db85 1508 } else if (!non_swap_entry(entry)) {
5abfd71d
PX
1509 /* Genuine swap entry, hence a private anon page */
1510 if (!should_zap_cows(details))
1511 continue;
8a5f14a2 1512 rss[MM_SWAPENTS]--;
8018db85
PX
1513 if (unlikely(!free_swap_and_cache(entry)))
1514 print_bad_pte(vma, addr, ptent, NULL);
5abfd71d 1515 } else if (is_migration_entry(entry)) {
af5cdaf8 1516 page = pfn_swap_entry_to_page(entry);
254ab940 1517 if (!should_zap_page(details, page))
5abfd71d 1518 continue;
eca56ff9 1519 rss[mm_counter(page)]--;
999dad82 1520 } else if (pte_marker_entry_uffd_wp(entry)) {
2bad466c
PX
1521 /*
1522 * For anon: always drop the marker; for file: only
1523 * drop the marker if explicitly requested.
1524 */
1525 if (!vma_is_anonymous(vma) &&
1526 !zap_drop_file_uffd_wp(details))
999dad82 1527 continue;
9f186f9e 1528 } else if (is_hwpoison_entry(entry) ||
af19487f 1529 is_poisoned_swp_entry(entry)) {
5abfd71d
PX
1530 if (!should_zap_cows(details))
1531 continue;
1532 } else {
1533 /* We should have covered all the swap entry types */
727d16f1 1534 pr_alert("unrecognized swap entry 0x%lx\n", entry.val);
5abfd71d 1535 WARN_ON_ONCE(1);
b084d435 1536 }
9888a1ca 1537 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
999dad82 1538 zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
97a89413 1539 } while (pte++, addr += PAGE_SIZE, addr != end);
ae859762 1540
d559db08 1541 add_mm_rss_vec(mm, rss);
6606c3e0 1542 arch_leave_lazy_mmu_mode();
51c6f666 1543
1cf35d47 1544 /* Do the actual TLB flush before dropping ptl */
5df397de 1545 if (force_flush) {
1cf35d47 1546 tlb_flush_mmu_tlbonly(tlb);
f036c818 1547 tlb_flush_rmaps(tlb, vma);
5df397de 1548 }
1cf35d47
LT
1549 pte_unmap_unlock(start_pte, ptl);
1550
1551 /*
1552 * If we forced a TLB flush (either due to running out of
1553 * batch buffers or because we needed to flush dirty TLB
1554 * entries before releasing the ptl), free the batched
3db82b93 1555 * memory too. Come back again if we didn't do everything.
1cf35d47 1556 */
3db82b93 1557 if (force_flush)
fa0aafb8 1558 tlb_flush_mmu(tlb);
d16dfc55 1559
51c6f666 1560 return addr;
1da177e4
LT
1561}
1562
51c6f666 1563static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
b5810039 1564 struct vm_area_struct *vma, pud_t *pud,
1da177e4 1565 unsigned long addr, unsigned long end,
97a89413 1566 struct zap_details *details)
1da177e4
LT
1567{
1568 pmd_t *pmd;
1569 unsigned long next;
1570
1571 pmd = pmd_offset(pud, addr);
1572 do {
1573 next = pmd_addr_end(addr, end);
84c3fc4e 1574 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
53406ed1 1575 if (next - addr != HPAGE_PMD_SIZE)
fd60775a 1576 __split_huge_pmd(vma, pmd, addr, false, NULL);
3db82b93
HD
1577 else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1578 addr = next;
1579 continue;
1580 }
71e3aac0 1581 /* fall through */
3506659e
MWO
1582 } else if (details && details->single_folio &&
1583 folio_test_pmd_mappable(details->single_folio) &&
22061a1f
HD
1584 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1585 spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1586 /*
1587 * Take and drop THP pmd lock so that we cannot return
1588 * prematurely, while zap_huge_pmd() has cleared *pmd,
1589 * but not yet decremented compound_mapcount().
1590 */
1591 spin_unlock(ptl);
71e3aac0 1592 }
3db82b93
HD
1593 if (pmd_none(*pmd)) {
1594 addr = next;
1595 continue;
1596 }
1597 addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1598 if (addr != next)
1599 pmd--;
1600 } while (pmd++, cond_resched(), addr != end);
51c6f666
RH
1601
1602 return addr;
1da177e4
LT
1603}
1604
51c6f666 1605static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
c2febafc 1606 struct vm_area_struct *vma, p4d_t *p4d,
1da177e4 1607 unsigned long addr, unsigned long end,
97a89413 1608 struct zap_details *details)
1da177e4
LT
1609{
1610 pud_t *pud;
1611 unsigned long next;
1612
c2febafc 1613 pud = pud_offset(p4d, addr);
1da177e4
LT
1614 do {
1615 next = pud_addr_end(addr, end);
a00cc7d9
MW
1616 if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1617 if (next - addr != HPAGE_PUD_SIZE) {
42fc5414 1618 mmap_assert_locked(tlb->mm);
a00cc7d9
MW
1619 split_huge_pud(vma, pud, addr);
1620 } else if (zap_huge_pud(tlb, vma, pud, addr))
1621 goto next;
1622 /* fall through */
1623 }
97a89413 1624 if (pud_none_or_clear_bad(pud))
1da177e4 1625 continue;
97a89413 1626 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
a00cc7d9
MW
1627next:
1628 cond_resched();
97a89413 1629 } while (pud++, addr = next, addr != end);
51c6f666
RH
1630
1631 return addr;
1da177e4
LT
1632}
1633
c2febafc
KS
1634static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1635 struct vm_area_struct *vma, pgd_t *pgd,
1636 unsigned long addr, unsigned long end,
1637 struct zap_details *details)
1638{
1639 p4d_t *p4d;
1640 unsigned long next;
1641
1642 p4d = p4d_offset(pgd, addr);
1643 do {
1644 next = p4d_addr_end(addr, end);
1645 if (p4d_none_or_clear_bad(p4d))
1646 continue;
1647 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1648 } while (p4d++, addr = next, addr != end);
1649
1650 return addr;
1651}
1652
aac45363 1653void unmap_page_range(struct mmu_gather *tlb,
038c7aa1
AV
1654 struct vm_area_struct *vma,
1655 unsigned long addr, unsigned long end,
1656 struct zap_details *details)
1da177e4
LT
1657{
1658 pgd_t *pgd;
1659 unsigned long next;
1660
1da177e4
LT
1661 BUG_ON(addr >= end);
1662 tlb_start_vma(tlb, vma);
1663 pgd = pgd_offset(vma->vm_mm, addr);
1664 do {
1665 next = pgd_addr_end(addr, end);
97a89413 1666 if (pgd_none_or_clear_bad(pgd))
1da177e4 1667 continue;
c2febafc 1668 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
97a89413 1669 } while (pgd++, addr = next, addr != end);
1da177e4
LT
1670 tlb_end_vma(tlb, vma);
1671}
51c6f666 1672
f5cc4eef
AV
1673
1674static void unmap_single_vma(struct mmu_gather *tlb,
1675 struct vm_area_struct *vma, unsigned long start_addr,
4f74d2c8 1676 unsigned long end_addr,
68f48381 1677 struct zap_details *details, bool mm_wr_locked)
f5cc4eef
AV
1678{
1679 unsigned long start = max(vma->vm_start, start_addr);
1680 unsigned long end;
1681
1682 if (start >= vma->vm_end)
1683 return;
1684 end = min(vma->vm_end, end_addr);
1685 if (end <= vma->vm_start)
1686 return;
1687
cbc91f71
SD
1688 if (vma->vm_file)
1689 uprobe_munmap(vma, start, end);
1690
b3b9c293 1691 if (unlikely(vma->vm_flags & VM_PFNMAP))
68f48381 1692 untrack_pfn(vma, 0, 0, mm_wr_locked);
f5cc4eef
AV
1693
1694 if (start != end) {
1695 if (unlikely(is_vm_hugetlb_page(vma))) {
1696 /*
1697 * It is undesirable to test vma->vm_file as it
 1698 * should be non-null for a valid hugetlb area.
1699 * However, vm_file will be NULL in the error
7aa6b4ad 1700 * cleanup path of mmap_region. When
f5cc4eef 1701 * hugetlbfs ->mmap method fails,
7aa6b4ad 1702 * mmap_region() nullifies vma->vm_file
f5cc4eef
AV
1703 * before calling this function to clean up.
 1704 * Since no pte has actually been set up, it is
1705 * safe to do nothing in this case.
1706 */
24669e58 1707 if (vma->vm_file) {
05e90bd0
PX
1708 zap_flags_t zap_flags = details ?
1709 details->zap_flags : 0;
2820b0f0 1710 __unmap_hugepage_range(tlb, vma, start, end,
05e90bd0 1711 NULL, zap_flags);
24669e58 1712 }
f5cc4eef
AV
1713 } else
1714 unmap_page_range(tlb, vma, start, end, details);
1715 }
1da177e4
LT
1716}
1717
1da177e4
LT
1718/**
1719 * unmap_vmas - unmap a range of memory covered by a list of vma's
0164f69d 1720 * @tlb: address of the caller's struct mmu_gather
6e412203 1721 * @mas: the maple state
1da177e4
LT
1722 * @vma: the starting vma
1723 * @start_addr: virtual address at which to start unmapping
1724 * @end_addr: virtual address at which to end unmapping
6e412203 1725 * @tree_end: The maximum index to check
809ef83c 1726 * @mm_wr_locked: whether the mmap_lock is held for writing
1da177e4 1727 *
508034a3 1728 * Unmap all pages in the vma list.
1da177e4 1729 *
1da177e4
LT
1730 * Only addresses between `start' and `end' will be unmapped.
1731 *
1732 * The VMA list must be sorted in ascending virtual address order.
1733 *
1734 * unmap_vmas() assumes that the caller will flush the whole unmapped address
1735 * range after unmap_vmas() returns. So the only responsibility here is to
1736 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1737 * drops the lock and schedules.
1738 */
fd892593 1739void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
1da177e4 1740 struct vm_area_struct *vma, unsigned long start_addr,
fd892593
LH
1741 unsigned long end_addr, unsigned long tree_end,
1742 bool mm_wr_locked)
1da177e4 1743{
ac46d4f3 1744 struct mmu_notifier_range range;
999dad82 1745 struct zap_details details = {
04ada095 1746 .zap_flags = ZAP_FLAG_DROP_MARKER | ZAP_FLAG_UNMAP,
999dad82
PX
1747 /* Careful - we need to zap private pages too! */
1748 .even_cows = true,
1749 };
1da177e4 1750
7d4a8be0 1751 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
6f4f13e8 1752 start_addr, end_addr);
ac46d4f3 1753 mmu_notifier_invalidate_range_start(&range);
763ecb03 1754 do {
2820b0f0
RR
1755 unsigned long start = start_addr;
1756 unsigned long end = end_addr;
1757 hugetlb_zap_begin(vma, &start, &end);
1758 unmap_single_vma(tlb, vma, start, end, &details,
68f48381 1759 mm_wr_locked);
2820b0f0 1760 hugetlb_zap_end(vma, &details);
d2406291
PZ
1761 vma = mas_find(mas, tree_end - 1);
1762 } while (vma && likely(!xa_is_zero(vma)));
ac46d4f3 1763 mmu_notifier_invalidate_range_end(&range);
1da177e4
LT
1764}
1765
f5cc4eef
AV
1766/**
1767 * zap_page_range_single - remove user pages in a given range
1768 * @vma: vm_area_struct holding the applicable pages
1769 * @address: starting address of pages to zap
1770 * @size: number of bytes to zap
8a5f14a2 1771 * @details: details of shared cache invalidation
f5cc4eef
AV
1772 *
1773 * The range must fit into one VMA.
1da177e4 1774 */
21b85b09 1775void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1da177e4
LT
1776 unsigned long size, struct zap_details *details)
1777{
21b85b09 1778 const unsigned long end = address + size;
ac46d4f3 1779 struct mmu_notifier_range range;
d16dfc55 1780 struct mmu_gather tlb;
1da177e4 1781
1da177e4 1782 lru_add_drain();
7d4a8be0 1783 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
21b85b09 1784 address, end);
2820b0f0 1785 hugetlb_zap_begin(vma, &range.start, &range.end);
a72afd87 1786 tlb_gather_mmu(&tlb, vma->vm_mm);
ac46d4f3
JG
1787 update_hiwater_rss(vma->vm_mm);
1788 mmu_notifier_invalidate_range_start(&range);
21b85b09
MK
1789 /*
1790 * unmap 'address-end' not 'range.start-range.end' as range
1791 * could have been expanded for hugetlb pmd sharing.
1792 */
68f48381 1793 unmap_single_vma(&tlb, vma, address, end, details, false);
ac46d4f3 1794 mmu_notifier_invalidate_range_end(&range);
ae8eba8b 1795 tlb_finish_mmu(&tlb);
2820b0f0 1796 hugetlb_zap_end(vma, details);
1da177e4
LT
1797}
1798
c627f9cc
JS
1799/**
1800 * zap_vma_ptes - remove ptes mapping the vma
1801 * @vma: vm_area_struct holding ptes to be zapped
1802 * @address: starting address of pages to zap
1803 * @size: number of bytes to zap
1804 *
1805 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1806 *
1807 * The entire address range must be fully contained within the vma.
1808 *
c627f9cc 1809 */
27d036e3 1810void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
c627f9cc
JS
1811 unsigned long size)
1812{
88a35912 1813 if (!range_in_vma(vma, address, address + size) ||
c627f9cc 1814 !(vma->vm_flags & VM_PFNMAP))
27d036e3
LR
1815 return;
1816
f5cc4eef 1817 zap_page_range_single(vma, address, size, NULL);
c627f9cc
JS
1818}
1819EXPORT_SYMBOL_GPL(zap_vma_ptes);
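
/*
 * Illustrative sketch (not part of this file): a hypothetical VM_PFNMAP
 * driver revoking user access to its mapped region by zapping the ptes,
 * e.g. on hot-unplug, so that later accesses fault again.
 * mydrv_revoke_mapping() and mydrv_bar_size are assumed names.
 */
static void mydrv_revoke_mapping(struct vm_area_struct *vma,
				 unsigned long mydrv_bar_size)
{
	/* The range must lie fully inside the VM_PFNMAP vma. */
	zap_vma_ptes(vma, vma->vm_start, mydrv_bar_size);
}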
1820
8cd3984d 1821static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
c9cfcddf 1822{
c2febafc
KS
1823 pgd_t *pgd;
1824 p4d_t *p4d;
1825 pud_t *pud;
1826 pmd_t *pmd;
1827
1828 pgd = pgd_offset(mm, addr);
1829 p4d = p4d_alloc(mm, pgd, addr);
1830 if (!p4d)
1831 return NULL;
1832 pud = pud_alloc(mm, p4d, addr);
1833 if (!pud)
1834 return NULL;
1835 pmd = pmd_alloc(mm, pud, addr);
1836 if (!pmd)
1837 return NULL;
1838
1839 VM_BUG_ON(pmd_trans_huge(*pmd));
8cd3984d
AR
1840 return pmd;
1841}
1842
1843pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1844 spinlock_t **ptl)
1845{
1846 pmd_t *pmd = walk_to_pmd(mm, addr);
1847
1848 if (!pmd)
1849 return NULL;
c2febafc 1850 return pte_alloc_map_lock(mm, pmd, addr, ptl);
c9cfcddf
LT
1851}
1852
8efd6f5b
AR
1853static int validate_page_before_insert(struct page *page)
1854{
f8b6187d
KW
1855 struct folio *folio = page_folio(page);
1856
1857 if (folio_test_anon(folio) || folio_test_slab(folio) ||
1858 page_has_type(page))
8efd6f5b 1859 return -EINVAL;
f8b6187d 1860 flush_dcache_folio(folio);
8efd6f5b
AR
1861 return 0;
1862}
1863
cea86fe2 1864static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
8efd6f5b
AR
1865 unsigned long addr, struct page *page, pgprot_t prot)
1866{
ef37b2ea
DH
1867 struct folio *folio = page_folio(page);
1868
c33c7948 1869 if (!pte_none(ptep_get(pte)))
8efd6f5b
AR
1870 return -EBUSY;
1871 /* Ok, finally just insert the thing.. */
ef37b2ea 1872 folio_get(folio);
f1a79412 1873 inc_mm_counter(vma->vm_mm, mm_counter_file(page));
ef37b2ea 1874 folio_add_file_rmap_pte(folio, page, vma);
cea86fe2 1875 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
8efd6f5b
AR
1876 return 0;
1877}
1878
238f58d8
LT
1879/*
1880 * This is the old fallback for page remapping.
1881 *
1882 * For historical reasons, it only allows reserved pages. Only
1883 * old drivers should use this, and they needed to mark their
1884 * pages reserved for the old functions anyway.
1885 */
423bad60
NP
1886static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1887 struct page *page, pgprot_t prot)
238f58d8
LT
1888{
1889 int retval;
c9cfcddf 1890 pte_t *pte;
8a9f3ccd
BS
1891 spinlock_t *ptl;
1892
8efd6f5b
AR
1893 retval = validate_page_before_insert(page);
1894 if (retval)
5b4e655e 1895 goto out;
238f58d8 1896 retval = -ENOMEM;
cea86fe2 1897 pte = get_locked_pte(vma->vm_mm, addr, &ptl);
238f58d8 1898 if (!pte)
5b4e655e 1899 goto out;
cea86fe2 1900 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
238f58d8
LT
1901 pte_unmap_unlock(pte, ptl);
1902out:
1903 return retval;
1904}
1905
cea86fe2 1906static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
8cd3984d
AR
1907 unsigned long addr, struct page *page, pgprot_t prot)
1908{
1909 int err;
1910
1911 if (!page_count(page))
1912 return -EINVAL;
1913 err = validate_page_before_insert(page);
7f70c2a6
AR
1914 if (err)
1915 return err;
cea86fe2 1916 return insert_page_into_pte_locked(vma, pte, addr, page, prot);
8cd3984d
AR
1917}
1918
1919/* insert_pages() amortizes the cost of spinlock operations
bb7dbaaf 1920 * when inserting pages in a loop.
8cd3984d
AR
1921 */
1922static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1923 struct page **pages, unsigned long *num, pgprot_t prot)
1924{
1925 pmd_t *pmd = NULL;
7f70c2a6
AR
1926 pte_t *start_pte, *pte;
1927 spinlock_t *pte_lock;
8cd3984d
AR
1928 struct mm_struct *const mm = vma->vm_mm;
1929 unsigned long curr_page_idx = 0;
1930 unsigned long remaining_pages_total = *num;
1931 unsigned long pages_to_write_in_pmd;
1932 int ret;
1933more:
1934 ret = -EFAULT;
1935 pmd = walk_to_pmd(mm, addr);
1936 if (!pmd)
1937 goto out;
1938
1939 pages_to_write_in_pmd = min_t(unsigned long,
1940 remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1941
1942 /* Allocate the PTE if necessary; takes PMD lock once only. */
1943 ret = -ENOMEM;
1944 if (pte_alloc(mm, pmd))
1945 goto out;
8cd3984d
AR
1946
1947 while (pages_to_write_in_pmd) {
1948 int pte_idx = 0;
1949 const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1950
7f70c2a6 1951 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
3db82b93
HD
1952 if (!start_pte) {
1953 ret = -EFAULT;
1954 goto out;
1955 }
7f70c2a6 1956 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
cea86fe2 1957 int err = insert_page_in_batch_locked(vma, pte,
8cd3984d
AR
1958 addr, pages[curr_page_idx], prot);
1959 if (unlikely(err)) {
7f70c2a6 1960 pte_unmap_unlock(start_pte, pte_lock);
8cd3984d
AR
1961 ret = err;
1962 remaining_pages_total -= pte_idx;
1963 goto out;
1964 }
1965 addr += PAGE_SIZE;
1966 ++curr_page_idx;
1967 }
7f70c2a6 1968 pte_unmap_unlock(start_pte, pte_lock);
8cd3984d
AR
1969 pages_to_write_in_pmd -= batch_size;
1970 remaining_pages_total -= batch_size;
1971 }
1972 if (remaining_pages_total)
1973 goto more;
1974 ret = 0;
1975out:
1976 *num = remaining_pages_total;
1977 return ret;
1978}
8cd3984d
AR
1979
1980/**
1981 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1982 * @vma: user vma to map to
1983 * @addr: target start user address of these pages
1984 * @pages: source kernel pages
1985 * @num: in: number of pages to map. out: number of pages that were *not*
1986 * mapped. (0 means all pages were successfully mapped).
1987 *
1988 * Preferred over vm_insert_page() when inserting multiple pages.
1989 *
1990 * In case of error, we may have mapped a subset of the provided
1991 * pages. It is the caller's responsibility to account for this case.
1992 *
1993 * The same restrictions apply as in vm_insert_page().
1994 */
1995int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1996 struct page **pages, unsigned long *num)
1997{
8cd3984d
AR
1998 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1999
2000 if (addr < vma->vm_start || end_addr >= vma->vm_end)
2001 return -EFAULT;
2002 if (!(vma->vm_flags & VM_MIXEDMAP)) {
d8ed45c5 2003 BUG_ON(mmap_read_trylock(vma->vm_mm));
8cd3984d 2004 BUG_ON(vma->vm_flags & VM_PFNMAP);
1c71222e 2005 vm_flags_set(vma, VM_MIXEDMAP);
8cd3984d
AR
2006 }
2007 /* Defer page refcount checking till we're about to map that page. */
2008 return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
8cd3984d
AR
2009}
2010EXPORT_SYMBOL(vm_insert_pages);
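
/*
 * Illustrative sketch (not part of this file): inserting a driver-owned
 * page array with vm_insert_pages(), which batches the pte lock instead
 * of taking it once per page as repeated vm_insert_page() calls would.
 * mydrv_map_buffer(), mydrv_pages and mydrv_npages are assumed names.
 */
static int mydrv_map_buffer(struct vm_area_struct *vma,
			    struct page **mydrv_pages,
			    unsigned long mydrv_npages)
{
	unsigned long num = mydrv_npages;
	int err;

	err = vm_insert_pages(vma, vma->vm_start, mydrv_pages, &num);
	/* On failure, 'num' holds how many pages were *not* mapped. */
	return err;
}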
2011
bfa5bf6d
REB
2012/**
2013 * vm_insert_page - insert single page into user vma
2014 * @vma: user vma to map to
2015 * @addr: target user address of this page
2016 * @page: source kernel page
2017 *
a145dd41
LT
2018 * This allows drivers to insert individual pages they've allocated
2019 * into a user vma.
2020 *
2021 * The page has to be a nice clean _individual_ kernel allocation.
2022 * If you allocate a compound page, you need to have marked it as
2023 * such (__GFP_COMP), or manually just split the page up yourself
8dfcc9ba 2024 * (see split_page()).
a145dd41
LT
2025 *
2026 * NOTE! Traditionally this was done with "remap_pfn_range()" which
2027 * took an arbitrary page protection parameter. This doesn't allow
2028 * that. Your vma protection will have to be set up correctly, which
2029 * means that if you want a shared writable mapping, you'd better
2030 * ask for a shared writable mapping!
2031 *
2032 * The page does not need to be reserved.
4b6e1e37
KK
2033 *
2034 * Usually this function is called from f_op->mmap() handler
c1e8d7c6 2035 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
4b6e1e37
KK
2036 * Caller must set VM_MIXEDMAP on vma if it wants to call this
2037 * function from other places, for example from page-fault handler.
a862f68a
MR
2038 *
2039 * Return: %0 on success, negative error code otherwise.
a145dd41 2040 */
423bad60
NP
2041int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2042 struct page *page)
a145dd41
LT
2043{
2044 if (addr < vma->vm_start || addr >= vma->vm_end)
2045 return -EFAULT;
2046 if (!page_count(page))
2047 return -EINVAL;
4b6e1e37 2048 if (!(vma->vm_flags & VM_MIXEDMAP)) {
d8ed45c5 2049 BUG_ON(mmap_read_trylock(vma->vm_mm));
4b6e1e37 2050 BUG_ON(vma->vm_flags & VM_PFNMAP);
1c71222e 2051 vm_flags_set(vma, VM_MIXEDMAP);
4b6e1e37 2052 }
423bad60 2053 return insert_page(vma, addr, page, vma->vm_page_prot);
a145dd41 2054}
e3c3374f 2055EXPORT_SYMBOL(vm_insert_page);
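
/*
 * Illustrative sketch (not part of this file): a minimal f_op->mmap handler
 * exposing one driver-allocated, non-compound page with vm_insert_page().
 * Because ->mmap runs under the mmap_lock write-lock, vm_insert_page() may
 * set VM_MIXEDMAP on the vma itself. mydrv_get_page() is a hypothetical
 * helper returning the backing page.
 */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct page *page = mydrv_get_page(file);

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	return vm_insert_page(vma, vma->vm_start, page);
}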
a145dd41 2056
a667d745
SJ
2057/*
 2058 * __vm_map_pages - maps a range of kernel pages into a user vma
2059 * @vma: user vma to map to
2060 * @pages: pointer to array of source kernel pages
2061 * @num: number of pages in page array
2062 * @offset: user's requested vm_pgoff
2063 *
 2064 * This allows drivers to map a range of kernel pages into a user vma.
2065 *
2066 * Return: 0 on success and error code otherwise.
2067 */
2068static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2069 unsigned long num, unsigned long offset)
2070{
2071 unsigned long count = vma_pages(vma);
2072 unsigned long uaddr = vma->vm_start;
2073 int ret, i;
2074
2075 /* Fail if the user requested offset is beyond the end of the object */
96756fcb 2076 if (offset >= num)
a667d745
SJ
2077 return -ENXIO;
2078
2079 /* Fail if the user requested size exceeds available object size */
2080 if (count > num - offset)
2081 return -ENXIO;
2082
2083 for (i = 0; i < count; i++) {
2084 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2085 if (ret < 0)
2086 return ret;
2087 uaddr += PAGE_SIZE;
2088 }
2089
2090 return 0;
2091}
2092
2093/**
 2094 * vm_map_pages - maps a range of kernel pages starting at a non-zero offset
2095 * @vma: user vma to map to
2096 * @pages: pointer to array of source kernel pages
2097 * @num: number of pages in page array
2098 *
2099 * Maps an object consisting of @num pages, catering for the user's
2100 * requested vm_pgoff
2101 *
2102 * If we fail to insert any page into the vma, the function will return
2103 * immediately leaving any previously inserted pages present. Callers
2104 * from the mmap handler may immediately return the error as their caller
2105 * will destroy the vma, removing any successfully inserted pages. Other
2106 * callers should make their own arrangements for calling unmap_region().
2107 *
2108 * Context: Process context. Called by mmap handlers.
2109 * Return: 0 on success and error code otherwise.
2110 */
2111int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2112 unsigned long num)
2113{
2114 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2115}
2116EXPORT_SYMBOL(vm_map_pages);
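
/*
 * Illustrative sketch (not part of this file): an f_op->mmap handler that
 * exposes a pre-allocated page array and lets vm_map_pages() perform the
 * vm_pgoff and size validation described above. mydrv_buf_pages and
 * mydrv_buf_npages are assumed to have been filled at probe time.
 */
static struct page **mydrv_buf_pages;
static unsigned long mydrv_buf_npages;

static int mydrv_mmap_buffer(struct file *file, struct vm_area_struct *vma)
{
	return vm_map_pages(vma, mydrv_buf_pages, mydrv_buf_npages);
}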
2117
2118/**
 2119 * vm_map_pages_zero - map a range of kernel pages starting at offset zero
2120 * @vma: user vma to map to
2121 * @pages: pointer to array of source kernel pages
2122 * @num: number of pages in page array
2123 *
2124 * Similar to vm_map_pages(), except that it explicitly sets the offset
 2125 * to 0. This function is intended for drivers that do not consider
2126 * vm_pgoff.
2127 *
2128 * Context: Process context. Called by mmap handlers.
2129 * Return: 0 on success and error code otherwise.
2130 */
2131int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2132 unsigned long num)
2133{
2134 return __vm_map_pages(vma, pages, num, 0);
2135}
2136EXPORT_SYMBOL(vm_map_pages_zero);
2137
9b5a8e00 2138static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
b2770da6 2139 pfn_t pfn, pgprot_t prot, bool mkwrite)
423bad60
NP
2140{
2141 struct mm_struct *mm = vma->vm_mm;
423bad60
NP
2142 pte_t *pte, entry;
2143 spinlock_t *ptl;
2144
423bad60
NP
2145 pte = get_locked_pte(mm, addr, &ptl);
2146 if (!pte)
9b5a8e00 2147 return VM_FAULT_OOM;
c33c7948
RR
2148 entry = ptep_get(pte);
2149 if (!pte_none(entry)) {
b2770da6
RZ
2150 if (mkwrite) {
2151 /*
2152 * For read faults on private mappings the PFN passed
2153 * in may not match the PFN we have mapped if the
2154 * mapped PFN is a writeable COW page. In the mkwrite
2155 * case we are creating a writable PTE for a shared
f2c57d91
JK
2156 * mapping and we expect the PFNs to match. If they
2157 * don't match, we are likely racing with block
2158 * allocation and mapping invalidation so just skip the
2159 * update.
b2770da6 2160 */
c33c7948
RR
2161 if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) {
2162 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry)));
b2770da6 2163 goto out_unlock;
f2c57d91 2164 }
c33c7948 2165 entry = pte_mkyoung(entry);
cae85cb8
JK
2166 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2167 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2168 update_mmu_cache(vma, addr, pte);
2169 }
2170 goto out_unlock;
b2770da6 2171 }
423bad60
NP
2172
2173 /* Ok, finally just insert the thing.. */
01c8f1c4
DW
2174 if (pfn_t_devmap(pfn))
2175 entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2176 else
2177 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
b2770da6 2178
b2770da6
RZ
2179 if (mkwrite) {
2180 entry = pte_mkyoung(entry);
2181 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2182 }
2183
423bad60 2184 set_pte_at(mm, addr, pte, entry);
4b3073e1 2185 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
423bad60 2186
423bad60
NP
2187out_unlock:
2188 pte_unmap_unlock(pte, ptl);
9b5a8e00 2189 return VM_FAULT_NOPAGE;
423bad60
NP
2190}
2191
f5e6d1d5
MW
2192/**
2193 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2194 * @vma: user vma to map to
2195 * @addr: target user address of this page
2196 * @pfn: source kernel pfn
2197 * @pgprot: pgprot flags for the inserted page
2198 *
a1a0aea5 2199 * This is exactly like vmf_insert_pfn(), except that it allows drivers
f5e6d1d5
MW
2200 * to override pgprot on a per-page basis.
2201 *
2202 * This only makes sense for IO mappings, and it makes no sense for
2203 * COW mappings. In general, using multiple vmas is preferable;
ae2b01f3 2204 * vmf_insert_pfn_prot should only be used if using multiple VMAs is
f5e6d1d5
MW
2205 * impractical.
2206 *
28d8b812
LS
2207 * pgprot typically only differs from @vma->vm_page_prot when drivers set
2208 * caching- and encryption bits different than those of @vma->vm_page_prot,
2209 * because the caching- or encryption mode may not be known at mmap() time.
2210 *
2211 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2212 * to set caching and encryption bits for those vmas (except for COW pages).
2213 * This is ensured by core vm only modifying these page table entries using
2214 * functions that don't touch caching- or encryption bits, using pte_modify()
2215 * if needed. (See for example mprotect()).
2216 *
2217 * Also when new page-table entries are created, this is only done using the
2218 * fault() callback, and never using the value of vma->vm_page_prot,
2219 * except for page-table entries that point to anonymous pages as the result
2220 * of COW.
574c5b3d 2221 *
ae2b01f3 2222 * Context: Process context. May allocate using %GFP_KERNEL.
f5e6d1d5
MW
2223 * Return: vm_fault_t value.
2224 */
2225vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2226 unsigned long pfn, pgprot_t pgprot)
2227{
6d958546
MW
2228 /*
2229 * Technically, architectures with pte_special can avoid all these
2230 * restrictions (same for remap_pfn_range). However we would like
2231 * consistency in testing and feature parity among all, so we should
2232 * try to keep these invariants in place for everybody.
2233 */
2234 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2235 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2236 (VM_PFNMAP|VM_MIXEDMAP));
2237 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2238 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2239
2240 if (addr < vma->vm_start || addr >= vma->vm_end)
2241 return VM_FAULT_SIGBUS;
2242
2243 if (!pfn_modify_allowed(pfn, pgprot))
2244 return VM_FAULT_SIGBUS;
2245
2246 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2247
9b5a8e00 2248 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
6d958546 2249 false);
f5e6d1d5
MW
2250}
2251EXPORT_SYMBOL(vmf_insert_pfn_prot);
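
/*
 * Illustrative sketch (not part of this file): a hypothetical ->fault
 * handler that maps device memory write-combined on a per-page basis via
 * vmf_insert_pfn_prot(), overriding vma->vm_page_prot as described above.
 * mydrv_wc_pfn_base is an assumed per-device value.
 */
static unsigned long mydrv_wc_pfn_base;

static vm_fault_t mydrv_wc_fault(struct vm_fault *vmf)
{
	unsigned long pfn = mydrv_wc_pfn_base + vmf->pgoff;

	return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
				   pgprot_writecombine(vmf->vma->vm_page_prot));
}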
e0dc0d8f 2252
ae2b01f3
MW
2253/**
2254 * vmf_insert_pfn - insert single pfn into user vma
2255 * @vma: user vma to map to
2256 * @addr: target user address of this page
2257 * @pfn: source kernel pfn
2258 *
2259 * Similar to vm_insert_page, this allows drivers to insert individual pages
2260 * they've allocated into a user vma. Same comments apply.
2261 *
2262 * This function should only be called from a vm_ops->fault handler, and
2263 * in that case the handler should return the result of this function.
2264 *
2265 * vma cannot be a COW mapping.
2266 *
2267 * As this is called only for pages that do not currently exist, we
2268 * do not need to flush old virtual caches or the TLB.
2269 *
2270 * Context: Process context. May allocate using %GFP_KERNEL.
2271 * Return: vm_fault_t value.
2272 */
2273vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2274 unsigned long pfn)
2275{
2276 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2277}
2278EXPORT_SYMBOL(vmf_insert_pfn);
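
/*
 * Illustrative sketch (not part of this file): the usual wiring of
 * vmf_insert_pfn() into a vm_operations_struct, returning its value
 * directly from the fault handler as recommended above. mydrv_base_pfn
 * and mydrv_vm_ops are assumed names.
 */
static unsigned long mydrv_base_pfn;

static vm_fault_t mydrv_fault(struct vm_fault *vmf)
{
	return vmf_insert_pfn(vmf->vma, vmf->address,
			      mydrv_base_pfn + vmf->pgoff);
}

static const struct vm_operations_struct mydrv_vm_ops = {
	.fault = mydrv_fault,
};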
2279
785a3fab
DW
2280static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2281{
2282 /* these checks mirror the abort conditions in vm_normal_page */
2283 if (vma->vm_flags & VM_MIXEDMAP)
2284 return true;
2285 if (pfn_t_devmap(pfn))
2286 return true;
2287 if (pfn_t_special(pfn))
2288 return true;
2289 if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2290 return true;
2291 return false;
2292}
2293
79f3aa5b 2294static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
28d8b812 2295 unsigned long addr, pfn_t pfn, bool mkwrite)
423bad60 2296{
28d8b812 2297 pgprot_t pgprot = vma->vm_page_prot;
79f3aa5b 2298 int err;
87744ab3 2299
785a3fab 2300 BUG_ON(!vm_mixed_ok(vma, pfn));
e0dc0d8f 2301
423bad60 2302 if (addr < vma->vm_start || addr >= vma->vm_end)
79f3aa5b 2303 return VM_FAULT_SIGBUS;
308a047c
BP
2304
2305 track_pfn_insert(vma, &pgprot, pfn);
e0dc0d8f 2306
42e4089c 2307 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
79f3aa5b 2308 return VM_FAULT_SIGBUS;
42e4089c 2309
423bad60
NP
2310 /*
2311 * If we don't have pte special, then we have to use the pfn_valid()
2312 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2313 * refcount the page if pfn_valid is true (hence insert_page rather
62eede62
HD
2314 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
 2315 * without pte special, it would then be refcounted as a normal page.
423bad60 2316 */
00b3a331
LD
2317 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2318 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
423bad60
NP
2319 struct page *page;
2320
03fc2da6
DW
2321 /*
2322 * At this point we are committed to insert_page()
2323 * regardless of whether the caller specified flags that
2324 * result in pfn_t_has_page() == false.
2325 */
2326 page = pfn_to_page(pfn_t_to_pfn(pfn));
79f3aa5b
MW
2327 err = insert_page(vma, addr, page, pgprot);
2328 } else {
9b5a8e00 2329 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
423bad60 2330 }
b2770da6 2331
5d747637
MW
2332 if (err == -ENOMEM)
2333 return VM_FAULT_OOM;
2334 if (err < 0 && err != -EBUSY)
2335 return VM_FAULT_SIGBUS;
2336
2337 return VM_FAULT_NOPAGE;
e0dc0d8f 2338}
79f3aa5b
MW
2339
2340vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2341 pfn_t pfn)
2342{
28d8b812 2343 return __vm_insert_mixed(vma, addr, pfn, false);
79f3aa5b 2344}
5d747637 2345EXPORT_SYMBOL(vmf_insert_mixed);
e0dc0d8f 2346
ab77dab4
SJ
2347/*
 2348 * If the insertion of the PTE failed because someone else already added a
 2349 * different entry in the meantime, we treat that as success as we assume
2350 * the same entry was actually inserted.
2351 */
ab77dab4
SJ
2352vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2353 unsigned long addr, pfn_t pfn)
b2770da6 2354{
28d8b812 2355 return __vm_insert_mixed(vma, addr, pfn, true);
b2770da6 2356}
ab77dab4 2357EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
b2770da6 2358
1da177e4
LT
2359/*
 2360 * Maps a range of physical memory into the requested pages. The old
 2361 * mappings are removed. Any references to nonexistent pages result
 2362 * in null mappings (currently treated as "copy-on-access").
2363 */
2364static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2365 unsigned long addr, unsigned long end,
2366 unsigned long pfn, pgprot_t prot)
2367{
90a3e375 2368 pte_t *pte, *mapped_pte;
c74df32c 2369 spinlock_t *ptl;
42e4089c 2370 int err = 0;
1da177e4 2371
90a3e375 2372 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1da177e4
LT
2373 if (!pte)
2374 return -ENOMEM;
6606c3e0 2375 arch_enter_lazy_mmu_mode();
1da177e4 2376 do {
c33c7948 2377 BUG_ON(!pte_none(ptep_get(pte)));
42e4089c
AK
2378 if (!pfn_modify_allowed(pfn, prot)) {
2379 err = -EACCES;
2380 break;
2381 }
7e675137 2382 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1da177e4
LT
2383 pfn++;
2384 } while (pte++, addr += PAGE_SIZE, addr != end);
6606c3e0 2385 arch_leave_lazy_mmu_mode();
90a3e375 2386 pte_unmap_unlock(mapped_pte, ptl);
42e4089c 2387 return err;
1da177e4
LT
2388}
2389
2390static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2391 unsigned long addr, unsigned long end,
2392 unsigned long pfn, pgprot_t prot)
2393{
2394 pmd_t *pmd;
2395 unsigned long next;
42e4089c 2396 int err;
1da177e4
LT
2397
2398 pfn -= addr >> PAGE_SHIFT;
2399 pmd = pmd_alloc(mm, pud, addr);
2400 if (!pmd)
2401 return -ENOMEM;
f66055ab 2402 VM_BUG_ON(pmd_trans_huge(*pmd));
1da177e4
LT
2403 do {
2404 next = pmd_addr_end(addr, end);
42e4089c
AK
2405 err = remap_pte_range(mm, pmd, addr, next,
2406 pfn + (addr >> PAGE_SHIFT), prot);
2407 if (err)
2408 return err;
1da177e4
LT
2409 } while (pmd++, addr = next, addr != end);
2410 return 0;
2411}
2412
c2febafc 2413static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
1da177e4
LT
2414 unsigned long addr, unsigned long end,
2415 unsigned long pfn, pgprot_t prot)
2416{
2417 pud_t *pud;
2418 unsigned long next;
42e4089c 2419 int err;
1da177e4
LT
2420
2421 pfn -= addr >> PAGE_SHIFT;
c2febafc 2422 pud = pud_alloc(mm, p4d, addr);
1da177e4
LT
2423 if (!pud)
2424 return -ENOMEM;
2425 do {
2426 next = pud_addr_end(addr, end);
42e4089c
AK
2427 err = remap_pmd_range(mm, pud, addr, next,
2428 pfn + (addr >> PAGE_SHIFT), prot);
2429 if (err)
2430 return err;
1da177e4
LT
2431 } while (pud++, addr = next, addr != end);
2432 return 0;
2433}
2434
c2febafc
KS
2435static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2436 unsigned long addr, unsigned long end,
2437 unsigned long pfn, pgprot_t prot)
2438{
2439 p4d_t *p4d;
2440 unsigned long next;
42e4089c 2441 int err;
c2febafc
KS
2442
2443 pfn -= addr >> PAGE_SHIFT;
2444 p4d = p4d_alloc(mm, pgd, addr);
2445 if (!p4d)
2446 return -ENOMEM;
2447 do {
2448 next = p4d_addr_end(addr, end);
42e4089c
AK
2449 err = remap_pud_range(mm, p4d, addr, next,
2450 pfn + (addr >> PAGE_SHIFT), prot);
2451 if (err)
2452 return err;
c2febafc
KS
2453 } while (p4d++, addr = next, addr != end);
2454 return 0;
2455}
2456
74ffa5a3
CH
2457/*
2458 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
2459 * must have pre-validated the caching bits of the pgprot_t.
bfa5bf6d 2460 */
74ffa5a3
CH
2461int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2462 unsigned long pfn, unsigned long size, pgprot_t prot)
1da177e4
LT
2463{
2464 pgd_t *pgd;
2465 unsigned long next;
2d15cab8 2466 unsigned long end = addr + PAGE_ALIGN(size);
1da177e4
LT
2467 struct mm_struct *mm = vma->vm_mm;
2468 int err;
2469
0c4123e3
AZ
2470 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2471 return -EINVAL;
2472
1da177e4
LT
2473 /*
2474 * Physically remapped pages are special. Tell the
2475 * rest of the world about it:
2476 * VM_IO tells people not to look at these pages
2477 * (accesses can have side effects).
6aab341e
LT
2478 * VM_PFNMAP tells the core MM that the base pages are just
2479 * raw PFN mappings, and do not have a "struct page" associated
2480 * with them.
314e51b9
KK
2481 * VM_DONTEXPAND
2482 * Disable vma merging and expanding with mremap().
2483 * VM_DONTDUMP
2484 * Omit vma from core dump, even when VM_IO turned off.
fb155c16
LT
2485 *
2486 * There's a horrible special case to handle copy-on-write
2487 * behaviour that some programs depend on. We mark the "original"
2488 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
b3b9c293 2489 * See vm_normal_page() for details.
1da177e4 2490 */
b3b9c293
KK
2491 if (is_cow_mapping(vma->vm_flags)) {
2492 if (addr != vma->vm_start || end != vma->vm_end)
2493 return -EINVAL;
fb155c16 2494 vma->vm_pgoff = pfn;
b3b9c293
KK
2495 }
2496
1c71222e 2497 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1da177e4
LT
2498
2499 BUG_ON(addr >= end);
2500 pfn -= addr >> PAGE_SHIFT;
2501 pgd = pgd_offset(mm, addr);
2502 flush_cache_range(vma, addr, end);
1da177e4
LT
2503 do {
2504 next = pgd_addr_end(addr, end);
c2febafc 2505 err = remap_p4d_range(mm, pgd, addr, next,
1da177e4
LT
2506 pfn + (addr >> PAGE_SHIFT), prot);
2507 if (err)
74ffa5a3 2508 return err;
1da177e4 2509 } while (pgd++, addr = next, addr != end);
2ab64037 2510
74ffa5a3
CH
2511 return 0;
2512}
2513
2514/**
2515 * remap_pfn_range - remap kernel memory to userspace
2516 * @vma: user vma to map to
2517 * @addr: target page aligned user address to start at
2518 * @pfn: page frame number of kernel physical memory address
2519 * @size: size of mapping area
2520 * @prot: page protection flags for this mapping
2521 *
2522 * Note: this is only safe if the mm semaphore is held when called.
2523 *
2524 * Return: %0 on success, negative error code otherwise.
2525 */
2526int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2527 unsigned long pfn, unsigned long size, pgprot_t prot)
2528{
2529 int err;
2530
2531 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2ab64037 2532 if (err)
74ffa5a3 2533 return -EINVAL;
2ab64037 2534
74ffa5a3
CH
2535 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2536 if (err)
68f48381 2537 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
1da177e4
LT
2538 return err;
2539}
2540EXPORT_SYMBOL(remap_pfn_range);
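
/*
 * Illustrative sketch (not part of this file): the classic f_op->mmap
 * pattern that remaps a whole physical region up front with
 * remap_pfn_range(), honouring the offset the user asked for via vm_pgoff.
 * mydrv_phys_base is an assumed device physical address.
 */
static phys_addr_t mydrv_phys_base;

static int mydrv_mmap_bar(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (mydrv_phys_base >> PAGE_SHIFT) + vma->vm_pgoff;

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}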
2541
b4cbb197
LT
2542/**
2543 * vm_iomap_memory - remap memory to userspace
2544 * @vma: user vma to map to
abd69b9e 2545 * @start: start of the physical memory to be mapped
b4cbb197
LT
2546 * @len: size of area
2547 *
2548 * This is a simplified io_remap_pfn_range() for common driver use. The
2549 * driver just needs to give us the physical memory range to be mapped,
2550 * we'll figure out the rest from the vma information.
2551 *
2552 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2553 * whatever write-combining details or similar.
a862f68a
MR
2554 *
2555 * Return: %0 on success, negative error code otherwise.
b4cbb197
LT
2556 */
2557int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2558{
2559 unsigned long vm_len, pfn, pages;
2560
2561 /* Check that the physical memory area passed in looks valid */
2562 if (start + len < start)
2563 return -EINVAL;
2564 /*
2565 * You *really* shouldn't map things that aren't page-aligned,
2566 * but we've historically allowed it because IO memory might
2567 * just have smaller alignment.
2568 */
2569 len += start & ~PAGE_MASK;
2570 pfn = start >> PAGE_SHIFT;
2571 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2572 if (pfn + pages < pfn)
2573 return -EINVAL;
2574
2575 /* We start the mapping 'vm_pgoff' pages into the area */
2576 if (vma->vm_pgoff > pages)
2577 return -EINVAL;
2578 pfn += vma->vm_pgoff;
2579 pages -= vma->vm_pgoff;
2580
2581 /* Can we fit all of the mapping? */
2582 vm_len = vma->vm_end - vma->vm_start;
2583 if (vm_len >> PAGE_SHIFT > pages)
2584 return -EINVAL;
2585
2586 /* Ok, let it rip */
2587 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2588}
2589EXPORT_SYMBOL(vm_iomap_memory);
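
/*
 * Illustrative sketch (not part of this file): with vm_iomap_memory() the
 * driver only supplies the raw physical range; the pgoff and size checks
 * above are done for it. mydrv_phys_base and mydrv_phys_len are assumed
 * per-device values, and the uncached protection is one possible choice.
 */
static phys_addr_t mydrv_phys_base;
static unsigned long mydrv_phys_len;

static int mydrv_mmap_region(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return vm_iomap_memory(vma, mydrv_phys_base, mydrv_phys_len);
}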
2590
aee16b3c
JF
2591static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2592 unsigned long addr, unsigned long end,
e80d3909
JR
2593 pte_fn_t fn, void *data, bool create,
2594 pgtbl_mod_mask *mask)
aee16b3c 2595{
8abb50c7 2596 pte_t *pte, *mapped_pte;
be1db475 2597 int err = 0;
3f649ab7 2598 spinlock_t *ptl;
aee16b3c 2599
be1db475 2600 if (create) {
8abb50c7 2601 mapped_pte = pte = (mm == &init_mm) ?
e80d3909 2602 pte_alloc_kernel_track(pmd, addr, mask) :
be1db475
DA
2603 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2604 if (!pte)
2605 return -ENOMEM;
2606 } else {
8abb50c7 2607 mapped_pte = pte = (mm == &init_mm) ?
be1db475
DA
2608 pte_offset_kernel(pmd, addr) :
2609 pte_offset_map_lock(mm, pmd, addr, &ptl);
3db82b93
HD
2610 if (!pte)
2611 return -EINVAL;
be1db475 2612 }
aee16b3c 2613
38e0edb1
JF
2614 arch_enter_lazy_mmu_mode();
2615
eeb4a05f
CH
2616 if (fn) {
2617 do {
c33c7948 2618 if (create || !pte_none(ptep_get(pte))) {
eeb4a05f
CH
2619 err = fn(pte++, addr, data);
2620 if (err)
2621 break;
2622 }
2623 } while (addr += PAGE_SIZE, addr != end);
2624 }
e80d3909 2625 *mask |= PGTBL_PTE_MODIFIED;
aee16b3c 2626
38e0edb1
JF
2627 arch_leave_lazy_mmu_mode();
2628
aee16b3c 2629 if (mm != &init_mm)
8abb50c7 2630 pte_unmap_unlock(mapped_pte, ptl);
aee16b3c
JF
2631 return err;
2632}
2633
2634static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2635 unsigned long addr, unsigned long end,
e80d3909
JR
2636 pte_fn_t fn, void *data, bool create,
2637 pgtbl_mod_mask *mask)
aee16b3c
JF
2638{
2639 pmd_t *pmd;
2640 unsigned long next;
be1db475 2641 int err = 0;
aee16b3c 2642
ceb86879
AK
2643 BUG_ON(pud_huge(*pud));
2644
be1db475 2645 if (create) {
e80d3909 2646 pmd = pmd_alloc_track(mm, pud, addr, mask);
be1db475
DA
2647 if (!pmd)
2648 return -ENOMEM;
2649 } else {
2650 pmd = pmd_offset(pud, addr);
2651 }
aee16b3c
JF
2652 do {
2653 next = pmd_addr_end(addr, end);
0c95cba4
NP
2654 if (pmd_none(*pmd) && !create)
2655 continue;
2656 if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2657 return -EINVAL;
2658 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2659 if (!create)
2660 continue;
2661 pmd_clear_bad(pmd);
be1db475 2662 }
0c95cba4
NP
2663 err = apply_to_pte_range(mm, pmd, addr, next,
2664 fn, data, create, mask);
2665 if (err)
2666 break;
aee16b3c 2667 } while (pmd++, addr = next, addr != end);
0c95cba4 2668
aee16b3c
JF
2669 return err;
2670}
2671
c2febafc 2672static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
aee16b3c 2673 unsigned long addr, unsigned long end,
e80d3909
JR
2674 pte_fn_t fn, void *data, bool create,
2675 pgtbl_mod_mask *mask)
aee16b3c
JF
2676{
2677 pud_t *pud;
2678 unsigned long next;
be1db475 2679 int err = 0;
aee16b3c 2680
be1db475 2681 if (create) {
e80d3909 2682 pud = pud_alloc_track(mm, p4d, addr, mask);
be1db475
DA
2683 if (!pud)
2684 return -ENOMEM;
2685 } else {
2686 pud = pud_offset(p4d, addr);
2687 }
aee16b3c
JF
2688 do {
2689 next = pud_addr_end(addr, end);
0c95cba4
NP
2690 if (pud_none(*pud) && !create)
2691 continue;
2692 if (WARN_ON_ONCE(pud_leaf(*pud)))
2693 return -EINVAL;
2694 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) {
2695 if (!create)
2696 continue;
2697 pud_clear_bad(pud);
be1db475 2698 }
0c95cba4
NP
2699 err = apply_to_pmd_range(mm, pud, addr, next,
2700 fn, data, create, mask);
2701 if (err)
2702 break;
aee16b3c 2703 } while (pud++, addr = next, addr != end);
0c95cba4 2704
aee16b3c
JF
2705 return err;
2706}
2707
c2febafc
KS
2708static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2709 unsigned long addr, unsigned long end,
e80d3909
JR
2710 pte_fn_t fn, void *data, bool create,
2711 pgtbl_mod_mask *mask)
c2febafc
KS
2712{
2713 p4d_t *p4d;
2714 unsigned long next;
be1db475 2715 int err = 0;
c2febafc 2716
be1db475 2717 if (create) {
e80d3909 2718 p4d = p4d_alloc_track(mm, pgd, addr, mask);
be1db475
DA
2719 if (!p4d)
2720 return -ENOMEM;
2721 } else {
2722 p4d = p4d_offset(pgd, addr);
2723 }
c2febafc
KS
2724 do {
2725 next = p4d_addr_end(addr, end);
0c95cba4
NP
2726 if (p4d_none(*p4d) && !create)
2727 continue;
2728 if (WARN_ON_ONCE(p4d_leaf(*p4d)))
2729 return -EINVAL;
2730 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) {
2731 if (!create)
2732 continue;
2733 p4d_clear_bad(p4d);
be1db475 2734 }
0c95cba4
NP
2735 err = apply_to_pud_range(mm, p4d, addr, next,
2736 fn, data, create, mask);
2737 if (err)
2738 break;
c2febafc 2739 } while (p4d++, addr = next, addr != end);
0c95cba4 2740
c2febafc
KS
2741 return err;
2742}
2743
be1db475
DA
2744static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2745 unsigned long size, pte_fn_t fn,
2746 void *data, bool create)
aee16b3c
JF
2747{
2748 pgd_t *pgd;
e80d3909 2749 unsigned long start = addr, next;
57250a5b 2750 unsigned long end = addr + size;
e80d3909 2751 pgtbl_mod_mask mask = 0;
be1db475 2752 int err = 0;
aee16b3c 2753
9cb65bc3
MP
2754 if (WARN_ON(addr >= end))
2755 return -EINVAL;
2756
aee16b3c
JF
2757 pgd = pgd_offset(mm, addr);
2758 do {
2759 next = pgd_addr_end(addr, end);
0c95cba4 2760 if (pgd_none(*pgd) && !create)
be1db475 2761 continue;
0c95cba4
NP
2762 if (WARN_ON_ONCE(pgd_leaf(*pgd)))
2763 return -EINVAL;
2764 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) {
2765 if (!create)
2766 continue;
2767 pgd_clear_bad(pgd);
2768 }
2769 err = apply_to_p4d_range(mm, pgd, addr, next,
2770 fn, data, create, &mask);
aee16b3c
JF
2771 if (err)
2772 break;
2773 } while (pgd++, addr = next, addr != end);
57250a5b 2774
e80d3909
JR
2775 if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2776 arch_sync_kernel_mappings(start, start + size);
2777
aee16b3c
JF
2778 return err;
2779}
be1db475
DA
2780
2781/*
2782 * Scan a region of virtual memory, filling in page tables as necessary
2783 * and calling a provided function on each leaf page table.
2784 */
2785int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2786 unsigned long size, pte_fn_t fn, void *data)
2787{
2788 return __apply_to_page_range(mm, addr, size, fn, data, true);
2789}
aee16b3c
JF
2790EXPORT_SYMBOL_GPL(apply_to_page_range);
2791
be1db475
DA
2792/*
2793 * Scan a region of virtual memory, calling a provided function on
2794 * each leaf page table where it exists.
2795 *
2796 * Unlike apply_to_page_range, this does _not_ fill in page tables
2797 * where they are absent.
2798 */
2799int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2800 unsigned long size, pte_fn_t fn, void *data)
2801{
2802 return __apply_to_page_range(mm, addr, size, fn, data, false);
2803}
2804EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
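
/*
 * Illustrative sketch (not part of this file): a pte_fn_t callback that
 * counts populated leaf ptes over a kernel virtual range. It uses
 * apply_to_existing_page_range() so that no intermediate page tables are
 * allocated just for the scan; apply_to_page_range() would allocate them.
 * count_present_pte() and count_present_range() are assumed names, and the
 * pte_none() check is merely defensive since absent entries are skipped.
 */
static int count_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (!pte_none(ptep_get(pte)))
		(*count)++;
	return 0;
}

static unsigned long count_present_range(unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	apply_to_existing_page_range(&init_mm, addr, size,
				     count_present_pte, &count);
	return count;
}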
2805
8f4e2101 2806/*
9b4bdd2f
KS
2807 * handle_pte_fault chooses page fault handler according to an entry which was
2808 * read non-atomically. Before making any commitment, on those architectures
2809 * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2810 * parts, do_swap_page must check under lock before unmapping the pte and
2811 * proceeding (but do_wp_page is only called after already making such a check;
a335b2e1 2812 * and do_anonymous_page can safely check later on).
8f4e2101 2813 */
2ca99358 2814static inline int pte_unmap_same(struct vm_fault *vmf)
8f4e2101
HD
2815{
2816 int same = 1;
923717cb 2817#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
8f4e2101 2818 if (sizeof(pte_t) > sizeof(unsigned long)) {
c7ad0880 2819 spin_lock(vmf->ptl);
c33c7948 2820 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
c7ad0880 2821 spin_unlock(vmf->ptl);
8f4e2101
HD
2822 }
2823#endif
2ca99358
PX
2824 pte_unmap(vmf->pte);
2825 vmf->pte = NULL;
8f4e2101
HD
2826 return same;
2827}
2828
a873dfe1
TL
2829/*
2830 * Return:
 2831 * 0: copy succeeded
 2832 * -EHWPOISON: copy failed due to hwpoison in source page
 2833 * -EAGAIN: copy failed (some other reason)
2834 */
2835static inline int __wp_page_copy_user(struct page *dst, struct page *src,
2836 struct vm_fault *vmf)
6aab341e 2837{
a873dfe1 2838 int ret;
83d116c5
JH
2839 void *kaddr;
2840 void __user *uaddr;
83d116c5
JH
2841 struct vm_area_struct *vma = vmf->vma;
2842 struct mm_struct *mm = vma->vm_mm;
2843 unsigned long addr = vmf->address;
2844
83d116c5 2845 if (likely(src)) {
d302c239
TL
2846 if (copy_mc_user_highpage(dst, src, addr, vma)) {
2847 memory_failure_queue(page_to_pfn(src), 0);
a873dfe1 2848 return -EHWPOISON;
d302c239 2849 }
a873dfe1 2850 return 0;
83d116c5
JH
2851 }
2852
6aab341e
LT
2853 /*
2854 * If the source page was a PFN mapping, we don't have
2855 * a "struct page" for it. We do a best-effort copy by
2856 * just copying from the original user address. If that
2857 * fails, we just zero-fill it. Live with it.
2858 */
24d2613a
FDF
2859 kaddr = kmap_local_page(dst);
2860 pagefault_disable();
83d116c5
JH
2861 uaddr = (void __user *)(addr & PAGE_MASK);
2862
2863 /*
2864 * On architectures with software "accessed" bits, we would
2865 * take a double page fault, so mark it accessed here.
2866 */
3db82b93 2867 vmf->pte = NULL;
e1fd09e3 2868 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
83d116c5 2869 pte_t entry;
5d2a2dbb 2870
83d116c5 2871 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
c33c7948 2872 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
83d116c5
JH
2873 /*
 2874 * Another thread has already handled the fault;
7df67697 2875 * just update the local TLB.
83d116c5 2876 */
a92cbb82
HD
2877 if (vmf->pte)
2878 update_mmu_tlb(vma, addr, vmf->pte);
a873dfe1 2879 ret = -EAGAIN;
83d116c5
JH
2880 goto pte_unlock;
2881 }
2882
2883 entry = pte_mkyoung(vmf->orig_pte);
2884 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
5003a2bd 2885 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
83d116c5
JH
2886 }
2887
2888 /*
2889 * This really shouldn't fail, because the page is there
2890 * in the page tables. But it might just be unreadable,
2891 * in which case we just give up and fill the result with
2892 * zeroes.
2893 */
2894 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
3db82b93 2895 if (vmf->pte)
c3e5ea6e
KS
2896 goto warn;
2897
2898 /* Re-validate under PTL if the page is still mapped */
2899 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
c33c7948 2900 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
7df67697 2901 /* The PTE changed under us, update local tlb */
a92cbb82
HD
2902 if (vmf->pte)
2903 update_mmu_tlb(vma, addr, vmf->pte);
a873dfe1 2904 ret = -EAGAIN;
c3e5ea6e
KS
2905 goto pte_unlock;
2906 }
2907
5d2a2dbb 2908 /*
985ba004 2909 * The same page may have been mapped back in since the last copy attempt.
c3e5ea6e 2910 * Try to copy again under PTL.
5d2a2dbb 2911 */
c3e5ea6e
KS
2912 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2913 /*
 2914 * Warn in case some obscure
 2915 * use-case hits this path.
2916 */
2917warn:
2918 WARN_ON_ONCE(1);
2919 clear_page(kaddr);
2920 }
83d116c5
JH
2921 }
2922
a873dfe1 2923 ret = 0;
83d116c5
JH
2924
2925pte_unlock:
3db82b93 2926 if (vmf->pte)
83d116c5 2927 pte_unmap_unlock(vmf->pte, vmf->ptl);
24d2613a
FDF
2928 pagefault_enable();
2929 kunmap_local(kaddr);
83d116c5
JH
2930 flush_dcache_page(dst);
2931
2932 return ret;
6aab341e
LT
2933}
2934
c20cd45e
MH
2935static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2936{
2937 struct file *vm_file = vma->vm_file;
2938
2939 if (vm_file)
2940 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2941
2942 /*
2943 * Special mappings (e.g. VDSO) do not have any file so fake
2944 * a default GFP_KERNEL for them.
2945 */
2946 return GFP_KERNEL;
2947}
2948
fb09a464
KS
2949/*
2950 * Notify the address space that the page is about to become writable so that
2951 * it can prohibit this or wait for the page to get into an appropriate state.
2952 *
2953 * We do this without the lock held, so that it can sleep if it needs to.
2954 */
86aa6998 2955static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
fb09a464 2956{
2b740303 2957 vm_fault_t ret;
38b8cb7f 2958 unsigned int old_flags = vmf->flags;
fb09a464 2959
38b8cb7f 2960 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
fb09a464 2961
dc617f29
DW
2962 if (vmf->vma->vm_file &&
2963 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2964 return VM_FAULT_SIGBUS;
2965
11bac800 2966 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
38b8cb7f
JK
2967 /* Restore original flags so that caller is not surprised */
2968 vmf->flags = old_flags;
fb09a464
KS
2969 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2970 return ret;
2971 if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3d243659
SK
2972 folio_lock(folio);
2973 if (!folio->mapping) {
2974 folio_unlock(folio);
fb09a464
KS
2975 return 0; /* retry */
2976 }
2977 ret |= VM_FAULT_LOCKED;
2978 } else
3d243659 2979 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
fb09a464
KS
2980 return ret;
2981}
2982
97ba0c2b
JK
2983/*
2984 * Handle dirtying of a page in shared file mapping on a write fault.
2985 *
2986 * The function expects the page to be locked and unlocks it.
2987 */
89b15332 2988static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
97ba0c2b 2989{
89b15332 2990 struct vm_area_struct *vma = vmf->vma;
97ba0c2b 2991 struct address_space *mapping;
15b4919a 2992 struct folio *folio = page_folio(vmf->page);
97ba0c2b
JK
2993 bool dirtied;
2994 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2995
15b4919a
Z
2996 dirtied = folio_mark_dirty(folio);
2997 VM_BUG_ON_FOLIO(folio_test_anon(folio), folio);
97ba0c2b 2998 /*
15b4919a
Z
2999 * Take a local copy of the address_space - folio.mapping may be zeroed
3000 * by truncate after folio_unlock(). The address_space itself remains
3001 * pinned by vma->vm_file's reference. We rely on folio_unlock()'s
97ba0c2b
JK
3002 * release semantics to prevent the compiler from undoing this copying.
3003 */
15b4919a
Z
3004 mapping = folio_raw_mapping(folio);
3005 folio_unlock(folio);
97ba0c2b 3006
89b15332
JW
3007 if (!page_mkwrite)
3008 file_update_time(vma->vm_file);
3009
3010 /*
3011 * Throttle page dirtying rate down to writeback speed.
3012 *
3013 * mapping may be NULL here because some device drivers do not
3014 * set page.mapping but still dirty their pages
3015 *
c1e8d7c6 3016 * Drop the mmap_lock before waiting on IO, if we can. The file
89b15332
JW
3017 * is pinning the mapping, as per above.
3018 */
97ba0c2b 3019 if ((dirtied || page_mkwrite) && mapping) {
89b15332
JW
3020 struct file *fpin;
3021
3022 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
97ba0c2b 3023 balance_dirty_pages_ratelimited(mapping);
89b15332
JW
3024 if (fpin) {
3025 fput(fpin);
d9272525 3026 return VM_FAULT_COMPLETED;
89b15332 3027 }
97ba0c2b
JK
3028 }
3029
89b15332 3030 return 0;
97ba0c2b
JK
3031}
3032
4e047f89
SR
3033/*
3034 * Handle write page faults for pages that can be reused in the current vma
3035 *
 3036 * This can happen either due to the mapping having the VM_SHARED flag,
 3037 * or due to us holding the last remaining reference to the page. In either
3038 * case, all we need to do here is to mark the page as writable and update
3039 * any related book-keeping.
3040 */
a86bc96b 3041static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio)
82b0f8c3 3042 __releases(vmf->ptl)
4e047f89 3043{
82b0f8c3 3044 struct vm_area_struct *vma = vmf->vma;
4e047f89 3045 pte_t entry;
6c287605 3046
c89357e2 3047 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
6c287605 3048
c2c3b514
KW
3049 if (folio) {
3050 VM_BUG_ON(folio_test_anon(folio) &&
3051 !PageAnonExclusive(vmf->page));
3052 /*
3053 * Clear the folio's cpupid information as the existing
3054 * information potentially belongs to a now completely
3055 * unrelated process.
3056 */
3057 folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1);
3058 }
4e047f89 3059
2994302b
JK
3060 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3061 entry = pte_mkyoung(vmf->orig_pte);
4e047f89 3062 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
82b0f8c3 3063 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
5003a2bd 3064 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
82b0f8c3 3065 pte_unmap_unlock(vmf->pte, vmf->ptl);
798a6b87 3066 count_vm_event(PGREUSE);
4e047f89
SR
3067}
3068
4ed43798
MWO
3069/*
3070 * We could add a bitflag somewhere, but for now, we know that all
3071 * vm_ops that have a ->map_pages have been audited and don't need
3072 * the mmap_lock to be held.
3073 */
3074static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
3075{
3076 struct vm_area_struct *vma = vmf->vma;
3077
3078 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK))
3079 return 0;
3080 vma_end_read(vma);
3081 return VM_FAULT_RETRY;
3082}
3083
164b06f2
MWO
3084static vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
3085{
3086 struct vm_area_struct *vma = vmf->vma;
3087
3088 if (likely(vma->anon_vma))
3089 return 0;
3090 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3091 vma_end_read(vma);
3092 return VM_FAULT_RETRY;
3093 }
3094 if (__anon_vma_prepare(vma))
3095 return VM_FAULT_OOM;
3096 return 0;
3097}
3098
2f38ab2c 3099/*
c89357e2
DH
3100 * Handle the case of a page which we actually need to copy to a new page,
3101 * either due to COW or unsharing.
2f38ab2c 3102 *
c1e8d7c6 3103 * Called with mmap_lock locked and the old page referenced, but
2f38ab2c
SR
3104 * without the ptl held.
3105 *
3106 * High level logic flow:
3107 *
3108 * - Allocate a page, copy the content of the old page to the new one.
3109 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3110 * - Take the PTL. If the pte changed, bail out and release the allocated page
3111 * - If the pte is still the way we remember it, update the page table and all
3112 * relevant references. This includes dropping the reference the page-table
3113 * held to the old page, as well as updating the rmap.
3114 * - In any case, unlock the PTL and drop the reference we took to the old page.
3115 */
2b740303 3116static vm_fault_t wp_page_copy(struct vm_fault *vmf)
2f38ab2c 3117{
c89357e2 3118 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
82b0f8c3 3119 struct vm_area_struct *vma = vmf->vma;
bae473a4 3120 struct mm_struct *mm = vma->vm_mm;
28d41a48
MWO
3121 struct folio *old_folio = NULL;
3122 struct folio *new_folio = NULL;
2f38ab2c
SR
3123 pte_t entry;
3124 int page_copied = 0;
ac46d4f3 3125 struct mmu_notifier_range range;
164b06f2 3126 vm_fault_t ret;
cf503cc6 3127 bool pfn_is_zero;
2f38ab2c 3128
662ce1dc
YY
3129 delayacct_wpcopy_start();
3130
28d41a48
MWO
3131 if (vmf->page)
3132 old_folio = page_folio(vmf->page);
164b06f2
MWO
3133 ret = vmf_anon_prepare(vmf);
3134 if (unlikely(ret))
3135 goto out;
2f38ab2c 3136
cf503cc6
KW
3137 pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte));
3138 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero);
3139 if (!new_folio)
3140 goto oom;
3141
3142 if (!pfn_is_zero) {
164b06f2 3143 int err;
83d116c5 3144
164b06f2
MWO
3145 err = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3146 if (err) {
83d116c5
JH
3147 /*
3148 * COW failed, if the fault was solved by other,
3149 * it's fine. If not, userspace would re-fault on
3150 * the same address and we will handle the fault
3151 * from the second attempt.
a873dfe1 3152 * The -EHWPOISON case will not be retried.
83d116c5 3153 */
28d41a48
MWO
3154 folio_put(new_folio);
3155 if (old_folio)
3156 folio_put(old_folio);
662ce1dc
YY
3157
3158 delayacct_wpcopy_end();
164b06f2 3159 return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
83d116c5 3160 }
28d41a48 3161 kmsan_copy_page_meta(&new_folio->page, vmf->page);
2f38ab2c 3162 }
2f38ab2c 3163
28d41a48 3164 __folio_mark_uptodate(new_folio);
eb3c24f3 3165
7d4a8be0 3166 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
6f4f13e8 3167 vmf->address & PAGE_MASK,
ac46d4f3
JG
3168 (vmf->address & PAGE_MASK) + PAGE_SIZE);
3169 mmu_notifier_invalidate_range_start(&range);
2f38ab2c
SR
3170
3171 /*
3172 * Re-check the pte - we dropped the lock
3173 */
82b0f8c3 3174 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
c33c7948 3175 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
28d41a48
MWO
3176 if (old_folio) {
3177 if (!folio_test_anon(old_folio)) {
3178 dec_mm_counter(mm, mm_counter_file(&old_folio->page));
f1a79412 3179 inc_mm_counter(mm, MM_ANONPAGES);
2f38ab2c
SR
3180 }
3181 } else {
6080d19f 3182 ksm_might_unmap_zero_page(mm, vmf->orig_pte);
f1a79412 3183 inc_mm_counter(mm, MM_ANONPAGES);
2f38ab2c 3184 }
2994302b 3185 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
28d41a48 3186 entry = mk_pte(&new_folio->page, vma->vm_page_prot);
50c25ee9 3187 entry = pte_sw_mkyoung(entry);
c89357e2
DH
3188 if (unlikely(unshare)) {
3189 if (pte_soft_dirty(vmf->orig_pte))
3190 entry = pte_mksoft_dirty(entry);
3191 if (pte_uffd_wp(vmf->orig_pte))
3192 entry = pte_mkuffd_wp(entry);
3193 } else {
3194 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3195 }
111fe718 3196
2f38ab2c
SR
3197 /*
3198 * Clear the pte entry and flush it first, before updating the
111fe718
NP
3199 * pte with the new entry, to keep TLBs on different CPUs in
3200 * sync. This code used to set the new PTE then flush TLBs, but
3201 * that left a window where the new PTE could be loaded into
3202 * some TLBs while the old PTE remains in others.
2f38ab2c 3203 */
ec8832d0 3204 ptep_clear_flush(vma, vmf->address, vmf->pte);
28d41a48
MWO
3205 folio_add_new_anon_rmap(new_folio, vma, vmf->address);
3206 folio_add_lru_vma(new_folio, vma);
2f38ab2c
SR
3207 /*
3208 * We call the notify macro here because, when using secondary
3209 * mmu page tables (such as kvm shadow page tables), we want the
3210 * new page to be mapped directly into the secondary page table.
3211 */
c89357e2 3212 BUG_ON(unshare && pte_write(entry));
82b0f8c3 3213 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
5003a2bd 3214 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
28d41a48 3215 if (old_folio) {
2f38ab2c
SR
3216 /*
3217 * Only after switching the pte to the new page may
3218 * we remove the mapcount here. Otherwise another
3219 * process may come and find the rmap count decremented
3220 * before the pte is switched to the new page, and
3221 * "reuse" the old page writing into it while our pte
3222 * here still points into it and can be read by other
3223 * threads.
3224 *
3225 * The critical issue is to order this
c4626503
DH
3226 * folio_remove_rmap_pte() with the ptp_clear_flush
3227 * above. Those stores are ordered by (if nothing else,)
2f38ab2c 3228 * the barrier present in the atomic_add_negative
c4626503 3229 * in folio_remove_rmap_pte();
2f38ab2c
SR
3230 *
3231 * Then the TLB flush in ptep_clear_flush ensures that
3232 * no process can access the old page before the
3233 * decremented mapcount is visible. And the old page
3234 * cannot be reused until after the decremented
3235 * mapcount is visible. So transitively, TLBs to
3236 * old page will be flushed before it can be reused.
3237 */
c4626503 3238 folio_remove_rmap_pte(old_folio, vmf->page, vma);
2f38ab2c
SR
3239 }
3240
3241 /* Free the old page.. */
28d41a48 3242 new_folio = old_folio;
2f38ab2c 3243 page_copied = 1;
3db82b93
HD
3244 pte_unmap_unlock(vmf->pte, vmf->ptl);
3245 } else if (vmf->pte) {
7df67697 3246 update_mmu_tlb(vma, vmf->address, vmf->pte);
3db82b93 3247 pte_unmap_unlock(vmf->pte, vmf->ptl);
2f38ab2c
SR
3248 }
3249
ec8832d0 3250 mmu_notifier_invalidate_range_end(&range);
3db82b93
HD
3251
3252 if (new_folio)
3253 folio_put(new_folio);
28d41a48 3254 if (old_folio) {
f4c4a3f4 3255 if (page_copied)
28d41a48
MWO
3256 free_swap_cache(&old_folio->page);
3257 folio_put(old_folio);
2f38ab2c 3258 }
662ce1dc
YY
3259
3260 delayacct_wpcopy_end();
cb8d8633 3261 return 0;
2f38ab2c 3262oom:
164b06f2
MWO
3263 ret = VM_FAULT_OOM;
3264out:
28d41a48
MWO
3265 if (old_folio)
3266 folio_put(old_folio);
662ce1dc
YY
3267
3268 delayacct_wpcopy_end();
164b06f2 3269 return ret;
2f38ab2c
SR
3270}
3271
66a6197c
JK
3272/**
3273 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3274 * writeable once the page is prepared
3275 *
3276 * @vmf: structure describing the fault
a86bc96b 3277 * @folio: the folio of vmf->page
66a6197c
JK
3278 *
3279 * This function handles all that is needed to finish a write page fault in a
3280 * shared mapping due to the PTE being read-only once the mapped page is prepared.
a862f68a 3281 * It handles locking of the PTE and modifying it.
66a6197c
JK
3282 *
3283 * The function expects the page to be locked or other protection against
3284 * concurrent faults / writeback (such as DAX radix tree locks).
a862f68a 3285 *
2797e79f 3286 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before
a862f68a 3287 * we acquired PTE lock.
66a6197c 3288 */
a86bc96b 3289static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio)
66a6197c
JK
3290{
3291 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3292 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3293 &vmf->ptl);
3db82b93
HD
3294 if (!vmf->pte)
3295 return VM_FAULT_NOPAGE;
66a6197c
JK
3296 /*
3297 * We might have raced with another page fault while we released the
3298 * pte_offset_map_lock.
3299 */
c33c7948 3300 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
7df67697 3301 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
66a6197c 3302 pte_unmap_unlock(vmf->pte, vmf->ptl);
a19e2553 3303 return VM_FAULT_NOPAGE;
66a6197c 3304 }
a86bc96b 3305 wp_page_reuse(vmf, folio);
a19e2553 3306 return 0;
66a6197c
JK
3307}
3308
dd906184
BH
3309/*
3310 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3311 * mapping
3312 */
2b740303 3313static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
dd906184 3314{
82b0f8c3 3315 struct vm_area_struct *vma = vmf->vma;
bae473a4 3316
dd906184 3317 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
2b740303 3318 vm_fault_t ret;
dd906184 3319
82b0f8c3 3320 pte_unmap_unlock(vmf->pte, vmf->ptl);
4a68fef1
MWO
3321 ret = vmf_can_call_fault(vmf);
3322 if (ret)
3323 return ret;
063e60d8 3324
fe82221f 3325 vmf->flags |= FAULT_FLAG_MKWRITE;
11bac800 3326 ret = vma->vm_ops->pfn_mkwrite(vmf);
2f89dc12 3327 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
dd906184 3328 return ret;
a86bc96b 3329 return finish_mkwrite_fault(vmf, NULL);
dd906184 3330 }
a86bc96b 3331 wp_page_reuse(vmf, NULL);
cb8d8633 3332 return 0;
dd906184
BH
3333}
3334
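/*
 * Handle a write fault on a present page in a shared writable mapping.
 * (The pfn-only case without a struct page is handled by wp_pfn_shared().)
 * Notify the filesystem via ->page_mkwrite() if it provides the hook, then
 * reuse the existing page and let fault_dirty_shared_page() handle dirty
 * accounting and writeback balancing.
 */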
5a97858b 3335static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
82b0f8c3 3336 __releases(vmf->ptl)
93e478d4 3337{
82b0f8c3 3338 struct vm_area_struct *vma = vmf->vma;
cb8d8633 3339 vm_fault_t ret = 0;
93e478d4 3340
5a97858b 3341 folio_get(folio);
93e478d4 3342
93e478d4 3343 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
2b740303 3344 vm_fault_t tmp;
93e478d4 3345
82b0f8c3 3346 pte_unmap_unlock(vmf->pte, vmf->ptl);
4a68fef1
MWO
3347 tmp = vmf_can_call_fault(vmf);
3348 if (tmp) {
063e60d8 3349 folio_put(folio);
4a68fef1 3350 return tmp;
063e60d8
MWO
3351 }
3352
86aa6998 3353 tmp = do_page_mkwrite(vmf, folio);
93e478d4
SR
3354 if (unlikely(!tmp || (tmp &
3355 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
5a97858b 3356 folio_put(folio);
93e478d4
SR
3357 return tmp;
3358 }
a86bc96b 3359 tmp = finish_mkwrite_fault(vmf, folio);
a19e2553 3360 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
5a97858b
SK
3361 folio_unlock(folio);
3362 folio_put(folio);
66a6197c 3363 return tmp;
93e478d4 3364 }
66a6197c 3365 } else {
a86bc96b 3366 wp_page_reuse(vmf, folio);
5a97858b 3367 folio_lock(folio);
93e478d4 3368 }
89b15332 3369 ret |= fault_dirty_shared_page(vmf);
5a97858b 3370 folio_put(folio);
93e478d4 3371
89b15332 3372 return ret;
93e478d4
SR
3373}
3374
dec078cc
DH
3375static bool wp_can_reuse_anon_folio(struct folio *folio,
3376 struct vm_area_struct *vma)
3377{
3378 /*
3379 * We have to verify under folio lock: these early checks are
3380 * just an optimization to avoid locking the folio and freeing
3381 * the swapcache if there is little hope that we can reuse.
3382 *
3383 * KSM doesn't necessarily raise the folio refcount.
3384 */
3385 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
3386 return false;
3387 if (!folio_test_lru(folio))
3388 /*
3389 * We cannot easily detect+handle references from
3390 * remote LRU caches or references to LRU folios.
3391 */
3392 lru_add_drain();
3393 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
3394 return false;
3395 if (!folio_trylock(folio))
3396 return false;
3397 if (folio_test_swapcache(folio))
3398 folio_free_swap(folio);
3399 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
3400 folio_unlock(folio);
3401 return false;
3402 }
3403 /*
3404 * Ok, we've got the only folio reference from our mapping
3405 * and the folio is locked, it's dark out, and we're wearing
3406 * sunglasses. Hit it.
3407 */
3408 folio_move_anon_rmap(folio, vma);
3409 folio_unlock(folio);
3410 return true;
3411}
3412
1da177e4 3413/*
c89357e2
DH
3414 * This routine handles present pages, when
3415 * * users try to write to a shared page (FAULT_FLAG_WRITE)
3416 * * GUP wants to take a R/O pin on a possibly shared anonymous page
3417 * (FAULT_FLAG_UNSHARE)
3418 *
3419 * It is done by copying the page to a new address and decrementing the
3420 * shared-page counter for the old page.
1da177e4 3421 *
1da177e4
LT
3422 * Note that this routine assumes that the protection checks have been
3423 * done by the caller (the low-level page fault routine in most cases).
c89357e2
DH
3424 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've
3425 * done any necessary COW.
1da177e4 3426 *
c89357e2
DH
3427 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even
3428 * though the page will change only once the write actually happens. This
3429 * avoids a few races, and potentially makes it more efficient.
1da177e4 3430 *
c1e8d7c6 3431 * We enter with non-exclusive mmap_lock (to exclude vma changes,
8f4e2101 3432 * but allow concurrent faults), with pte both mapped and locked.
c1e8d7c6 3433 * We return with mmap_lock still held, but pte unmapped and unlocked.
1da177e4 3434 */
2b740303 3435static vm_fault_t do_wp_page(struct vm_fault *vmf)
82b0f8c3 3436 __releases(vmf->ptl)
1da177e4 3437{
c89357e2 3438 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
82b0f8c3 3439 struct vm_area_struct *vma = vmf->vma;
b9086fde 3440 struct folio *folio = NULL;
d61ea1cb 3441 pte_t pte;
1da177e4 3442
c89357e2 3443 if (likely(!unshare)) {
c33c7948 3444 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
d61ea1cb
PX
3445 if (!userfaultfd_wp_async(vma)) {
3446 pte_unmap_unlock(vmf->pte, vmf->ptl);
3447 return handle_userfault(vmf, VM_UFFD_WP);
3448 }
3449
3450 /*
3451 * Nothing needed (cache flush, TLB invalidations,
3452 * etc.) because we're only removing the uffd-wp bit,
3453 * which is completely invisible to the user.
3454 */
3455 pte = pte_clear_uffd_wp(ptep_get(vmf->pte));
3456
3457 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3458 /*
3459 * Update this to be prepared for the follow-up CoW
3460 * handling
3461 */
3462 vmf->orig_pte = pte;
c89357e2
DH
3463 }
3464
3465 /*
3466 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3467 * is flushed in this case before copying.
3468 */
3469 if (unlikely(userfaultfd_wp(vmf->vma) &&
3470 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3471 flush_tlb_page(vmf->vma, vmf->address);
3472 }
6ce64428 3473
a41b70d6 3474 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
c89357e2 3475
5a97858b
SK
3476 if (vmf->page)
3477 folio = page_folio(vmf->page);
3478
b9086fde
DH
3479 /*
3480 * Shared mapping: we are guaranteed to have VM_WRITE and
3481 * FAULT_FLAG_WRITE set at this point.
3482 */
3483 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
251b97f5 3484 /*
64e45507
PF
3485 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3486 * VM_PFNMAP VMA.
251b97f5
PZ
3487 *
3488 * We should not cow pages in a shared writeable mapping.
dd906184 3489 * Just mark the pages writable and/or call ops->pfn_mkwrite.
251b97f5 3490 */
b9086fde 3491 if (!vmf->page)
2994302b 3492 return wp_pfn_shared(vmf);
5a97858b 3493 return wp_page_shared(vmf, folio);
251b97f5 3494 }
1da177e4 3495
d08b3851 3496 /*
b9086fde
DH
3497 * Private mapping: create an exclusive anonymous page copy if reuse
3498 * is impossible. We might miss VM_WRITE for FOLL_FORCE handling.
dec078cc
DH
3499 *
3500 * If we encounter a page that is marked exclusive, we must reuse
3501 * the page without further checks.
d08b3851 3502 */
dec078cc
DH
3503 if (folio && folio_test_anon(folio) &&
3504 (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) {
3505 if (!PageAnonExclusive(vmf->page))
3506 SetPageAnonExclusive(vmf->page);
c89357e2
DH
3507 if (unlikely(unshare)) {
3508 pte_unmap_unlock(vmf->pte, vmf->ptl);
3509 return 0;
3510 }
a86bc96b 3511 wp_page_reuse(vmf, folio);
cb8d8633 3512 return 0;
1da177e4 3513 }
1da177e4
LT
3514 /*
3515 * Ok, we need to copy. Oh, well..
3516 */
b9086fde
DH
3517 if (folio)
3518 folio_get(folio);
28766805 3519
82b0f8c3 3520 pte_unmap_unlock(vmf->pte, vmf->ptl);
94bfe85b 3521#ifdef CONFIG_KSM
b9086fde 3522 if (folio && folio_test_ksm(folio))
94bfe85b
YY
3523 count_vm_event(COW_KSM);
3524#endif
a41b70d6 3525 return wp_page_copy(vmf);
1da177e4
LT
3526}
3527
97a89413 3528static void unmap_mapping_range_vma(struct vm_area_struct *vma,
1da177e4
LT
3529 unsigned long start_addr, unsigned long end_addr,
3530 struct zap_details *details)
3531{
f5cc4eef 3532 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
1da177e4
LT
3533}
3534
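/*
 * Walk the i_mmap interval tree and, for each VMA overlapping the file range
 * [first_index, last_index], zap the overlapping portion: zba/zea are the
 * file page offsets clamped to the VMA, converted back to virtual addresses
 * before calling unmap_mapping_range_vma().
 */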
f808c13f 3535static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
232a6a1c
PX
3536 pgoff_t first_index,
3537 pgoff_t last_index,
1da177e4
LT
3538 struct zap_details *details)
3539{
3540 struct vm_area_struct *vma;
1da177e4
LT
3541 pgoff_t vba, vea, zba, zea;
3542
232a6a1c 3543 vma_interval_tree_foreach(vma, root, first_index, last_index) {
1da177e4 3544 vba = vma->vm_pgoff;
d6e93217 3545 vea = vba + vma_pages(vma) - 1;
f9871da9
ML
3546 zba = max(first_index, vba);
3547 zea = min(last_index, vea);
1da177e4 3548
97a89413 3549 unmap_mapping_range_vma(vma,
1da177e4
LT
3550 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3551 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
97a89413 3552 details);
1da177e4
LT
3553 }
3554}
3555
22061a1f 3556/**
3506659e
MWO
3557 * unmap_mapping_folio() - Unmap single folio from processes.
3558 * @folio: The locked folio to be unmapped.
22061a1f 3559 *
3506659e 3560 * Unmap this folio from any userspace process which still has it mmaped.
22061a1f
HD
3561 * Typically, for efficiency, the range of nearby pages has already been
3562 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
3506659e
MWO
3563 * truncation or invalidation holds the lock on a folio, it may find that
3564 * the page has been remapped again: and then uses unmap_mapping_folio()
22061a1f
HD
3565 * to unmap it finally.
3566 */
3506659e 3567void unmap_mapping_folio(struct folio *folio)
22061a1f 3568{
3506659e 3569 struct address_space *mapping = folio->mapping;
22061a1f 3570 struct zap_details details = { };
232a6a1c
PX
3571 pgoff_t first_index;
3572 pgoff_t last_index;
22061a1f 3573
3506659e 3574 VM_BUG_ON(!folio_test_locked(folio));
22061a1f 3575
3506659e 3576 first_index = folio->index;
87b11f86 3577 last_index = folio_next_index(folio) - 1;
232a6a1c 3578
2e148f1e 3579 details.even_cows = false;
3506659e 3580 details.single_folio = folio;
999dad82 3581 details.zap_flags = ZAP_FLAG_DROP_MARKER;
22061a1f 3582
2c865995 3583 i_mmap_lock_read(mapping);
22061a1f 3584 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
232a6a1c
PX
3585 unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3586 last_index, &details);
2c865995 3587 i_mmap_unlock_read(mapping);
22061a1f
HD
3588}
3589
977fbdcd
MW
3590/**
3591 * unmap_mapping_pages() - Unmap pages from processes.
3592 * @mapping: The address space containing pages to be unmapped.
3593 * @start: Index of first page to be unmapped.
3594 * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
3595 * @even_cows: Whether to unmap even private COWed pages.
3596 *
3597 * Unmap the pages in this address space from any userspace process which
3598 * has them mmaped. Generally, you want to remove COWed pages as well when
3599 * a file is being truncated, but not when invalidating pages from the page
3600 * cache.
3601 */
3602void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3603 pgoff_t nr, bool even_cows)
3604{
3605 struct zap_details details = { };
232a6a1c
PX
3606 pgoff_t first_index = start;
3607 pgoff_t last_index = start + nr - 1;
977fbdcd 3608
2e148f1e 3609 details.even_cows = even_cows;
232a6a1c
PX
3610 if (last_index < first_index)
3611 last_index = ULONG_MAX;
977fbdcd 3612
2c865995 3613 i_mmap_lock_read(mapping);
977fbdcd 3614 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
232a6a1c
PX
3615 unmap_mapping_range_tree(&mapping->i_mmap, first_index,
3616 last_index, &details);
2c865995 3617 i_mmap_unlock_read(mapping);
977fbdcd 3618}
6e0e99d5 3619EXPORT_SYMBOL_GPL(unmap_mapping_pages);
977fbdcd 3620
1da177e4 3621/**
8a5f14a2 3622 * unmap_mapping_range - unmap the portion of all mmaps in the specified
977fbdcd 3623 * address_space corresponding to the specified byte range in the underlying
8a5f14a2
KS
3624 * file.
3625 *
3d41088f 3626 * @mapping: the address space containing mmaps to be unmapped.
1da177e4
LT
3627 * @holebegin: byte in first page to unmap, relative to the start of
3628 * the underlying file. This will be rounded down to a PAGE_SIZE
25d9e2d1 3629 * boundary. Note that this is different from truncate_pagecache(), which
1da177e4
LT
3630 * must keep the partial page. In contrast, we must get rid of
3631 * partial pages.
3632 * @holelen: size of prospective hole in bytes. This will be rounded
3633 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the
3634 * end of the file.
3635 * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3636 * but 0 when invalidating pagecache, don't throw away private data.
3637 */
3638void unmap_mapping_range(struct address_space *mapping,
3639 loff_t const holebegin, loff_t const holelen, int even_cows)
3640{
9eab0421
JX
3641 pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
3642 pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1da177e4
LT
3643
3644 /* Check for overflow. */
3645 if (sizeof(holelen) > sizeof(hlen)) {
3646 long long holeend =
3647 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3648 if (holeend & ~(long long)ULONG_MAX)
3649 hlen = ULONG_MAX - hba + 1;
3650 }
3651
977fbdcd 3652 unmap_mapping_pages(mapping, hba, hlen, even_cows);
1da177e4
LT
3653}
3654EXPORT_SYMBOL(unmap_mapping_range);
3655
b756a3b5
AP
3656/*
3657 * Restore a potential device exclusive pte to a working pte entry
3658 */
3659static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3660{
19672a9e 3661 struct folio *folio = page_folio(vmf->page);
b756a3b5
AP
3662 struct vm_area_struct *vma = vmf->vma;
3663 struct mmu_notifier_range range;
fdc724d6 3664 vm_fault_t ret;
b756a3b5 3665
7c7b9629
AP
3666 /*
3667 * We need a reference to lock the folio because we don't hold
3668 * the PTL so a racing thread can remove the device-exclusive
3669 * entry and unmap it. If the folio is free the entry must
3670 * have been removed already. If it happens to have already
3671 * been re-allocated after being freed all we do is lock and
3672 * unlock it.
3673 */
3674 if (!folio_try_get(folio))
3675 return 0;
3676
fdc724d6
SB
3677 ret = folio_lock_or_retry(folio, vmf);
3678 if (ret) {
7c7b9629 3679 folio_put(folio);
fdc724d6 3680 return ret;
7c7b9629 3681 }
7d4a8be0 3682 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
b756a3b5
AP
3683 vma->vm_mm, vmf->address & PAGE_MASK,
3684 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3685 mmu_notifier_invalidate_range_start(&range);
3686
3687 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3688 &vmf->ptl);
c33c7948 3689 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
19672a9e 3690 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
b756a3b5 3691
3db82b93
HD
3692 if (vmf->pte)
3693 pte_unmap_unlock(vmf->pte, vmf->ptl);
19672a9e 3694 folio_unlock(folio);
7c7b9629 3695 folio_put(folio);
b756a3b5
AP
3696
3697 mmu_notifier_invalidate_range_end(&range);
3698 return 0;
3699}
3700
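/*
 * After swapin, should do_swap_page() try to drop the swapcache copy of
 * @folio?  Always worth it when memcg swap is full or the folio is mlocked;
 * otherwise only for write faults where the refcount suggests we are likely
 * the exclusive user (see the comment below).
 */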
a160e537 3701static inline bool should_try_to_free_swap(struct folio *folio,
c145e0b4
DH
3702 struct vm_area_struct *vma,
3703 unsigned int fault_flags)
3704{
a160e537 3705 if (!folio_test_swapcache(folio))
c145e0b4 3706 return false;
9202d527 3707 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
a160e537 3708 folio_test_mlocked(folio))
c145e0b4
DH
3709 return true;
3710 /*
3711 * If we want to map a page that's in the swapcache writable, we
3712 * have to detect via the refcount if we're really the exclusive
3713 * user. Try freeing the swapcache to get rid of the swapcache
3714 * reference only in case it's likely that we'll be the exclusive user.
3715 */
a160e537
MWO
3716 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
3717 folio_ref_count(folio) == 2;
c145e0b4
DH
3718}
3719
9c28a205
PX
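/*
 * Clear a special (uffd-wp) pte marker back to pte_none(), re-checking under
 * the PTE lock that the entry has not changed since the fault was taken.
 */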
3720static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
3721{
3722 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3723 vmf->address, &vmf->ptl);
3db82b93
HD
3724 if (!vmf->pte)
3725 return 0;
9c28a205
PX
3726 /*
3727 * Be careful so that we will only recover a special uffd-wp pte into a
3728 * none pte. Otherwise it means the pte could have changed, so retry.
7e3ce3f8
PX
3729 *
3730 * This should also cover the case where e.g. the pte changed
af19487f 3731 * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_POISONED.
7e3ce3f8 3732 * So is_pte_marker() check is not enough to safely drop the pte.
9c28a205 3733 */
c33c7948 3734 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
9c28a205
PX
3735 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
3736 pte_unmap_unlock(vmf->pte, vmf->ptl);
3737 return 0;
3738}
3739
2bad466c
PX
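/*
 * Fault on a pte_none() entry: anonymous VMAs get a freshly allocated
 * anonymous page, everything else goes through do_fault().
 */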
3740static vm_fault_t do_pte_missing(struct vm_fault *vmf)
3741{
3742 if (vma_is_anonymous(vmf->vma))
3743 return do_anonymous_page(vmf);
3744 else
3745 return do_fault(vmf);
3746}
3747
9c28a205
PX
3748/*
3749 * This is actually a page-missing access, but with uffd-wp special pte
3750 * installed. It means this pte was wr-protected before being unmapped.
3751 */
3752static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
3753{
3754 /*
3755 * Just in case there are leftover special ptes even after the region
7a079ba2 3756 * got unregistered - we can simply clear them.
9c28a205 3757 */
2bad466c 3758 if (unlikely(!userfaultfd_wp(vmf->vma)))
9c28a205
PX
3759 return pte_marker_clear(vmf);
3760
2bad466c 3761 return do_pte_missing(vmf);
9c28a205
PX
3762}
3763
5c041f5d
PX
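/*
 * Dispatch a fault on a pte-marker swap entry: poisoned markers raise
 * VM_FAULT_HWPOISON, uffd-wp markers are handled as missing-page faults,
 * and an unknown (or empty) marker is a bug that gets SIGBUS.
 */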
3764static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
3765{
3766 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
3767 unsigned long marker = pte_marker_get(entry);
3768
3769 /*
ca92ea3d
PX
3770 * PTE markers should never be empty. If anything weird happened,
3771 * the best thing to do is to kill the process along with its mm.
5c041f5d 3772 */
ca92ea3d 3773 if (WARN_ON_ONCE(!marker))
5c041f5d
PX
3774 return VM_FAULT_SIGBUS;
3775
15520a3f 3776 /* Higher priority than uffd-wp when data corrupted */
af19487f
AR
3777 if (marker & PTE_MARKER_POISONED)
3778 return VM_FAULT_HWPOISON;
15520a3f 3779
9c28a205
PX
3780 if (pte_marker_entry_uffd_wp(entry))
3781 return pte_marker_handle_uffd_wp(vmf);
3782
3783 /* This is an unknown pte marker */
3784 return VM_FAULT_SIGBUS;
5c041f5d
PX
3785}
3786
1da177e4 3787/*
c1e8d7c6 3788 * We enter with non-exclusive mmap_lock (to exclude vma changes,
8f4e2101 3789 * but allow concurrent faults), and pte mapped but not yet locked.
9a95f3cf
PC
3790 * We return with pte unmapped and unlocked.
3791 *
c1e8d7c6 3792 * We return with the mmap_lock locked or unlocked in the same cases
9a95f3cf 3793 * as does filemap_fault().
1da177e4 3794 */
2b740303 3795vm_fault_t do_swap_page(struct vm_fault *vmf)
1da177e4 3796{
82b0f8c3 3797 struct vm_area_struct *vma = vmf->vma;
d4f9565a
MWO
3798 struct folio *swapcache, *folio = NULL;
3799 struct page *page;
2799e775 3800 struct swap_info_struct *si = NULL;
14f9135d 3801 rmap_t rmap_flags = RMAP_NONE;
13ddaf26 3802 bool need_clear_cache = false;
1493a191 3803 bool exclusive = false;
65500d23 3804 swp_entry_t entry;
1da177e4 3805 pte_t pte;
2b740303 3806 vm_fault_t ret = 0;
aae466b0 3807 void *shadow = NULL;
1da177e4 3808
2ca99358 3809 if (!pte_unmap_same(vmf))
8f4e2101 3810 goto out;
65500d23 3811
2994302b 3812 entry = pte_to_swp_entry(vmf->orig_pte);
d1737fdb
AK
3813 if (unlikely(non_swap_entry(entry))) {
3814 if (is_migration_entry(entry)) {
82b0f8c3
JK
3815 migration_entry_wait(vma->vm_mm, vmf->pmd,
3816 vmf->address);
b756a3b5
AP
3817 } else if (is_device_exclusive_entry(entry)) {
3818 vmf->page = pfn_swap_entry_to_page(entry);
3819 ret = remove_device_exclusive_entry(vmf);
5042db43 3820 } else if (is_device_private_entry(entry)) {
1235ccd0
SB
3821 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3822 /*
3823 * migrate_to_ram is not yet ready to operate
3824 * under VMA lock.
3825 */
3826 vma_end_read(vma);
3827 ret = VM_FAULT_RETRY;
3828 goto out;
3829 }
3830
af5cdaf8 3831 vmf->page = pfn_swap_entry_to_page(entry);
16ce101d
AP
3832 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3833 vmf->address, &vmf->ptl);
3db82b93 3834 if (unlikely(!vmf->pte ||
c33c7948
RR
3835 !pte_same(ptep_get(vmf->pte),
3836 vmf->orig_pte)))
3b65f437 3837 goto unlock;
16ce101d
AP
3838
3839 /*
3840 * Get a page reference while we know the page can't be
3841 * freed.
3842 */
3843 get_page(vmf->page);
3844 pte_unmap_unlock(vmf->pte, vmf->ptl);
4a955bed 3845 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
16ce101d 3846 put_page(vmf->page);
d1737fdb
AK
3847 } else if (is_hwpoison_entry(entry)) {
3848 ret = VM_FAULT_HWPOISON;
5c041f5d
PX
3849 } else if (is_pte_marker_entry(entry)) {
3850 ret = handle_pte_marker(vmf);
d1737fdb 3851 } else {
2994302b 3852 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
d99be1a8 3853 ret = VM_FAULT_SIGBUS;
d1737fdb 3854 }
0697212a
CL
3855 goto out;
3856 }
0bcac06f 3857
2799e775
ML
3858 /* Prevent swapoff from happening to us. */
3859 si = get_swap_device(entry);
3860 if (unlikely(!si))
3861 goto out;
0bcac06f 3862
5a423081
MWO
3863 folio = swap_cache_get_folio(entry, vma, vmf->address);
3864 if (folio)
3865 page = folio_file_page(folio, swp_offset(entry));
d4f9565a 3866 swapcache = folio;
f8020772 3867
d4f9565a 3868 if (!folio) {
a449bf58
QC
3869 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3870 __swap_count(entry) == 1) {
13ddaf26
KS
3871 /*
3872 * Prevent parallel swapin from proceeding with
3873 * the cache flag. Otherwise, another thread may
3874 * finish swapin first, free the entry, and swapout
3875 * reusing the same entry. It's undetectable as
3876 * pte_same() returns true due to entry reuse.
3877 */
3878 if (swapcache_prepare(entry)) {
3879 /* Relax a bit to prevent rapid repeated page faults */
3880 schedule_timeout_uninterruptible(1);
3881 goto out;
3882 }
3883 need_clear_cache = true;
3884
0bcac06f 3885 /* skip swapcache */
63ad4add
MWO
3886 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
3887 vma, vmf->address, false);
3888 page = &folio->page;
3889 if (folio) {
3890 __folio_set_locked(folio);
3891 __folio_set_swapbacked(folio);
4c6355b2 3892
65995918 3893 if (mem_cgroup_swapin_charge_folio(folio,
63ad4add
MWO
3894 vma->vm_mm, GFP_KERNEL,
3895 entry)) {
545b1b07 3896 ret = VM_FAULT_OOM;
4c6355b2 3897 goto out_page;
545b1b07 3898 }
0add0c77 3899 mem_cgroup_swapin_uncharge_swap(entry);
4c6355b2 3900
aae466b0
JK
3901 shadow = get_shadow_from_swap_cache(entry);
3902 if (shadow)
63ad4add 3903 workingset_refault(folio, shadow);
0076f029 3904
63ad4add 3905 folio_add_lru(folio);
0add0c77 3906
c9bdf768 3907 /* To provide entry to swap_read_folio() */
3d2c9087 3908 folio->swap = entry;
c9bdf768 3909 swap_read_folio(folio, true, NULL);
63ad4add 3910 folio->private = NULL;
0bcac06f 3911 }
aa8d22a1 3912 } else {
e9e9b7ec
MK
3913 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3914 vmf);
63ad4add
MWO
3915 if (page)
3916 folio = page_folio(page);
d4f9565a 3917 swapcache = folio;
0bcac06f
MK
3918 }
3919
d4f9565a 3920 if (!folio) {
1da177e4 3921 /*
8f4e2101
HD
3922 * Back out if somebody else faulted in this pte
3923 * while we released the pte lock.
1da177e4 3924 */
82b0f8c3
JK
3925 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3926 vmf->address, &vmf->ptl);
c33c7948
RR
3927 if (likely(vmf->pte &&
3928 pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
1da177e4 3929 ret = VM_FAULT_OOM;
65500d23 3930 goto unlock;
1da177e4
LT
3931 }
3932
3933 /* Had to read the page from swap area: Major fault */
3934 ret = VM_FAULT_MAJOR;
f8891e5e 3935 count_vm_event(PGMAJFAULT);
2262185c 3936 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
d1737fdb 3937 } else if (PageHWPoison(page)) {
71f72525
WF
3938 /*
3939 * hwpoisoned dirty swapcache pages are kept for killing
3940 * owner processes (which may be unknown at hwpoison time)
3941 */
d1737fdb 3942 ret = VM_FAULT_HWPOISON;
4779cb31 3943 goto out_release;
1da177e4
LT
3944 }
3945
fdc724d6
SB
3946 ret |= folio_lock_or_retry(folio, vmf);
3947 if (ret & VM_FAULT_RETRY)
d065bd81 3948 goto out_release;
073e587e 3949
84d60fdd
DH
3950 if (swapcache) {
3951 /*
3b344157 3952 * Make sure folio_free_swap() or swapoff did not release the
84d60fdd
DH
3953 * swapcache from under us. The page pin, and pte_same test
3954 * below, are not enough to exclude that. Even if it is still
3955 * swapcache, we need to check that the page's swap has not
3956 * changed.
3957 */
63ad4add 3958 if (unlikely(!folio_test_swapcache(folio) ||
cfeed8ff 3959 page_swap_entry(page).val != entry.val))
84d60fdd
DH
3960 goto out_page;
3961
3962 /*
3963 * KSM sometimes has to copy on read faults, for example, if
3964 * page->index of !PageKSM() pages would be nonlinear inside the
3965 * anon VMA -- PageKSM() is lost on actual swapout.
3966 */
96db66d9
MWO
3967 folio = ksm_might_need_to_copy(folio, vma, vmf->address);
3968 if (unlikely(!folio)) {
84d60fdd 3969 ret = VM_FAULT_OOM;
96db66d9 3970 folio = swapcache;
84d60fdd 3971 goto out_page;
96db66d9 3972 } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
6b970599 3973 ret = VM_FAULT_HWPOISON;
96db66d9 3974 folio = swapcache;
6b970599 3975 goto out_page;
84d60fdd 3976 }
96db66d9
MWO
3977 if (folio != swapcache)
3978 page = folio_page(folio, 0);
c145e0b4
DH
3979
3980 /*
3981 * If we want to map a page that's in the swapcache writable, we
3982 * have to detect via the refcount if we're really the exclusive
3983 * owner. Try removing the extra reference from the local LRU
1fec6890 3984 * caches if required.
c145e0b4 3985 */
d4f9565a 3986 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
63ad4add 3987 !folio_test_ksm(folio) && !folio_test_lru(folio))
c145e0b4 3988 lru_add_drain();
5ad64688
HD
3989 }
3990
4231f842 3991 folio_throttle_swaprate(folio, GFP_KERNEL);
8a9f3ccd 3992
1da177e4 3993 /*
8f4e2101 3994 * Back out if somebody else already faulted in this pte.
1da177e4 3995 */
82b0f8c3
JK
3996 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3997 &vmf->ptl);
c33c7948 3998 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
b8107480 3999 goto out_nomap;
b8107480 4000
63ad4add 4001 if (unlikely(!folio_test_uptodate(folio))) {
b8107480
KK
4002 ret = VM_FAULT_SIGBUS;
4003 goto out_nomap;
1da177e4
LT
4004 }
4005
78fbe906
DH
4006 /*
4007 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
4008 * must never point at an anonymous page in the swapcache that is
4009 * PG_anon_exclusive. Sanity check that this holds and especially, that
4010 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity
4011 * check after taking the PT lock and making sure that nobody
4012 * concurrently faulted in this page and set PG_anon_exclusive.
4013 */
63ad4add
MWO
4014 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
4015 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
78fbe906 4016
1493a191
DH
4017 /*
4018 * Check under PT lock (to protect against concurrent fork() sharing
4019 * the swap entry) for certainly exclusive pages.
4020 */
63ad4add 4021 if (!folio_test_ksm(folio)) {
1493a191 4022 exclusive = pte_swp_exclusive(vmf->orig_pte);
d4f9565a 4023 if (folio != swapcache) {
1493a191
DH
4024 /*
4025 * We have a fresh page that is not exposed to the
4026 * swapcache -> certainly exclusive.
4027 */
4028 exclusive = true;
63ad4add 4029 } else if (exclusive && folio_test_writeback(folio) &&
eacde327 4030 data_race(si->flags & SWP_STABLE_WRITES)) {
1493a191
DH
4031 /*
4032 * This is tricky: not all swap backends support
4033 * concurrent page modifications while under writeback.
4034 *
4035 * So if we stumble over such a page in the swapcache
4036 * we must not set the page exclusive, otherwise we can
4037 * map it writable without further checks and modify it
4038 * while still under writeback.
4039 *
4040 * For these problematic swap backends, simply drop the
4041 * exclusive marker: this is perfectly fine as we start
4042 * writeback only if we fully unmapped the page and
4043 * there are no unexpected references on the page after
4044 * unmapping succeeded. After fully unmapped, no
4045 * further GUP references (FOLL_GET and FOLL_PIN) can
4046 * appear, so dropping the exclusive marker and mapping
4047 * it only R/O is fine.
4048 */
4049 exclusive = false;
4050 }
4051 }
4052
6dca4ac6
PC
4053 /*
4054 * Some architectures may have to restore extra metadata to the page
4055 * when reading from swap. This metadata may be indexed by swap entry
4056 * so this must be called before swap_free().
4057 */
4058 arch_swap_restore(entry, folio);
4059
8c7c6e34 4060 /*
c145e0b4
DH
4061 * Remove the swap entry and conditionally try to free up the swapcache.
4062 * We're already holding a reference on the page but haven't mapped it
4063 * yet.
8c7c6e34 4064 */
c145e0b4 4065 swap_free(entry);
a160e537
MWO
4066 if (should_try_to_free_swap(folio, vma, vmf->flags))
4067 folio_free_swap(folio);
1da177e4 4068
f1a79412
SB
4069 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
4070 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1da177e4 4071 pte = mk_pte(page, vma->vm_page_prot);
c145e0b4
DH
4072
4073 /*
1493a191
DH
4074 * Same logic as in do_wp_page(); however, optimize for pages that are
4075 * certainly not shared either because we just allocated them without
4076 * exposing them to the swapcache or because the swap entry indicates
4077 * exclusivity.
c145e0b4 4078 */
63ad4add
MWO
4079 if (!folio_test_ksm(folio) &&
4080 (exclusive || folio_ref_count(folio) == 1)) {
6c287605
DH
4081 if (vmf->flags & FAULT_FLAG_WRITE) {
4082 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
4083 vmf->flags &= ~FAULT_FLAG_WRITE;
6c287605 4084 }
14f9135d 4085 rmap_flags |= RMAP_EXCLUSIVE;
1da177e4 4086 }
1da177e4 4087 flush_icache_page(vma, page);
2994302b 4088 if (pte_swp_soft_dirty(vmf->orig_pte))
179ef71c 4089 pte = pte_mksoft_dirty(pte);
f1eb1bac 4090 if (pte_swp_uffd_wp(vmf->orig_pte))
f45ec5ff 4091 pte = pte_mkuffd_wp(pte);
2994302b 4092 vmf->orig_pte = pte;
0bcac06f
MK
4093
4094 /* ksm created a completely new copy */
d4f9565a 4095 if (unlikely(folio != swapcache && swapcache)) {
2853b66b 4096 folio_add_new_anon_rmap(folio, vma, vmf->address);
63ad4add 4097 folio_add_lru_vma(folio, vma);
0bcac06f 4098 } else {
b832a354
DH
4099 folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
4100 rmap_flags);
00501b53 4101 }
1da177e4 4102
63ad4add
MWO
4103 VM_BUG_ON(!folio_test_anon(folio) ||
4104 (pte_write(pte) && !PageAnonExclusive(page)));
1eba86c0
PT
4105 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
4106 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
4107
63ad4add 4108 folio_unlock(folio);
d4f9565a 4109 if (folio != swapcache && swapcache) {
4969c119
AA
4110 /*
4111 * Hold the lock to prevent the swap entry from being reused
4112 * until we take the PT lock for the pte_same() check
4113 * (to avoid false positives from pte_same). For
4114 * further safety release the lock after the swap_free
4115 * so that the swap count won't change under a
4116 * parallel locked swapcache.
4117 */
d4f9565a
MWO
4118 folio_unlock(swapcache);
4119 folio_put(swapcache);
4969c119 4120 }
c475a8ab 4121
82b0f8c3 4122 if (vmf->flags & FAULT_FLAG_WRITE) {
2994302b 4123 ret |= do_wp_page(vmf);
61469f1d
HD
4124 if (ret & VM_FAULT_ERROR)
4125 ret &= VM_FAULT_ERROR;
1da177e4
LT
4126 goto out;
4127 }
4128
4129 /* No need to invalidate - it was non-present before */
5003a2bd 4130 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
65500d23 4131unlock:
3db82b93
HD
4132 if (vmf->pte)
4133 pte_unmap_unlock(vmf->pte, vmf->ptl);
1da177e4 4134out:
13ddaf26
KS
4135 /* Clear the swap cache pin for direct swapin after PTL unlock */
4136 if (need_clear_cache)
4137 swapcache_clear(si, entry);
2799e775
ML
4138 if (si)
4139 put_swap_device(si);
1da177e4 4140 return ret;
b8107480 4141out_nomap:
3db82b93
HD
4142 if (vmf->pte)
4143 pte_unmap_unlock(vmf->pte, vmf->ptl);
bc43f75c 4144out_page:
63ad4add 4145 folio_unlock(folio);
4779cb31 4146out_release:
63ad4add 4147 folio_put(folio);
d4f9565a
MWO
4148 if (folio != swapcache && swapcache) {
4149 folio_unlock(swapcache);
4150 folio_put(swapcache);
4969c119 4151 }
13ddaf26
KS
4152 if (need_clear_cache)
4153 swapcache_clear(si, entry);
2799e775
ML
4154 if (si)
4155 put_swap_device(si);
65500d23 4156 return ret;
1da177e4
LT
4157}
4158
19eaf449
RR
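/*
 * Return true only if all @nr_pages consecutive PTEs starting at @pte are
 * still pte_none(), i.e. the whole candidate range is unpopulated.
 */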
4159static bool pte_range_none(pte_t *pte, int nr_pages)
4160{
4161 int i;
4162
4163 for (i = 0; i < nr_pages; i++) {
4164 if (!pte_none(ptep_get_lockless(pte + i)))
4165 return false;
4166 }
4167
4168 return true;
4169}
4170
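/*
 * Allocate the anonymous folio for do_anonymous_page().  With THP enabled,
 * and unless userfaultfd is armed on the VMA, try progressively smaller
 * large-folio orders that fit the VMA and cover only pte_none() entries,
 * falling back to a single zeroed page.  Returns NULL on OOM, or
 * ERR_PTR(-EAGAIN) if the caller must retry the fault.
 */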
4171static struct folio *alloc_anon_folio(struct vm_fault *vmf)
4172{
4173#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4174 struct vm_area_struct *vma = vmf->vma;
4175 unsigned long orders;
4176 struct folio *folio;
4177 unsigned long addr;
4178 pte_t *pte;
4179 gfp_t gfp;
4180 int order;
4181
4182 /*
4183 * If uffd is active for the vma we need per-page fault fidelity to
4184 * maintain the uffd semantics.
4185 */
4186 if (unlikely(userfaultfd_armed(vma)))
4187 goto fallback;
4188
4189 /*
4190 * Get a list of all the (large) orders below PMD_ORDER that are enabled
4191 * for this vma. Then filter out the orders that can't be allocated over
4192 * the faulting address and still be fully contained in the vma.
4193 */
4194 orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
4195 BIT(PMD_ORDER) - 1);
4196 orders = thp_vma_suitable_orders(vma, vmf->address, orders);
4197
4198 if (!orders)
4199 goto fallback;
4200
4201 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
4202 if (!pte)
4203 return ERR_PTR(-EAGAIN);
4204
4205 /*
4206 * Find the highest order where the aligned range is completely
4207 * pte_none(). Note that all remaining orders will be completely
4208 * pte_none().
4209 */
4210 order = highest_order(orders);
4211 while (orders) {
4212 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4213 if (pte_range_none(pte + pte_index(addr), 1 << order))
4214 break;
4215 order = next_order(&orders, order);
4216 }
4217
4218 pte_unmap(pte);
4219
4220 /* Try allocating the highest of the remaining orders. */
4221 gfp = vma_thp_gfp_mask(vma);
4222 while (orders) {
4223 addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
4224 folio = vma_alloc_folio(gfp, order, vma, addr, true);
4225 if (folio) {
4226 clear_huge_page(&folio->page, vmf->address, 1 << order);
4227 return folio;
4228 }
4229 order = next_order(&orders, order);
4230 }
4231
4232fallback:
4233#endif
4234 return vma_alloc_zeroed_movable_folio(vmf->vma, vmf->address);
4235}
4236
1da177e4 4237/*
c1e8d7c6 4238 * We enter with non-exclusive mmap_lock (to exclude vma changes,
8f4e2101 4239 * but allow concurrent faults), and pte mapped but not yet locked.
c1e8d7c6 4240 * We return with mmap_lock still held, but pte unmapped and unlocked.
1da177e4 4241 */
2b740303 4242static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
1da177e4 4243{
2bad466c 4244 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
82b0f8c3 4245 struct vm_area_struct *vma = vmf->vma;
19eaf449 4246 unsigned long addr = vmf->address;
6bc56a4d 4247 struct folio *folio;
2b740303 4248 vm_fault_t ret = 0;
19eaf449 4249 int nr_pages = 1;
1da177e4 4250 pte_t entry;
19eaf449 4251 int i;
1da177e4 4252
6b7339f4
KS
4253 /* File mapping without ->vm_ops ? */
4254 if (vma->vm_flags & VM_SHARED)
4255 return VM_FAULT_SIGBUS;
4256
7267ec00 4257 /*
3db82b93
HD
4258 * Use pte_alloc() instead of pte_alloc_map(), so that OOM can
4259 * be distinguished from a transient failure of pte_offset_map().
7267ec00 4260 */
4cf58924 4261 if (pte_alloc(vma->vm_mm, vmf->pmd))
7267ec00
KS
4262 return VM_FAULT_OOM;
4263
11ac5524 4264 /* Use the zero-page for reads */
82b0f8c3 4265 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
bae473a4 4266 !mm_forbids_zeropage(vma->vm_mm)) {
82b0f8c3 4267 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
62eede62 4268 vma->vm_page_prot));
82b0f8c3
JK
4269 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4270 vmf->address, &vmf->ptl);
3db82b93
HD
4271 if (!vmf->pte)
4272 goto unlock;
2bad466c 4273 if (vmf_pte_changed(vmf)) {
7df67697 4274 update_mmu_tlb(vma, vmf->address, vmf->pte);
a13ea5b7 4275 goto unlock;
7df67697 4276 }
6b31d595
MH
4277 ret = check_stable_address_space(vma->vm_mm);
4278 if (ret)
4279 goto unlock;
6b251fc9
AA
4280 /* Deliver the page fault to userland, check inside PT lock */
4281 if (userfaultfd_missing(vma)) {
82b0f8c3
JK
4282 pte_unmap_unlock(vmf->pte, vmf->ptl);
4283 return handle_userfault(vmf, VM_UFFD_MISSING);
6b251fc9 4284 }
a13ea5b7
HD
4285 goto setpte;
4286 }
4287
557ed1fa 4288 /* Allocate our own private page. */
557ed1fa
NP
4289 if (unlikely(anon_vma_prepare(vma)))
4290 goto oom;
19eaf449
RR
4291 /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */
4292 folio = alloc_anon_folio(vmf);
4293 if (IS_ERR(folio))
4294 return 0;
6bc56a4d 4295 if (!folio)
557ed1fa 4296 goto oom;
eb3c24f3 4297
19eaf449
RR
4298 nr_pages = folio_nr_pages(folio);
4299 addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE);
4300
6bc56a4d 4301 if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
eb3c24f3 4302 goto oom_free_page;
e2bf3e2c 4303 folio_throttle_swaprate(folio, GFP_KERNEL);
eb3c24f3 4304
52f37629 4305 /*
cb3184de 4306 * The memory barrier inside __folio_mark_uptodate makes sure that
f4f5329d 4307 * preceding stores to the page contents become visible before
52f37629
MK
4308 * the set_pte_at() write.
4309 */
cb3184de 4310 __folio_mark_uptodate(folio);
8f4e2101 4311
cb3184de 4312 entry = mk_pte(&folio->page, vma->vm_page_prot);
50c25ee9 4313 entry = pte_sw_mkyoung(entry);
1ac0cb5d 4314 if (vma->vm_flags & VM_WRITE)
161e393c 4315 entry = pte_mkwrite(pte_mkdirty(entry), vma);
1da177e4 4316
19eaf449 4317 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3db82b93
HD
4318 if (!vmf->pte)
4319 goto release;
19eaf449
RR
4320 if (nr_pages == 1 && vmf_pte_changed(vmf)) {
4321 update_mmu_tlb(vma, addr, vmf->pte);
4322 goto release;
4323 } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
4324 for (i = 0; i < nr_pages; i++)
4325 update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i);
557ed1fa 4326 goto release;
7df67697 4327 }
9ba69294 4328
6b31d595
MH
4329 ret = check_stable_address_space(vma->vm_mm);
4330 if (ret)
4331 goto release;
4332
6b251fc9
AA
4333 /* Deliver the page fault to userland, check inside PT lock */
4334 if (userfaultfd_missing(vma)) {
82b0f8c3 4335 pte_unmap_unlock(vmf->pte, vmf->ptl);
cb3184de 4336 folio_put(folio);
82b0f8c3 4337 return handle_userfault(vmf, VM_UFFD_MISSING);
6b251fc9
AA
4338 }
4339
19eaf449
RR
4340 folio_ref_add(folio, nr_pages - 1);
4341 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
4342 folio_add_new_anon_rmap(folio, vma, addr);
cb3184de 4343 folio_add_lru_vma(folio, vma);
a13ea5b7 4344setpte:
2bad466c
PX
4345 if (uffd_wp)
4346 entry = pte_mkuffd_wp(entry);
19eaf449 4347 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages);
1da177e4
LT
4348
4349 /* No need to invalidate - it was non-present before */
19eaf449 4350 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages);
65500d23 4351unlock:
3db82b93
HD
4352 if (vmf->pte)
4353 pte_unmap_unlock(vmf->pte, vmf->ptl);
6b31d595 4354 return ret;
8f4e2101 4355release:
cb3184de 4356 folio_put(folio);
8f4e2101 4357 goto unlock;
8a9f3ccd 4358oom_free_page:
cb3184de 4359 folio_put(folio);
65500d23 4360oom:
1da177e4
LT
4361 return VM_FAULT_OOM;
4362}
4363
9a95f3cf 4364/*
c1e8d7c6 4365 * The mmap_lock must have been held on entry, and may have been
9a95f3cf
PC
4366 * released depending on flags and vma->vm_ops->fault() return value.
4367 * See filemap_fault() and __lock_page_retry().
4368 */
2b740303 4369static vm_fault_t __do_fault(struct vm_fault *vmf)
7eae74af 4370{
82b0f8c3 4371 struct vm_area_struct *vma = vmf->vma;
01d1e0e6 4372 struct folio *folio;
2b740303 4373 vm_fault_t ret;
7eae74af 4374
63f3655f
MH
4375 /*
4376 * Preallocate pte before we take page_lock because this might lead to
4377 * deadlocks for memcg reclaim which waits for pages under writeback:
4378 * lock_page(A)
4379 * SetPageWriteback(A)
4380 * unlock_page(A)
4381 * lock_page(B)
4382 * lock_page(B)
d383807a 4383 * pte_alloc_one
63f3655f
MH
4384 * shrink_page_list
4385 * wait_on_page_writeback(A)
4386 * SetPageWriteback(B)
4387 * unlock_page(B)
4388 * # flush A, B to clear the writeback
4389 */
4390 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
a7069ee3 4391 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
63f3655f
MH
4392 if (!vmf->prealloc_pte)
4393 return VM_FAULT_OOM;
63f3655f
MH
4394 }
4395
11bac800 4396 ret = vma->vm_ops->fault(vmf);
3917048d 4397 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
b1aa812b 4398 VM_FAULT_DONE_COW)))
bc2466e4 4399 return ret;
7eae74af 4400
01d1e0e6 4401 folio = page_folio(vmf->page);
667240e0 4402 if (unlikely(PageHWPoison(vmf->page))) {
e53ac737
RR
4403 vm_fault_t poisonret = VM_FAULT_HWPOISON;
4404 if (ret & VM_FAULT_LOCKED) {
01d1e0e6
MWO
4405 if (page_mapped(vmf->page))
4406 unmap_mapping_folio(folio);
4407 /* Retry if a clean folio was removed from the cache. */
4408 if (mapping_evict_folio(folio->mapping, folio))
3149c79f 4409 poisonret = VM_FAULT_NOPAGE;
01d1e0e6 4410 folio_unlock(folio);
e53ac737 4411 }
01d1e0e6 4412 folio_put(folio);
936ca80d 4413 vmf->page = NULL;
e53ac737 4414 return poisonret;
7eae74af
KS
4415 }
4416
4417 if (unlikely(!(ret & VM_FAULT_LOCKED)))
01d1e0e6 4418 folio_lock(folio);
7eae74af 4419 else
01d1e0e6 4420 VM_BUG_ON_PAGE(!folio_test_locked(folio), vmf->page);
7eae74af 4421
7eae74af
KS
4422 return ret;
4423}
4424
396bcc52 4425#ifdef CONFIG_TRANSPARENT_HUGEPAGE
82b0f8c3 4426static void deposit_prealloc_pte(struct vm_fault *vmf)
953c66c2 4427{
82b0f8c3 4428 struct vm_area_struct *vma = vmf->vma;
953c66c2 4429
82b0f8c3 4430 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
953c66c2
AK
4431 /*
4432 * We are going to consume the prealloc table,
4433 * count that as nr_ptes.
4434 */
c4812909 4435 mm_inc_nr_ptes(vma->vm_mm);
7f2b6ce8 4436 vmf->prealloc_pte = NULL;
953c66c2
AK
4437}
4438
f9ce0be7 4439vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
10102459 4440{
ef37b2ea 4441 struct folio *folio = page_folio(page);
82b0f8c3
JK
4442 struct vm_area_struct *vma = vmf->vma;
4443 bool write = vmf->flags & FAULT_FLAG_WRITE;
4444 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
10102459 4445 pmd_t entry;
d01ac3c3 4446 vm_fault_t ret = VM_FAULT_FALLBACK;
10102459 4447
3485b883 4448 if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER))
d01ac3c3 4449 return ret;
10102459 4450
ef37b2ea 4451 if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER)
d01ac3c3 4452 return ret;
10102459 4453
eac96c3e
YS
4454 /*
4455 * Just back off if any subpage of a THP is corrupted; otherwise
4456 * the corrupted page may be mapped by PMD silently to escape the
4457 * check. This kind of THP can only be PTE mapped. Access to
4458 * the corrupted subpage should trigger SIGBUS as expected.
4459 */
ef37b2ea 4460 if (unlikely(folio_test_has_hwpoisoned(folio)))
eac96c3e
YS
4461 return ret;
4462
953c66c2 4463 /*
f0953a1b 4464 * Archs like ppc64 need additional space to store information
953c66c2
AK
4465 * related to pte entry. Use the preallocated table for that.
4466 */
82b0f8c3 4467 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4cf58924 4468 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
82b0f8c3 4469 if (!vmf->prealloc_pte)
953c66c2 4470 return VM_FAULT_OOM;
953c66c2
AK
4471 }
4472
82b0f8c3
JK
4473 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4474 if (unlikely(!pmd_none(*vmf->pmd)))
10102459
KS
4475 goto out;
4476
9f1f5b60 4477 flush_icache_pages(vma, page, HPAGE_PMD_NR);
10102459
KS
4478
4479 entry = mk_huge_pmd(page, vma->vm_page_prot);
4480 if (write)
f55e1014 4481 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
10102459 4482
fadae295 4483 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
ef37b2ea 4484 folio_add_file_rmap_pmd(folio, page, vma);
cea86fe2 4485
953c66c2
AK
4486 /*
4487 * deposit and withdraw with pmd lock held
4488 */
4489 if (arch_needs_pgtable_deposit())
82b0f8c3 4490 deposit_prealloc_pte(vmf);
10102459 4491
82b0f8c3 4492 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
10102459 4493
82b0f8c3 4494 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
10102459
KS
4495
4496 /* fault is handled */
4497 ret = 0;
95ecedcd 4498 count_vm_event(THP_FILE_MAPPED);
10102459 4499out:
82b0f8c3 4500 spin_unlock(vmf->ptl);
10102459
KS
4501 return ret;
4502}
4503#else
f9ce0be7 4504vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
10102459 4505{
f9ce0be7 4506 return VM_FAULT_FALLBACK;
10102459
KS
4507}
4508#endif
4509
3bd786f7
YF
4510/**
4511 * set_pte_range - Set a range of PTEs to point to pages in a folio.
4512 * @vmf: Fault description.
4513 * @folio: The folio that contains @page.
4514 * @page: The first page to create a PTE for.
4515 * @nr: The number of PTEs to create.
4516 * @addr: The first address to create a PTE for.
4517 */
4518void set_pte_range(struct vm_fault *vmf, struct folio *folio,
4519 struct page *page, unsigned int nr, unsigned long addr)
3bb97794 4520{
82b0f8c3 4521 struct vm_area_struct *vma = vmf->vma;
2bad466c 4522 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
82b0f8c3 4523 bool write = vmf->flags & FAULT_FLAG_WRITE;
3bd786f7 4524 bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
3bb97794 4525 pte_t entry;
7267ec00 4526
3bd786f7 4527 flush_icache_pages(vma, page, nr);
3bb97794 4528 entry = mk_pte(page, vma->vm_page_prot);
46bdb427
WD
4529
4530 if (prefault && arch_wants_old_prefaulted_pte())
4531 entry = pte_mkold(entry);
50c25ee9
TB
4532 else
4533 entry = pte_sw_mkyoung(entry);
46bdb427 4534
3bb97794
KS
4535 if (write)
4536 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
9c28a205 4537 if (unlikely(uffd_wp))
f1eb1bac 4538 entry = pte_mkuffd_wp(entry);
bae473a4
KS
4539 /* copy-on-write page */
4540 if (write && !(vma->vm_flags & VM_SHARED)) {
3bd786f7
YF
4541 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
4542 VM_BUG_ON_FOLIO(nr != 1, folio);
4543 folio_add_new_anon_rmap(folio, vma, addr);
4544 folio_add_lru_vma(folio, vma);
3bb97794 4545 } else {
3bd786f7 4546 add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
68f03208 4547 folio_add_file_rmap_ptes(folio, page, nr, vma);
3bb97794 4548 }
3bd786f7
YF
4549 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
4550
4551 /* no need to invalidate: a not-present page won't be cached */
4552 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
3bb97794
KS
4553}
4554
f46f2ade
PX
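/*
 * Has the PTE changed under us?  If the caller snapshotted a valid orig_pte
 * (FAULT_FLAG_ORIG_PTE_VALID), compare against that snapshot; otherwise any
 * non-none PTE counts as a change.
 */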
4555static bool vmf_pte_changed(struct vm_fault *vmf)
4556{
4557 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
c33c7948 4558 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
f46f2ade 4559
c33c7948 4560 return !pte_none(ptep_get(vmf->pte));
f46f2ade
PX
4561}
4562
9118c0cb
JK
4563/**
4564 * finish_fault - finish page fault once we have prepared the page to fault
4565 *
4566 * @vmf: structure describing the fault
4567 *
4568 * This function handles all that is needed to finish a page fault once the
4569 * page to fault in is prepared. It handles locking of PTEs, inserts the PTE
4570 * for the given page, adds reverse page mapping, handles memcg charges and LRU
a862f68a 4571 * addition.
9118c0cb
JK
4572 *
4573 * The function expects the page to be locked and on success it consumes a
4574 * reference on the page being mapped (for the PTE which maps it).
a862f68a
MR
4575 *
4576 * Return: %0 on success, %VM_FAULT_ code in case of error.
9118c0cb 4577 */
2b740303 4578vm_fault_t finish_fault(struct vm_fault *vmf)
9118c0cb 4579{
f9ce0be7 4580 struct vm_area_struct *vma = vmf->vma;
9118c0cb 4581 struct page *page;
f9ce0be7 4582 vm_fault_t ret;
9118c0cb
JK
4583
4584 /* Did we COW the page? */
f9ce0be7 4585 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
9118c0cb
JK
4586 page = vmf->cow_page;
4587 else
4588 page = vmf->page;
6b31d595
MH
4589
4590 /*
4591 * check even for read faults because we might have lost our CoWed
4592 * page
4593 */
f9ce0be7
KS
4594 if (!(vma->vm_flags & VM_SHARED)) {
4595 ret = check_stable_address_space(vma->vm_mm);
4596 if (ret)
4597 return ret;
4598 }
4599
4600 if (pmd_none(*vmf->pmd)) {
4601 if (PageTransCompound(page)) {
4602 ret = do_set_pmd(vmf, page);
4603 if (ret != VM_FAULT_FALLBACK)
4604 return ret;
4605 }
4606
03c4f204
QZ
4607 if (vmf->prealloc_pte)
4608 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4609 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
f9ce0be7
KS
4610 return VM_FAULT_OOM;
4611 }
4612
f9ce0be7
KS
4613 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4614 vmf->address, &vmf->ptl);
3db82b93
HD
4615 if (!vmf->pte)
4616 return VM_FAULT_NOPAGE;
70427f6e 4617
f9ce0be7 4618 /* Re-check under ptl */
70427f6e 4619 if (likely(!vmf_pte_changed(vmf))) {
3bd786f7 4620 struct folio *folio = page_folio(page);
70427f6e 4621
3bd786f7 4622 set_pte_range(vmf, folio, page, 1, vmf->address);
70427f6e
SA
4623 ret = 0;
4624 } else {
4625 update_mmu_tlb(vma, vmf->address, vmf->pte);
f9ce0be7 4626 ret = VM_FAULT_NOPAGE;
70427f6e 4627 }
f9ce0be7 4628
f9ce0be7 4629 pte_unmap_unlock(vmf->pte, vmf->ptl);
9118c0cb
JK
4630 return ret;
4631}
4632
53d36a56
LS
4633static unsigned long fault_around_pages __read_mostly =
4634 65536 >> PAGE_SHIFT;
a9b0f861 4635
a9b0f861
KS
4636#ifdef CONFIG_DEBUG_FS
4637static int fault_around_bytes_get(void *data, u64 *val)
1592eef0 4638{
53d36a56 4639 *val = fault_around_pages << PAGE_SHIFT;
1592eef0
KS
4640 return 0;
4641}
4642
b4903d6e 4643/*
da391d64
WK
4644 * fault_around_bytes must be rounded down to the nearest page order as it's
4645 * what do_fault_around() expects to see.
b4903d6e 4646 */
a9b0f861 4647static int fault_around_bytes_set(void *data, u64 val)
1592eef0 4648{
a9b0f861 4649 if (val / PAGE_SIZE > PTRS_PER_PTE)
1592eef0 4650 return -EINVAL;
53d36a56
LS
4651
4652 /*
4653 * The minimum value is 1 page, however this results in no fault-around
4654 * at all. See should_fault_around().
4655 */
4656 fault_around_pages = max(rounddown_pow_of_two(val) >> PAGE_SHIFT, 1UL);
4657
1592eef0
KS
4658 return 0;
4659}
0a1345f8 4660DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
a9b0f861 4661 fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
1592eef0
KS
4662
4663static int __init fault_around_debugfs(void)
4664{
d9f7979c
GKH
4665 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4666 &fault_around_bytes_fops);
1592eef0
KS
4667 return 0;
4668}
4669late_initcall(fault_around_debugfs);
1592eef0 4670#endif
8c6e50b0 4671
1fdb412b
KS
4672/*
4673 * do_fault_around() tries to map a few pages around the fault address. The hope
4674 * is that the pages will be needed soon and this will lower the number of
4675 * faults to handle.
4676 *
4677 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4678 * not ready to be mapped: not up-to-date, locked, etc.
4679 *
9042599e
LS
4680 * This function doesn't cross VMA or page table boundaries, in order to call
4681 * map_pages() and acquire a PTE lock only once.
1fdb412b 4682 *
53d36a56 4683 * fault_around_pages defines how many pages we'll try to map.
da391d64
WK
4684 * do_fault_around() expects it to be set to a power of two less than or equal
4685 * to PTRS_PER_PTE.
1fdb412b 4686 *
da391d64 4687 * The virtual address of the area that we map is naturally aligned to
53d36a56 4688 * fault_around_pages * PAGE_SIZE rounded down to the machine page size
da391d64
WK
4689 * (and therefore to page order). This way it's easier to guarantee
4690 * that we don't cross page table boundaries.
1fdb412b 4691 */
2b740303 4692static vm_fault_t do_fault_around(struct vm_fault *vmf)
8c6e50b0 4693{
53d36a56 4694 pgoff_t nr_pages = READ_ONCE(fault_around_pages);
9042599e
LS
4695 pgoff_t pte_off = pte_index(vmf->address);
4696 /* The page offset of vmf->address within the VMA. */
4697 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
4698 pgoff_t from_pte, to_pte;
58ef47ef 4699 vm_fault_t ret;
8c6e50b0 4700
9042599e
LS
4701 /* The PTE offset of the start address, clamped to the VMA. */
4702 from_pte = max(ALIGN_DOWN(pte_off, nr_pages),
4703 pte_off - min(pte_off, vma_off));
aecd6f44 4704
9042599e
LS
4705 /* The PTE offset of the end address, clamped to the VMA and PTE. */
4706 to_pte = min3(from_pte + nr_pages, (pgoff_t)PTRS_PER_PTE,
4707 pte_off + vma_pages(vmf->vma) - vma_off) - 1;
8c6e50b0 4708
82b0f8c3 4709 if (pmd_none(*vmf->pmd)) {
4cf58924 4710 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
82b0f8c3 4711 if (!vmf->prealloc_pte)
f9ce0be7 4712 return VM_FAULT_OOM;
8c6e50b0
KS
4713 }
4714
58ef47ef
MWO
4715 rcu_read_lock();
4716 ret = vmf->vma->vm_ops->map_pages(vmf,
4717 vmf->pgoff + from_pte - pte_off,
4718 vmf->pgoff + to_pte - pte_off);
4719 rcu_read_unlock();
4720
4721 return ret;
8c6e50b0
KS
4722}
4723
9c28a205
PX
4724/* Return true if we should do read fault-around, false otherwise */
4725static inline bool should_fault_around(struct vm_fault *vmf)
4726{
4727 /* No ->map_pages? No way to fault around... */
4728 if (!vmf->vma->vm_ops->map_pages)
4729 return false;
4730
4731 if (uffd_disable_fault_around(vmf->vma))
4732 return false;
4733
53d36a56
LS
4734 /* A single page implies no faulting 'around' at all. */
4735 return fault_around_pages > 1;
9c28a205
PX
4736}
4737
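/*
 * Read fault on a file-backed VMA: try to map a batch of pages around the
 * fault address first (fault-around), and fall back to a single ->fault()
 * call if that did not resolve the fault.
 */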
2b740303 4738static vm_fault_t do_read_fault(struct vm_fault *vmf)
e655fb29 4739{
2b740303 4740 vm_fault_t ret = 0;
22d1e68f 4741 struct folio *folio;
8c6e50b0
KS
4742
4743 /*
4744 * Let's call ->map_pages() first and use ->fault() as fallback
4745 * if page by the offset is not ready to be mapped (cold cache or
4746 * something).
4747 */
9c28a205
PX
4748 if (should_fault_around(vmf)) {
4749 ret = do_fault_around(vmf);
4750 if (ret)
4751 return ret;
8c6e50b0 4752 }
e655fb29 4753
12214eba
MWO
4754 ret = vmf_can_call_fault(vmf);
4755 if (ret)
4756 return ret;
f5617ffe 4757
936ca80d 4758 ret = __do_fault(vmf);
e655fb29
KS
4759 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4760 return ret;
4761
9118c0cb 4762 ret |= finish_fault(vmf);
22d1e68f
SK
4763 folio = page_folio(vmf->page);
4764 folio_unlock(folio);
7267ec00 4765 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
22d1e68f 4766 folio_put(folio);
e655fb29
KS
4767 return ret;
4768}
4769
2b740303 4770static vm_fault_t do_cow_fault(struct vm_fault *vmf)
ec47c3b9 4771{
82b0f8c3 4772 struct vm_area_struct *vma = vmf->vma;
e4621e70 4773 struct folio *folio;
2b740303 4774 vm_fault_t ret;
ec47c3b9 4775
4de8c93a
MWO
4776 ret = vmf_can_call_fault(vmf);
4777 if (!ret)
4778 ret = vmf_anon_prepare(vmf);
4779 if (ret)
4780 return ret;
ec47c3b9 4781
e4621e70
KW
4782 folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false);
4783 if (!folio)
ec47c3b9
KS
4784 return VM_FAULT_OOM;
4785
e4621e70 4786 vmf->cow_page = &folio->page;
ec47c3b9 4787
936ca80d 4788 ret = __do_fault(vmf);
ec47c3b9
KS
4789 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4790 goto uncharge_out;
3917048d
JK
4791 if (ret & VM_FAULT_DONE_COW)
4792 return ret;
ec47c3b9 4793
b1aa812b 4794 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
e4621e70 4795 __folio_mark_uptodate(folio);
ec47c3b9 4796
9118c0cb 4797 ret |= finish_fault(vmf);
b1aa812b
JK
4798 unlock_page(vmf->page);
4799 put_page(vmf->page);
7267ec00
KS
4800 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4801 goto uncharge_out;
ec47c3b9
KS
4802 return ret;
4803uncharge_out:
e4621e70 4804 folio_put(folio);
ec47c3b9
KS
4805 return ret;
4806}
4807
2b740303 4808static vm_fault_t do_shared_fault(struct vm_fault *vmf)
1da177e4 4809{
82b0f8c3 4810 struct vm_area_struct *vma = vmf->vma;
2b740303 4811 vm_fault_t ret, tmp;
6f609b7e 4812 struct folio *folio;
1d65f86d 4813
4ed43798
MWO
4814 ret = vmf_can_call_fault(vmf);
4815 if (ret)
4816 return ret;
1d65f86d 4817
936ca80d 4818 ret = __do_fault(vmf);
7eae74af 4819 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
f0c6d4d2 4820 return ret;
1da177e4 4821
6f609b7e
SK
4822 folio = page_folio(vmf->page);
4823
1da177e4 4824 /*
f0c6d4d2
KS
4825 * Check if the backing address space wants to know that the page is
4826 * about to become writable
1da177e4 4827 */
fb09a464 4828 if (vma->vm_ops->page_mkwrite) {
6f609b7e 4829 folio_unlock(folio);
86aa6998 4830 tmp = do_page_mkwrite(vmf, folio);
fb09a464
KS
4831 if (unlikely(!tmp ||
4832 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
6f609b7e 4833 folio_put(folio);
fb09a464 4834 return tmp;
4294621f 4835 }
fb09a464
KS
4836 }
4837
9118c0cb 4838 ret |= finish_fault(vmf);
7267ec00
KS
4839 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4840 VM_FAULT_RETRY))) {
6f609b7e
SK
4841 folio_unlock(folio);
4842 folio_put(folio);
f0c6d4d2 4843 return ret;
1da177e4 4844 }
b827e496 4845
89b15332 4846 ret |= fault_dirty_shared_page(vmf);
1d65f86d 4847 return ret;
54cb8821 4848}
d00806b1 4849
9a95f3cf 4850/*
c1e8d7c6 4851 * We enter with non-exclusive mmap_lock (to exclude vma changes,
9a95f3cf 4852 * but allow concurrent faults).
c1e8d7c6 4853 * The mmap_lock may have been released depending on flags and our
9138e47e 4854 * return value. See filemap_fault() and __folio_lock_or_retry().
c1e8d7c6 4855 * If mmap_lock is released, vma may become invalid (for example
fc8efd2d 4856 * by another thread calling munmap()).
9a95f3cf 4857 */
2b740303 4858static vm_fault_t do_fault(struct vm_fault *vmf)
54cb8821 4859{
82b0f8c3 4860 struct vm_area_struct *vma = vmf->vma;
fc8efd2d 4861 struct mm_struct *vm_mm = vma->vm_mm;
2b740303 4862 vm_fault_t ret;
54cb8821 4863
ff09d7ec
AK
4864 /*
4865 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4866 */
4867 if (!vma->vm_ops->fault) {
3db82b93
HD
4868 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4869 vmf->address, &vmf->ptl);
4870 if (unlikely(!vmf->pte))
ff09d7ec
AK
4871 ret = VM_FAULT_SIGBUS;
4872 else {
ff09d7ec
AK
4873 /*
4874 * Make sure this is not a temporary clearing of pte
4875 * by holding ptl and checking again. An R/M/W update
4876 * of the pte involves: take the ptl, clear the pte so that
4877 * we don't have concurrent modification by hardware,
4878 * then update it.
4879 */
c33c7948 4880 if (unlikely(pte_none(ptep_get(vmf->pte))))
ff09d7ec
AK
4881 ret = VM_FAULT_SIGBUS;
4882 else
4883 ret = VM_FAULT_NOPAGE;
4884
4885 pte_unmap_unlock(vmf->pte, vmf->ptl);
4886 }
4887 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
b0b9b3df
HD
4888 ret = do_read_fault(vmf);
4889 else if (!(vma->vm_flags & VM_SHARED))
4890 ret = do_cow_fault(vmf);
4891 else
4892 ret = do_shared_fault(vmf);
4893
4894 /* preallocated pagetable is unused: free it */
4895 if (vmf->prealloc_pte) {
fc8efd2d 4896 pte_free(vm_mm, vmf->prealloc_pte);
7f2b6ce8 4897 vmf->prealloc_pte = NULL;
b0b9b3df
HD
4898 }
4899 return ret;
54cb8821
NP
4900}
4901
cda6d936 4902int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
f4c0d836 4903 unsigned long addr, int page_nid, int *flags)
9532fec1 4904{
cda6d936 4905 folio_get(folio);
9532fec1 4906
fc137c0d
R
4907 /* Record the current PID accessing the VMA */
4908 vma_set_access_pid_bit(vma);
4909
9532fec1 4910 count_vm_numa_event(NUMA_HINT_FAULTS);
04bb2f94 4911 if (page_nid == numa_node_id()) {
9532fec1 4912 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
04bb2f94
RR
4913 *flags |= TNF_FAULT_LOCAL;
4914 }
9532fec1 4915
75c70128 4916 return mpol_misplaced(folio, vma, addr);
9532fec1
MG
4917}
4918
2b740303 4919static vm_fault_t do_numa_page(struct vm_fault *vmf)
d10e63f2 4920{
82b0f8c3 4921 struct vm_area_struct *vma = vmf->vma;
6695cf68
KW
4922 struct folio *folio = NULL;
4923 int nid = NUMA_NO_NODE;
6a56ccbc 4924 bool writable = false;
90572890 4925 int last_cpupid;
cbee9f88 4926 int target_nid;
04a86453 4927 pte_t pte, old_pte;
6688cc05 4928 int flags = 0;
d10e63f2
MG
4929
4930 /*
166f61b9
TH
4931 * The "pte" at this point cannot be used safely without
4932 * validation through pte_unmap_same(). It's of NUMA type but
4933 * the pfn may be screwed if the read is non-atomic.
166f61b9 4934 */
82b0f8c3 4935 spin_lock(vmf->ptl);
c33c7948 4936 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
82b0f8c3 4937 pte_unmap_unlock(vmf->pte, vmf->ptl);
4daae3b4
MG
4938 goto out;
4939 }
4940
b99a342d
HY
4941 /* Get the normal PTE */
4942 old_pte = ptep_get(vmf->pte);
04a86453 4943 pte = pte_modify(old_pte, vma->vm_page_prot);
d10e63f2 4944
6a56ccbc
DH
4945 /*
4946 * Detect now whether the PTE could be writable; this information
4947 * is only valid while holding the PT lock.
4948 */
4949 writable = pte_write(pte);
4950 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
4951 can_change_pte_writable(vma, vmf->address, pte))
4952 writable = true;
4953
6695cf68
KW
4954 folio = vm_normal_folio(vma, vmf->address, pte);
4955 if (!folio || folio_is_zone_device(folio))
b99a342d 4956 goto out_map;
d10e63f2 4957
e81c4802 4958 /* TODO: handle PTE-mapped THP */
6695cf68 4959 if (folio_test_large(folio))
b99a342d 4960 goto out_map;
e81c4802 4961
6688cc05 4962 /*
bea66fbd
MG
4963 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4964 * much anyway since they can be in shared cache state. This misses
4965 * the case where a mapping is writable but the process never writes
4966 * to it but pte_write gets cleared during protection updates and
4967 * pte_dirty has unpredictable behaviour between PTE scan updates,
4968 * background writeback, dirty balancing and application behaviour.
6688cc05 4969 */
6a56ccbc 4970 if (!writable)
6688cc05
PZ
4971 flags |= TNF_NO_GROUP;
4972
dabe1d99 4973 /*
6695cf68 4974 * Flag if the folio is shared between multiple address spaces. This
dabe1d99
RR
4975 * is later used when determining whether to group tasks together
4976 */
6695cf68 4977 if (folio_estimated_sharers(folio) > 1 && (vma->vm_flags & VM_SHARED))
dabe1d99
RR
4978 flags |= TNF_SHARED;
4979
6695cf68 4980 nid = folio_nid(folio);
33024536
HY
4981 /*
4982 * For memory tiering mode, cpupid of slow memory page is used
4983 * to record page access time. So use default value.
4984 */
4985 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
6695cf68 4986 !node_is_toptier(nid))
33024536
HY
4987 last_cpupid = (-1 & LAST_CPUPID_MASK);
4988 else
67b33e3f 4989 last_cpupid = folio_last_cpupid(folio);
cda6d936 4990 target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags);
98fa15f3 4991 if (target_nid == NUMA_NO_NODE) {
6695cf68 4992 folio_put(folio);
b99a342d 4993 goto out_map;
4daae3b4 4994 }
b99a342d 4995 pte_unmap_unlock(vmf->pte, vmf->ptl);
6a56ccbc 4996 writable = false;
4daae3b4
MG
4997
4998 /* Migrate to the requested node */
6695cf68
KW
4999 if (migrate_misplaced_folio(folio, vma, target_nid)) {
5000 nid = target_nid;
6688cc05 5001 flags |= TNF_MIGRATED;
b99a342d 5002 } else {
074c2381 5003 flags |= TNF_MIGRATE_FAIL;
c7ad0880
HD
5004 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5005 vmf->address, &vmf->ptl);
5006 if (unlikely(!vmf->pte))
5007 goto out;
c33c7948 5008 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
b99a342d
HY
5009 pte_unmap_unlock(vmf->pte, vmf->ptl);
5010 goto out;
5011 }
5012 goto out_map;
5013 }
4daae3b4
MG
5014
5015out:
6695cf68
KW
5016 if (nid != NUMA_NO_NODE)
5017 task_numa_fault(last_cpupid, nid, 1, flags);
d10e63f2 5018 return 0;
b99a342d
HY
5019out_map:
5020 /*
5021 * Make it present again, depending on how arch implements
5022 * non-accessible ptes, some can allow access by kernel mode.
5023 */
5024 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
5025 pte = pte_modify(old_pte, vma->vm_page_prot);
5026 pte = pte_mkyoung(pte);
6a56ccbc 5027 if (writable)
161e393c 5028 pte = pte_mkwrite(pte, vma);
b99a342d 5029 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
5003a2bd 5030 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
b99a342d
HY
5031 pte_unmap_unlock(vmf->pte, vmf->ptl);
5032 goto out;
d10e63f2
MG
5033}
5034
2b740303 5035static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
b96375f7 5036{
8f5fd0e1
MWO
5037 struct vm_area_struct *vma = vmf->vma;
5038 if (vma_is_anonymous(vma))
82b0f8c3 5039 return do_huge_pmd_anonymous_page(vmf);
40d49a3c 5040 if (vma->vm_ops->huge_fault)
1d024e7a 5041 return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
b96375f7
MW
5042 return VM_FAULT_FALLBACK;
5043}
5044
183f24aa 5045/* `inline' is required to avoid gcc 4.1.2 build error */
5db4f15c 5046static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
b96375f7 5047{
8f5fd0e1 5048 struct vm_area_struct *vma = vmf->vma;
c89357e2 5049 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
aea06577 5050 vm_fault_t ret;
c89357e2 5051
8f5fd0e1 5052 if (vma_is_anonymous(vma)) {
c89357e2 5053 if (likely(!unshare) &&
d61ea1cb
PX
5054 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) {
5055 if (userfaultfd_wp_async(vmf->vma))
5056 goto split;
529b930b 5057 return handle_userfault(vmf, VM_UFFD_WP);
d61ea1cb 5058 }
5db4f15c 5059 return do_huge_pmd_wp_page(vmf);
529b930b 5060 }
327e9fd4 5061
8f5fd0e1
MWO
5062 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5063 if (vma->vm_ops->huge_fault) {
1d024e7a 5064 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
aea06577
DH
5065 if (!(ret & VM_FAULT_FALLBACK))
5066 return ret;
5067 }
327e9fd4 5068 }
af9e4d5f 5069
d61ea1cb 5070split:
327e9fd4 5071 /* COW or write-notify handled on pte level: split pmd. */
8f5fd0e1 5072 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
af9e4d5f 5073
b96375f7
MW
5074 return VM_FAULT_FALLBACK;
5075}
5076
2b740303 5077static vm_fault_t create_huge_pud(struct vm_fault *vmf)
a00cc7d9 5078{
14c99d65
GJ
5079#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
5080 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
c4fd825e 5081 struct vm_area_struct *vma = vmf->vma;
14c99d65 5082 /* No support for anonymous transparent PUD pages yet */
c4fd825e 5083 if (vma_is_anonymous(vma))
14c99d65 5084 return VM_FAULT_FALLBACK;
40d49a3c 5085 if (vma->vm_ops->huge_fault)
1d024e7a 5086 return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
14c99d65
GJ
5087#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
5088 return VM_FAULT_FALLBACK;
5089}
5090
5091static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
5092{
327e9fd4
THV
5093#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
5094 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
c4fd825e 5095 struct vm_area_struct *vma = vmf->vma;
aea06577
DH
5096 vm_fault_t ret;
5097
a00cc7d9 5098 /* No support for anonymous transparent PUD pages yet */
c4fd825e 5099 if (vma_is_anonymous(vma))
327e9fd4 5100 goto split;
c4fd825e
MWO
5101 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
5102 if (vma->vm_ops->huge_fault) {
1d024e7a 5103 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
aea06577
DH
5104 if (!(ret & VM_FAULT_FALLBACK))
5105 return ret;
5106 }
327e9fd4
THV
5107 }
5108split:
5109 /* COW or write-notify not handled on PUD level: split pud. */
c4fd825e 5110 __split_huge_pud(vma, vmf->pud, vmf->address);
14c99d65 5111#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
a00cc7d9
MW
5112 return VM_FAULT_FALLBACK;
5113}
5114
1da177e4
LT
5115/*
5116 * These routines also need to handle stuff like marking pages dirty
5117 * and/or accessed for architectures that don't do it in hardware (most
5118 * RISC architectures). The early dirtying is also good on the i386.
5119 *
5120 * There is also a hook called "update_mmu_cache()" that architectures
5121 * with external mmu caches can use to update those (ie the Sparc or
5122 * PowerPC hashed page tables that act as extended TLBs).
5123 *
c1e8d7c6 5124 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
7267ec00 5125 * concurrent faults).
9a95f3cf 5126 *
c1e8d7c6 5127 * The mmap_lock may have been released depending on flags and our return value.
9138e47e 5128 * See filemap_fault() and __folio_lock_or_retry().
1da177e4 5129 */
2b740303 5130static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
1da177e4
LT
5131{
5132 pte_t entry;
5133
82b0f8c3 5134 if (unlikely(pmd_none(*vmf->pmd))) {
7267ec00
KS
5135 /*
5136 * Leave __pte_alloc() until later: because vm_ops->fault may
5137 * want to allocate huge page, and if we expose page table
5138 * for an instant, it will be difficult to retract from
5139 * concurrent faults and from rmap lookups.
5140 */
82b0f8c3 5141 vmf->pte = NULL;
f46f2ade 5142 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
7267ec00 5143 } else {
7267ec00
KS
5144 /*
5145 * A regular pmd is established and it can't morph into a huge
c7ad0880
HD
5146 * pmd by anon khugepaged, since that takes mmap_lock in write
5147 * mode; but shmem or file collapse to THP could still morph
5148 * it into a huge pmd: just retry later if so.
7267ec00 5149 */
c7ad0880
HD
5150 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
5151 vmf->address, &vmf->ptl);
5152 if (unlikely(!vmf->pte))
5153 return 0;
26e1a0c3 5154 vmf->orig_pte = ptep_get_lockless(vmf->pte);
f46f2ade 5155 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
7267ec00 5156
2994302b 5157 if (pte_none(vmf->orig_pte)) {
82b0f8c3
JK
5158 pte_unmap(vmf->pte);
5159 vmf->pte = NULL;
65500d23 5160 }
1da177e4
LT
5161 }
5162
2bad466c
PX
5163 if (!vmf->pte)
5164 return do_pte_missing(vmf);
7267ec00 5165
2994302b
JK
5166 if (!pte_present(vmf->orig_pte))
5167 return do_swap_page(vmf);
7267ec00 5168
2994302b
JK
5169 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5170 return do_numa_page(vmf);
d10e63f2 5171
82b0f8c3 5172 spin_lock(vmf->ptl);
2994302b 5173 entry = vmf->orig_pte;
c33c7948 5174 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
7df67697 5175 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
8f4e2101 5176 goto unlock;
7df67697 5177 }
c89357e2 5178 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
f6f37321 5179 if (!pte_write(entry))
2994302b 5180 return do_wp_page(vmf);
c89357e2
DH
5181 else if (likely(vmf->flags & FAULT_FLAG_WRITE))
5182 entry = pte_mkdirty(entry);
1da177e4
LT
5183 }
5184 entry = pte_mkyoung(entry);
82b0f8c3
JK
5185 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5186 vmf->flags & FAULT_FLAG_WRITE)) {
5003a2bd
MWO
5187 update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5188 vmf->pte, 1);
1a44e149 5189 } else {
b7333b58
YS
5190 /* Skip spurious TLB flush for retried page fault */
5191 if (vmf->flags & FAULT_FLAG_TRIED)
5192 goto unlock;
1a44e149
AA
5193 /*
5194 * This is needed only for protection faults but the arch code
5195 * is not yet telling us if this is a protection fault or not.
5196 * This still avoids useless tlb flushes for .text page faults
5197 * with threads.
5198 */
82b0f8c3 5199 if (vmf->flags & FAULT_FLAG_WRITE)
99c29133
GS
5200 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5201 vmf->pte);
1a44e149 5202 }
8f4e2101 5203unlock:
82b0f8c3 5204 pte_unmap_unlock(vmf->pte, vmf->ptl);
83c54070 5205 return 0;
1da177e4
LT
5206}
5207
5208/*
4ec31152
MWO
5209 * On entry, we hold either the VMA lock or the mmap_lock
5210 * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in
5211 * the result, the mmap_lock is not held on exit. See filemap_fault()
5212 * and __folio_lock_or_retry().
1da177e4 5213 */
2b740303
SJ
5214static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
5215 unsigned long address, unsigned int flags)
1da177e4 5216{
82b0f8c3 5217 struct vm_fault vmf = {
bae473a4 5218 .vma = vma,
1a29d85e 5219 .address = address & PAGE_MASK,
824ddc60 5220 .real_address = address,
bae473a4 5221 .flags = flags,
0721ec8b 5222 .pgoff = linear_page_index(vma, address),
667240e0 5223 .gfp_mask = __get_fault_gfp_mask(vma),
bae473a4 5224 };
dcddffd4 5225 struct mm_struct *mm = vma->vm_mm;
7da4e2cb 5226 unsigned long vm_flags = vma->vm_flags;
1da177e4 5227 pgd_t *pgd;
c2febafc 5228 p4d_t *p4d;
2b740303 5229 vm_fault_t ret;
1da177e4 5230
1da177e4 5231 pgd = pgd_offset(mm, address);
c2febafc
KS
5232 p4d = p4d_alloc(mm, pgd, address);
5233 if (!p4d)
5234 return VM_FAULT_OOM;
a00cc7d9 5235
c2febafc 5236 vmf.pud = pud_alloc(mm, p4d, address);
a00cc7d9 5237 if (!vmf.pud)
c74df32c 5238 return VM_FAULT_OOM;
625110b5 5239retry_pud:
7da4e2cb 5240 if (pud_none(*vmf.pud) &&
3485b883 5241 thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
a00cc7d9
MW
5242 ret = create_huge_pud(&vmf);
5243 if (!(ret & VM_FAULT_FALLBACK))
5244 return ret;
5245 } else {
5246 pud_t orig_pud = *vmf.pud;
5247
5248 barrier();
5249 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
a00cc7d9 5250
c89357e2
DH
5251 /*
5252 * TODO once we support anonymous PUDs: NUMA case and
5253 * FAULT_FLAG_UNSHARE handling.
5254 */
5255 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) {
a00cc7d9
MW
5256 ret = wp_huge_pud(&vmf, orig_pud);
5257 if (!(ret & VM_FAULT_FALLBACK))
5258 return ret;
5259 } else {
5260 huge_pud_set_accessed(&vmf, orig_pud);
5261 return 0;
5262 }
5263 }
5264 }
5265
5266 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
82b0f8c3 5267 if (!vmf.pmd)
c74df32c 5268 return VM_FAULT_OOM;
625110b5
TH
5269
5270 /* Huge pud page fault raced with pmd_alloc? */
5271 if (pud_trans_unstable(vmf.pud))
5272 goto retry_pud;
5273
7da4e2cb 5274 if (pmd_none(*vmf.pmd) &&
3485b883 5275 thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
a2d58167 5276 ret = create_huge_pmd(&vmf);
c0292554
KS
5277 if (!(ret & VM_FAULT_FALLBACK))
5278 return ret;
71e3aac0 5279 } else {
26e1a0c3 5280 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
1f1d06c3 5281
5db4f15c 5282 if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
84c3fc4e 5283 VM_BUG_ON(thp_migration_supported() &&
5db4f15c
YS
5284 !is_pmd_migration_entry(vmf.orig_pmd));
5285 if (is_pmd_migration_entry(vmf.orig_pmd))
84c3fc4e
ZY
5286 pmd_migration_entry_wait(mm, vmf.pmd);
5287 return 0;
5288 }
5db4f15c
YS
5289 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
5290 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
5291 return do_huge_pmd_numa_page(&vmf);
d10e63f2 5292
c89357e2
DH
5293 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
5294 !pmd_write(vmf.orig_pmd)) {
5db4f15c 5295 ret = wp_huge_pmd(&vmf);
9845cbbd
KS
5296 if (!(ret & VM_FAULT_FALLBACK))
5297 return ret;
a1dd450b 5298 } else {
5db4f15c 5299 huge_pmd_set_accessed(&vmf);
9845cbbd 5300 return 0;
1f1d06c3 5301 }
71e3aac0
AA
5302 }
5303 }
5304
82b0f8c3 5305 return handle_pte_fault(&vmf);
1da177e4
LT
5306}
5307
bce617ed 5308/**
f0953a1b 5309 * mm_account_fault - Do page fault accounting
809ef83c 5310 * @mm: mm from which memcg should be extracted. It can be NULL.
bce617ed
PX
5311 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting
5312 * of perf event counters, but we'll still do the per-task accounting to
5313 * the task who triggered this page fault.
5314 * @address: the faulted address.
5315 * @flags: the fault flags.
5316 * @ret: the fault retcode.
5317 *
f0953a1b 5318 * This will take care of most of the page fault accounting. Meanwhile, it
bce617ed 5319 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
f0953a1b 5320 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
bce617ed
PX
5321 * still be in per-arch page fault handlers at the entry of page fault.
5322 */
53156443 5323static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs,
bce617ed
PX
5324 unsigned long address, unsigned int flags,
5325 vm_fault_t ret)
5326{
5327 bool major;
5328
53156443
SB
5329 /* Incomplete faults will be accounted upon completion. */
5330 if (ret & VM_FAULT_RETRY)
5331 return;
5332
bce617ed 5333 /*
53156443
SB
5334 * To preserve the behavior of older kernels, PGFAULT counters record
5335 * both successful and failed faults, as opposed to perf counters,
5336 * which ignore failed cases.
bce617ed 5337 */
53156443
SB
5338 count_vm_event(PGFAULT);
5339 count_memcg_event_mm(mm, PGFAULT);
5340
5341 /*
5342 * Do not account for unsuccessful faults (e.g. when the address wasn't
5343 * valid). That includes arch_vma_access_permitted() failing before
5344 * reaching here. So this is not a "this many hardware page faults"
5345 * counter. We should use the hw profiling for that.
5346 */
5347 if (ret & VM_FAULT_ERROR)
bce617ed
PX
5348 return;
5349
5350 /*
5351 * We define the fault as a major fault when the final successful fault
5352 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
5353 * handle it immediately previously).
5354 */
5355 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
5356
a2beb5f1
PX
5357 if (major)
5358 current->maj_flt++;
5359 else
5360 current->min_flt++;
5361
bce617ed 5362 /*
a2beb5f1
PX
5363 * If the fault is done for GUP, regs will be NULL. We only do the
5364 * accounting for the per-thread fault counters of the task that
5365 * triggered the fault, and we skip the perf event updates.
bce617ed
PX
5366 */
5367 if (!regs)
5368 return;
5369
a2beb5f1 5370 if (major)
bce617ed 5371 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
a2beb5f1 5372 else
bce617ed 5373 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
bce617ed
PX
5374}
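
/*
 * Editor's sketch of the expected caller pattern (hypothetical arch code,
 * not part of this file): the arch fault handler accounts the raw
 * PERF_COUNT_SW_PAGE_FAULTS event itself at entry and then relies on
 * handle_mm_fault() -> mm_account_fault() for PGFAULT and the
 * major/minor accounting:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *	...
 *	fault = handle_mm_fault(vma, address, flags, regs);
 */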
5375
ec1c86b2
YZ
5376#ifdef CONFIG_LRU_GEN
5377static void lru_gen_enter_fault(struct vm_area_struct *vma)
5378{
8788f678
YZ
5379 /* the LRU algorithm only applies to accesses with recency */
5380 current->in_lru_fault = vma_has_recency(vma);
ec1c86b2
YZ
5381}
5382
5383static void lru_gen_exit_fault(void)
5384{
5385 current->in_lru_fault = false;
5386}
5387#else
5388static void lru_gen_enter_fault(struct vm_area_struct *vma)
5389{
5390}
5391
5392static void lru_gen_exit_fault(void)
5393{
5394}
5395#endif /* CONFIG_LRU_GEN */
5396
cdc5021c
DH
5397static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
5398 unsigned int *flags)
5399{
5400 if (unlikely(*flags & FAULT_FLAG_UNSHARE)) {
5401 if (WARN_ON_ONCE(*flags & FAULT_FLAG_WRITE))
5402 return VM_FAULT_SIGSEGV;
5403 /*
5404 * FAULT_FLAG_UNSHARE only applies to COW mappings. Let's
5405 * just treat it like an ordinary read-fault otherwise.
5406 */
5407 if (!is_cow_mapping(vma->vm_flags))
5408 *flags &= ~FAULT_FLAG_UNSHARE;
79881fed
DH
5409 } else if (*flags & FAULT_FLAG_WRITE) {
5410 /* Write faults on read-only mappings are impossible ... */
5411 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
5412 return VM_FAULT_SIGSEGV;
5413 /* ... and FOLL_FORCE only applies to COW mappings. */
5414 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
5415 !is_cow_mapping(vma->vm_flags)))
5416 return VM_FAULT_SIGSEGV;
cdc5021c 5417 }
4089eef0
SB
5418#ifdef CONFIG_PER_VMA_LOCK
5419 /*
5420 * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of
5421 * the assumption that lock is dropped on VM_FAULT_RETRY.
5422 */
5423 if (WARN_ON_ONCE((*flags &
5424 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) ==
5425 (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)))
5426 return VM_FAULT_SIGSEGV;
5427#endif
5428
cdc5021c
DH
5429 return 0;
5430}
5431
9a95f3cf
PC
5432/*
5433 * By the time we get here, we already hold the mm semaphore
5434 *
c1e8d7c6 5435 * The mmap_lock may have been released depending on flags and our
9138e47e 5436 * return value. See filemap_fault() and __folio_lock_or_retry().
9a95f3cf 5437 */
2b740303 5438vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
bce617ed 5439 unsigned int flags, struct pt_regs *regs)
519e5247 5440{
53156443
SB
5441 /* If the fault handler drops the mmap_lock, vma may be freed */
5442 struct mm_struct *mm = vma->vm_mm;
2b740303 5443 vm_fault_t ret;
519e5247
JW
5444
5445 __set_current_state(TASK_RUNNING);
5446
cdc5021c
DH
5447 ret = sanitize_fault_flags(vma, &flags);
5448 if (ret)
53156443 5449 goto out;
cdc5021c 5450
de0c799b
LD
5451 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
5452 flags & FAULT_FLAG_INSTRUCTION,
53156443
SB
5453 flags & FAULT_FLAG_REMOTE)) {
5454 ret = VM_FAULT_SIGSEGV;
5455 goto out;
5456 }
de0c799b 5457
519e5247
JW
5458 /*
5459 * Enable the memcg OOM handling for faults triggered in user
5460 * space. Kernel faults are handled more gracefully.
5461 */
5462 if (flags & FAULT_FLAG_USER)
29ef680a 5463 mem_cgroup_enter_user_fault();
519e5247 5464
ec1c86b2
YZ
5465 lru_gen_enter_fault(vma);
5466
bae473a4
KS
5467 if (unlikely(is_vm_hugetlb_page(vma)))
5468 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5469 else
5470 ret = __handle_mm_fault(vma, address, flags);
519e5247 5471
ec1c86b2
YZ
5472 lru_gen_exit_fault();
5473
49426420 5474 if (flags & FAULT_FLAG_USER) {
29ef680a 5475 mem_cgroup_exit_user_fault();
166f61b9
TH
5476 /*
5477 * The task may have entered a memcg OOM situation but
5478 * if the allocation error was handled gracefully (no
5479 * VM_FAULT_OOM), there is no need to kill anything.
5480 * Just clean up the OOM state peacefully.
5481 */
5482 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5483 mem_cgroup_oom_synchronize(false);
49426420 5484 }
53156443
SB
5485out:
5486 mm_account_fault(mm, regs, address, flags, ret);
bce617ed 5487
519e5247
JW
5488 return ret;
5489}
e1d6d01a 5490EXPORT_SYMBOL_GPL(handle_mm_fault);
519e5247 5491
c2508ec5
LT
5492#ifdef CONFIG_LOCK_MM_AND_FIND_VMA
5493#include <linux/extable.h>
5494
5495static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
5496{
4542057e 5497 if (likely(mmap_read_trylock(mm)))
c2508ec5 5498 return true;
c2508ec5
LT
5499
5500 if (regs && !user_mode(regs)) {
8fa50708 5501 unsigned long ip = exception_ip(regs);
c2508ec5
LT
5502 if (!search_exception_tables(ip))
5503 return false;
5504 }
5505
eda00472 5506 return !mmap_read_lock_killable(mm);
c2508ec5
LT
5507}
5508
5509static inline bool mmap_upgrade_trylock(struct mm_struct *mm)
5510{
5511 /*
5512 * We don't have this operation yet.
5513 *
5514 * It should be easy enough to do: it's basically a
5515 * atomic_long_try_cmpxchg_acquire()
5516 * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but
5517 * it also needs the proper lockdep magic etc.
5518 */
5519 return false;
5520}
5521
5522static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
5523{
5524 mmap_read_unlock(mm);
5525 if (regs && !user_mode(regs)) {
8fa50708 5526 unsigned long ip = exception_ip(regs);
c2508ec5
LT
5527 if (!search_exception_tables(ip))
5528 return false;
5529 }
eda00472 5530 return !mmap_write_lock_killable(mm);
c2508ec5
LT
5531}
5532
5533/*
5534 * Helper for page fault handling.
5535 *
5536 * This is kind of equivalent to "mmap_read_lock()" followed
5537 * by "find_extend_vma()", except it's a lot more careful about
5538 * the locking (and will drop the lock on failure).
5539 *
5540 * For example, if we have a kernel bug that causes a page
5541 * fault, we don't want to just use mmap_read_lock() to get
5542 * the mm lock, because that would deadlock if the bug were
5543 * to happen while we're holding the mm lock for writing.
5544 *
5545 * So this checks the exception tables on kernel faults in
5546 * order to only do this all for instructions that are actually
5547 * expected to fault.
5548 *
5549 * We can also actually take the mm lock for writing if we
5550 * need to extend the vma, which helps the VM layer a lot.
5551 */
5552struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
5553 unsigned long addr, struct pt_regs *regs)
5554{
5555 struct vm_area_struct *vma;
5556
5557 if (!get_mmap_lock_carefully(mm, regs))
5558 return NULL;
5559
5560 vma = find_vma(mm, addr);
5561 if (likely(vma && (vma->vm_start <= addr)))
5562 return vma;
5563
5564 /*
5565 * Well, dang. We might still be successful, but only
5566 * if we can extend a vma to do so.
5567 */
5568 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
5569 mmap_read_unlock(mm);
5570 return NULL;
5571 }
5572
5573 /*
5574 * We can try to upgrade the mmap lock atomically,
5575 * in which case we can continue to use the vma
5576 * we already looked up.
5577 *
5578 * Otherwise we'll have to drop the mmap lock and
5579 * re-take it, and also look up the vma again,
5580 * re-checking it.
5581 */
5582 if (!mmap_upgrade_trylock(mm)) {
5583 if (!upgrade_mmap_lock_carefully(mm, regs))
5584 return NULL;
5585
5586 vma = find_vma(mm, addr);
5587 if (!vma)
5588 goto fail;
5589 if (vma->vm_start <= addr)
5590 goto success;
5591 if (!(vma->vm_flags & VM_GROWSDOWN))
5592 goto fail;
5593 }
5594
8d7071af 5595 if (expand_stack_locked(vma, addr))
c2508ec5
LT
5596 goto fail;
5597
5598success:
5599 mmap_write_downgrade(mm);
5600 return vma;
5601
5602fail:
5603 mmap_write_unlock(mm);
5604 return NULL;
5605}
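
/*
 * Editor's sketch of a caller (hypothetical arch fault handler, assuming
 * CONFIG_LOCK_MM_AND_FIND_VMA): this helper replaces the open-coded
 * mmap_read_lock()/find_vma()/stack-expansion sequence and returns with
 * the mmap_lock held for reading on success, or NULL with the lock
 * already dropped on failure.  bad_area_nosemaphore() below stands in
 * for whatever the architecture does when no VMA is found; the name is
 * illustrative only:
 *
 *	vma = lock_mm_and_find_vma(mm, address, regs);
 *	if (!vma)
 *		return bad_area_nosemaphore(regs, address);
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	...
 *	mmap_read_unlock(mm);
 */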
5606#endif
5607
50ee3253
SB
5608#ifdef CONFIG_PER_VMA_LOCK
5609/*
5610 * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be
5611 * stable and not isolated. If the VMA is not found or is being modified the
5612 * function returns NULL.
5613 */
5614struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
5615 unsigned long address)
5616{
5617 MA_STATE(mas, &mm->mm_mt, address, address);
5618 struct vm_area_struct *vma;
5619
5620 rcu_read_lock();
5621retry:
5622 vma = mas_walk(&mas);
5623 if (!vma)
5624 goto inval;
5625
50ee3253
SB
5626 if (!vma_start_read(vma))
5627 goto inval;
5628
657b5146
JH
5629 /*
5630 * find_mergeable_anon_vma uses adjacent vmas which are not locked.
5631 * This check must happen after vma_start_read(); otherwise, a
5632 * concurrent mremap() with MREMAP_DONTUNMAP could dissociate the VMA
5633 * from its anon_vma.
5634 */
29a22b9e 5635 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma))
657b5146 5636 goto inval_end_read;
444eeb17 5637
50ee3253 5638 /* Check since vm_start/vm_end might change before we lock the VMA */
657b5146
JH
5639 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
5640 goto inval_end_read;
50ee3253
SB
5641
5642 /* Check if the VMA got isolated after we found it */
5643 if (vma->detached) {
5644 vma_end_read(vma);
52f23865 5645 count_vm_vma_lock_event(VMA_LOCK_MISS);
50ee3253
SB
5646 /* The area was replaced with another one */
5647 goto retry;
5648 }
5649
5650 rcu_read_unlock();
5651 return vma;
657b5146
JH
5652
5653inval_end_read:
5654 vma_end_read(vma);
50ee3253
SB
5655inval:
5656 rcu_read_unlock();
52f23865 5657 count_vm_vma_lock_event(VMA_LOCK_ABORT);
50ee3253
SB
5658 return NULL;
5659}
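
/*
 * Editor's sketch of the intended fast path (hypothetical arch code,
 * assuming CONFIG_PER_VMA_LOCK): try the per-VMA lock first and fall
 * back to the mmap_lock path when the lookup fails or the fault must
 * retry:
 *
 *	vma = lock_vma_under_rcu(mm, address);
 *	if (vma) {
 *		fault = handle_mm_fault(vma, address,
 *					flags | FAULT_FLAG_VMA_LOCK, regs);
 *		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
 *			vma_end_read(vma);
 *	}
 *	if (!vma || (fault & VM_FAULT_RETRY))
 *		... take the mmap_lock and retry the fault ...
 */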
5660#endif /* CONFIG_PER_VMA_LOCK */
5661
90eceff1
KS
5662#ifndef __PAGETABLE_P4D_FOLDED
5663/*
5664 * Allocate p4d page table.
5665 * We've already handled the fast-path in-line.
5666 */
5667int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
5668{
5669 p4d_t *new = p4d_alloc_one(mm, address);
5670 if (!new)
5671 return -ENOMEM;
5672
90eceff1 5673 spin_lock(&mm->page_table_lock);
ed33b5a6 5674 if (pgd_present(*pgd)) { /* Another has populated it */
90eceff1 5675 p4d_free(mm, new);
ed33b5a6
QZ
5676 } else {
5677 smp_wmb(); /* See comment in pmd_install() */
90eceff1 5678 pgd_populate(mm, pgd, new);
ed33b5a6 5679 }
90eceff1
KS
5680 spin_unlock(&mm->page_table_lock);
5681 return 0;
5682}
5683#endif /* __PAGETABLE_P4D_FOLDED */
5684
1da177e4
LT
5685#ifndef __PAGETABLE_PUD_FOLDED
5686/*
5687 * Allocate page upper directory.
872fec16 5688 * We've already handled the fast-path in-line.
1da177e4 5689 */
c2febafc 5690int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
1da177e4 5691{
c74df32c
HD
5692 pud_t *new = pud_alloc_one(mm, address);
5693 if (!new)
1bb3630e 5694 return -ENOMEM;
1da177e4 5695
872fec16 5696 spin_lock(&mm->page_table_lock);
b4e98d9a
KS
5697 if (!p4d_present(*p4d)) {
5698 mm_inc_nr_puds(mm);
ed33b5a6 5699 smp_wmb(); /* See comment in pmd_install() */
c2febafc 5700 p4d_populate(mm, p4d, new);
b4e98d9a 5701 } else /* Another has populated it */
5e541973 5702 pud_free(mm, new);
c74df32c 5703 spin_unlock(&mm->page_table_lock);
1bb3630e 5704 return 0;
1da177e4
LT
5705}
5706#endif /* __PAGETABLE_PUD_FOLDED */
5707
5708#ifndef __PAGETABLE_PMD_FOLDED
5709/*
5710 * Allocate page middle directory.
872fec16 5711 * We've already handled the fast-path in-line.
1da177e4 5712 */
1bb3630e 5713int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1da177e4 5714{
a00cc7d9 5715 spinlock_t *ptl;
c74df32c
HD
5716 pmd_t *new = pmd_alloc_one(mm, address);
5717 if (!new)
1bb3630e 5718 return -ENOMEM;
1da177e4 5719
a00cc7d9 5720 ptl = pud_lock(mm, pud);
dc6c9a35
KS
5721 if (!pud_present(*pud)) {
5722 mm_inc_nr_pmds(mm);
ed33b5a6 5723 smp_wmb(); /* See comment in pmd_install() */
1bb3630e 5724 pud_populate(mm, pud, new);
ed33b5a6 5725 } else { /* Another has populated it */
5e541973 5726 pmd_free(mm, new);
ed33b5a6 5727 }
a00cc7d9 5728 spin_unlock(ptl);
1bb3630e 5729 return 0;
e0f39591 5730}
1da177e4
LT
5731#endif /* __PAGETABLE_PMD_FOLDED */
5732
0e5e64c0
MS
5733/**
5734 * follow_pte - look up PTE at a user virtual address
5735 * @mm: the mm_struct of the target address space
5736 * @address: user virtual address
5737 * @ptepp: location to store found PTE
5738 * @ptlp: location to store the lock for the PTE
5739 *
5740 * On a successful return, the pointer to the PTE is stored in @ptepp;
5741 * the corresponding lock is taken and its location is stored in @ptlp.
5742 * The contents of the PTE are only stable until @ptlp is released;
5743 * any further use, if any, must be protected against invalidation
5744 * with MMU notifiers.
5745 *
5746 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
5747 * should be taken for read.
5748 *
5749 * KVM uses this function. While it is arguably less bad than ``follow_pfn``,
5750 * it is not a good general-purpose API.
5751 *
5752 * Return: zero on success, -ve otherwise.
5753 */
5754int follow_pte(struct mm_struct *mm, unsigned long address,
5755 pte_t **ptepp, spinlock_t **ptlp)
f8ad0f49
JW
5756{
5757 pgd_t *pgd;
c2febafc 5758 p4d_t *p4d;
f8ad0f49
JW
5759 pud_t *pud;
5760 pmd_t *pmd;
5761 pte_t *ptep;
5762
5763 pgd = pgd_offset(mm, address);
5764 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
5765 goto out;
5766
c2febafc
KS
5767 p4d = p4d_offset(pgd, address);
5768 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
5769 goto out;
5770
5771 pud = pud_offset(p4d, address);
f8ad0f49
JW
5772 if (pud_none(*pud) || unlikely(pud_bad(*pud)))
5773 goto out;
5774
5775 pmd = pmd_offset(pud, address);
f66055ab 5776 VM_BUG_ON(pmd_trans_huge(*pmd));
f8ad0f49 5777
f8ad0f49 5778 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
3db82b93
HD
5779 if (!ptep)
5780 goto out;
c33c7948 5781 if (!pte_present(ptep_get(ptep)))
f8ad0f49
JW
5782 goto unlock;
5783 *ptepp = ptep;
5784 return 0;
5785unlock:
5786 pte_unmap_unlock(ptep, *ptlp);
5787out:
5788 return -EINVAL;
5789}
9fd6dad1
PB
5790EXPORT_SYMBOL_GPL(follow_pte);
5791
3b6748e2
JW
5792/**
5793 * follow_pfn - look up PFN at a user virtual address
5794 * @vma: memory mapping
5795 * @address: user virtual address
5796 * @pfn: location to store found PFN
5797 *
5798 * Only IO mappings and raw PFN mappings are allowed.
5799 *
9fd6dad1
PB
5800 * This function does not allow the caller to read the permissions
5801 * of the PTE. Do not use it.
5802 *
a862f68a 5803 * Return: zero and the pfn at @pfn on success, -ve otherwise.
3b6748e2
JW
5804 */
5805int follow_pfn(struct vm_area_struct *vma, unsigned long address,
5806 unsigned long *pfn)
5807{
5808 int ret = -EINVAL;
5809 spinlock_t *ptl;
5810 pte_t *ptep;
5811
5812 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5813 return ret;
5814
9fd6dad1 5815 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
3b6748e2
JW
5816 if (ret)
5817 return ret;
c33c7948 5818 *pfn = pte_pfn(ptep_get(ptep));
3b6748e2
JW
5819 pte_unmap_unlock(ptep, ptl);
5820 return 0;
5821}
5822EXPORT_SYMBOL(follow_pfn);
5823
28b2ee20 5824#ifdef CONFIG_HAVE_IOREMAP_PROT
d87fe660 5825int follow_phys(struct vm_area_struct *vma,
5826 unsigned long address, unsigned int flags,
5827 unsigned long *prot, resource_size_t *phys)
28b2ee20 5828{
03668a4d 5829 int ret = -EINVAL;
28b2ee20
RR
5830 pte_t *ptep, pte;
5831 spinlock_t *ptl;
28b2ee20 5832
d87fe660 5833 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5834 goto out;
28b2ee20 5835
9fd6dad1 5836 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
d87fe660 5837 goto out;
c33c7948 5838 pte = ptep_get(ptep);
03668a4d 5839
f6f37321 5840 if ((flags & FOLL_WRITE) && !pte_write(pte))
28b2ee20 5841 goto unlock;
28b2ee20
RR
5842
5843 *prot = pgprot_val(pte_pgprot(pte));
03668a4d 5844 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
28b2ee20 5845
03668a4d 5846 ret = 0;
28b2ee20
RR
5847unlock:
5848 pte_unmap_unlock(ptep, ptl);
5849out:
d87fe660 5850 return ret;
28b2ee20
RR
5851}
5852
96667f8a
DV
5853/**
5854 * generic_access_phys - generic implementation for iomem mmap access
5855 * @vma: the vma to access
f0953a1b 5856 * @addr: userspace address, not relative offset within @vma
96667f8a
DV
5857 * @buf: buffer to read/write
5858 * @len: length of transfer
5859 * @write: set to FOLL_WRITE when writing, otherwise reading
5860 *
5861 * This is a generic implementation for &vm_operations_struct.access for an
5862 * iomem mapping. This callback is used by access_process_vm() when the @vma is
5863 * not page based.
5864 */
28b2ee20
RR
5865int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5866 void *buf, int len, int write)
5867{
5868 resource_size_t phys_addr;
5869 unsigned long prot = 0;
2bc7273b 5870 void __iomem *maddr;
96667f8a
DV
5871 pte_t *ptep, pte;
5872 spinlock_t *ptl;
5873 int offset = offset_in_page(addr);
5874 int ret = -EINVAL;
5875
5876 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5877 return -EINVAL;
5878
5879retry:
e913a8cd 5880 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
96667f8a 5881 return -EINVAL;
c33c7948 5882 pte = ptep_get(ptep);
96667f8a 5883 pte_unmap_unlock(ptep, ptl);
28b2ee20 5884
96667f8a
DV
5885 prot = pgprot_val(pte_pgprot(pte));
5886 phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5887
5888 if ((write & FOLL_WRITE) && !pte_write(pte))
28b2ee20
RR
5889 return -EINVAL;
5890
9cb12d7b 5891 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
24eee1e4 5892 if (!maddr)
5893 return -ENOMEM;
5894
e913a8cd 5895 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
96667f8a
DV
5896 goto out_unmap;
5897
c33c7948 5898 if (!pte_same(pte, ptep_get(ptep))) {
96667f8a
DV
5899 pte_unmap_unlock(ptep, ptl);
5900 iounmap(maddr);
5901
5902 goto retry;
5903 }
5904
28b2ee20
RR
5905 if (write)
5906 memcpy_toio(maddr + offset, buf, len);
5907 else
5908 memcpy_fromio(buf, maddr + offset, len);
96667f8a
DV
5909 ret = len;
5910 pte_unmap_unlock(ptep, ptl);
5911out_unmap:
28b2ee20
RR
5912 iounmap(maddr);
5913
96667f8a 5914 return ret;
28b2ee20 5915}
5a73633e 5916EXPORT_SYMBOL_GPL(generic_access_phys);
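
/*
 * Editor's sketch (hypothetical driver code, not part of this file): an
 * iomem mapping that wants access_process_vm()/ptrace() peeks and pokes
 * to work can simply point its ->access handler at this helper:
 *
 *	static const struct vm_operations_struct foo_vm_ops = {
 *		.access = generic_access_phys,
 *	};
 *
 * with vma->vm_ops = &foo_vm_ops set in the driver's mmap method before
 * the PFN range is inserted (the foo_* names are illustrative).
 */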
28b2ee20
RR
5917#endif
5918
0ec76a11 5919/*
d3f5ffca 5920 * Access another process' address space as given in mm.
0ec76a11 5921 */
c43cfa42
LS
5922static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
5923 void *buf, int len, unsigned int gup_flags)
0ec76a11 5924{
0ec76a11 5925 void *old_buf = buf;
442486ec 5926 int write = gup_flags & FOLL_WRITE;
0ec76a11 5927
d8ed45c5 5928 if (mmap_read_lock_killable(mm))
1e426fe2
KK
5929 return 0;
5930
22883973
KS
5931 /* Untag the address before looking up the VMA */
5932 addr = untagged_addr_remote(mm, addr);
5933
eee9c708
LT
5934 /* Avoid triggering the temporary warning in __get_user_pages */
5935 if (!vma_lookup(mm, addr) && !expand_stack(mm, addr))
5936 return 0;
5937
183ff22b 5938 /* ignore errors, just check how much was successfully transferred */
0ec76a11 5939 while (len) {
ca5e8632 5940 int bytes, offset;
0ec76a11 5941 void *maddr;
ca5e8632
LS
5942 struct vm_area_struct *vma = NULL;
5943 struct page *page = get_user_page_vma_remote(mm, addr,
5944 gup_flags, &vma);
0ec76a11 5945
6a1960b8 5946 if (IS_ERR(page)) {
9471f1f2
LT
5947 /* We might need to expand the stack to access it */
5948 vma = vma_lookup(mm, addr);
5949 if (!vma) {
5950 vma = expand_stack(mm, addr);
5951
5952 /* mmap_lock was dropped on failure */
5953 if (!vma)
5954 return buf - old_buf;
5955
5956 /* Try again if stack expansion worked */
5957 continue;
5958 }
5959
28b2ee20
RR
5960 /*
5961 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5962 * we can access using slightly different code.
5963 */
9471f1f2
LT
5964 bytes = 0;
5965#ifdef CONFIG_HAVE_IOREMAP_PROT
28b2ee20 5966 if (vma->vm_ops && vma->vm_ops->access)
9471f1f2
LT
5967 bytes = vma->vm_ops->access(vma, addr, buf,
5968 len, write);
dbffcd03 5969#endif
9471f1f2
LT
5970 if (bytes <= 0)
5971 break;
0ec76a11 5972 } else {
28b2ee20
RR
5973 bytes = len;
5974 offset = addr & (PAGE_SIZE-1);
5975 if (bytes > PAGE_SIZE-offset)
5976 bytes = PAGE_SIZE-offset;
5977
f7ef5fe7 5978 maddr = kmap_local_page(page);
28b2ee20
RR
5979 if (write) {
5980 copy_to_user_page(vma, page, addr,
5981 maddr + offset, buf, bytes);
5982 set_page_dirty_lock(page);
5983 } else {
5984 copy_from_user_page(vma, page, addr,
5985 buf, maddr + offset, bytes);
5986 }
f7ef5fe7 5987 unmap_and_put_page(page, maddr);
0ec76a11 5988 }
0ec76a11
DH
5989 len -= bytes;
5990 buf += bytes;
5991 addr += bytes;
5992 }
d8ed45c5 5993 mmap_read_unlock(mm);
0ec76a11
DH
5994
5995 return buf - old_buf;
5996}
03252919 5997
5ddd36b9 5998/**
ae91dbfc 5999 * access_remote_vm - access another process' address space
5ddd36b9
SW
6000 * @mm: the mm_struct of the target address space
6001 * @addr: start address to access
6002 * @buf: source or destination buffer
6003 * @len: number of bytes to transfer
6347e8d5 6004 * @gup_flags: flags modifying lookup behaviour
5ddd36b9
SW
6005 *
6006 * The caller must hold a reference on @mm.
a862f68a
MR
6007 *
6008 * Return: number of bytes copied from source to destination.
5ddd36b9
SW
6009 */
6010int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6347e8d5 6011 void *buf, int len, unsigned int gup_flags)
5ddd36b9 6012{
d3f5ffca 6013 return __access_remote_vm(mm, addr, buf, len, gup_flags);
5ddd36b9
SW
6014}
6015
206cb636
SW
6016/*
6017 * Access another process' address space.
6018 * Source/target buffer must be kernel space.
6019 * Do not walk the page table directly; use get_user_pages.
6020 */
6021int access_process_vm(struct task_struct *tsk, unsigned long addr,
f307ab6d 6022 void *buf, int len, unsigned int gup_flags)
206cb636
SW
6023{
6024 struct mm_struct *mm;
6025 int ret;
6026
6027 mm = get_task_mm(tsk);
6028 if (!mm)
6029 return 0;
6030
d3f5ffca 6031 ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
442486ec 6032
206cb636
SW
6033 mmput(mm);
6034
6035 return ret;
6036}
fcd35857 6037EXPORT_SYMBOL_GPL(access_process_vm);
206cb636 6038
03252919
AK
6039/*
6040 * Print the name of a VMA.
6041 */
6042void print_vma_addr(char *prefix, unsigned long ip)
6043{
6044 struct mm_struct *mm = current->mm;
6045 struct vm_area_struct *vma;
6046
e8bff74a 6047 /*
0a7f682d 6048 * we might be running from an atomic context so we cannot sleep
e8bff74a 6049 */
d8ed45c5 6050 if (!mmap_read_trylock(mm))
e8bff74a
IM
6051 return;
6052
03252919
AK
6053 vma = find_vma(mm, ip);
6054 if (vma && vma->vm_file) {
6055 struct file *f = vma->vm_file;
0a7f682d 6056 char *buf = (char *)__get_free_page(GFP_NOWAIT);
03252919 6057 if (buf) {
2fbc57c5 6058 char *p;
03252919 6059
9bf39ab2 6060 p = file_path(f, buf, PAGE_SIZE);
03252919
AK
6061 if (IS_ERR(p))
6062 p = "?";
2fbc57c5 6063 printk("%s%s[%lx+%lx]", prefix, kbasename(p),
03252919
AK
6064 vma->vm_start,
6065 vma->vm_end - vma->vm_start);
6066 free_page((unsigned long)buf);
6067 }
6068 }
d8ed45c5 6069 mmap_read_unlock(mm);
03252919 6070}
3ee1afa3 6071
662bbcb2 6072#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
9ec23531 6073void __might_fault(const char *file, int line)
3ee1afa3 6074{
9ec23531 6075 if (pagefault_disabled())
662bbcb2 6076 return;
42a38756 6077 __might_sleep(file, line);
9ec23531 6078#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
662bbcb2 6079 if (current->mm)
da1c55f1 6080 might_lock_read(&current->mm->mmap_lock);
9ec23531 6081#endif
3ee1afa3 6082}
9ec23531 6083EXPORT_SYMBOL(__might_fault);
3ee1afa3 6084#endif
47ad8475
AA
6085
6086#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
c6ddfb6c
HY
6087/*
6088 * Process all subpages of the specified huge page with the specified
6089 * operation. The target subpage will be processed last to keep its
6090 * cache lines hot.
6091 */
1cb9dc4b 6092static inline int process_huge_page(
c6ddfb6c 6093 unsigned long addr_hint, unsigned int pages_per_huge_page,
1cb9dc4b 6094 int (*process_subpage)(unsigned long addr, int idx, void *arg),
c6ddfb6c 6095 void *arg)
47ad8475 6096{
1cb9dc4b 6097 int i, n, base, l, ret;
c79b57e4
HY
6098 unsigned long addr = addr_hint &
6099 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
47ad8475 6100
c6ddfb6c 6101 /* Process target subpage last to keep its cache lines hot */
47ad8475 6102 might_sleep();
c79b57e4
HY
6103 n = (addr_hint - addr) / PAGE_SIZE;
6104 if (2 * n <= pages_per_huge_page) {
c6ddfb6c 6105 /* If target subpage in first half of huge page */
c79b57e4
HY
6106 base = 0;
6107 l = n;
c6ddfb6c 6108 /* Process subpages at the end of huge page */
c79b57e4
HY
6109 for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
6110 cond_resched();
1cb9dc4b
LS
6111 ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
6112 if (ret)
6113 return ret;
c79b57e4
HY
6114 }
6115 } else {
c6ddfb6c 6116 /* If target subpage in second half of huge page */
c79b57e4
HY
6117 base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
6118 l = pages_per_huge_page - n;
c6ddfb6c 6119 /* Process subpages at the beginning of huge page */
c79b57e4
HY
6120 for (i = 0; i < base; i++) {
6121 cond_resched();
1cb9dc4b
LS
6122 ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
6123 if (ret)
6124 return ret;
c79b57e4
HY
6125 }
6126 }
6127 /*
c6ddfb6c
HY
6128 * Process remaining subpages in left-right-left-right pattern
6129 * towards the target subpage
c79b57e4
HY
6130 */
6131 for (i = 0; i < l; i++) {
6132 int left_idx = base + i;
6133 int right_idx = base + 2 * l - 1 - i;
6134
6135 cond_resched();
1cb9dc4b
LS
6136 ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
6137 if (ret)
6138 return ret;
47ad8475 6139 cond_resched();
1cb9dc4b
LS
6140 ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
6141 if (ret)
6142 return ret;
47ad8475 6143 }
1cb9dc4b 6144 return 0;
47ad8475
AA
6145}
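
/*
 * Worked example (editor's sketch): for pages_per_huge_page == 8 and a
 * target subpage index n == 2, the first-half case applies (2 * n <= 8),
 * giving base == 0 and l == 2.  The tail loop processes subpages 7, 6, 5, 4
 * and the final left-right loop processes 0, 3, 1, 2, so the faulting
 * subpage (index 2) is touched last and its cache lines stay hot.
 */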
6146
c6ddfb6c
HY
6147static void clear_gigantic_page(struct page *page,
6148 unsigned long addr,
6149 unsigned int pages_per_huge_page)
6150{
6151 int i;
14455eab 6152 struct page *p;
c6ddfb6c
HY
6153
6154 might_sleep();
14455eab
CL
6155 for (i = 0; i < pages_per_huge_page; i++) {
6156 p = nth_page(page, i);
c6ddfb6c
HY
6157 cond_resched();
6158 clear_user_highpage(p, addr + i * PAGE_SIZE);
6159 }
6160}
6161
1cb9dc4b 6162static int clear_subpage(unsigned long addr, int idx, void *arg)
c6ddfb6c
HY
6163{
6164 struct page *page = arg;
6165
6166 clear_user_highpage(page + idx, addr);
1cb9dc4b 6167 return 0;
c6ddfb6c
HY
6168}
6169
6170void clear_huge_page(struct page *page,
6171 unsigned long addr_hint, unsigned int pages_per_huge_page)
6172{
6173 unsigned long addr = addr_hint &
6174 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
6175
6176 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
6177 clear_gigantic_page(page, addr, pages_per_huge_page);
6178 return;
6179 }
6180
6181 process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
6182}
6183
1cb9dc4b 6184static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
c0e8150e
Z
6185 unsigned long addr,
6186 struct vm_area_struct *vma,
6187 unsigned int pages_per_huge_page)
47ad8475
AA
6188{
6189 int i;
c0e8150e
Z
6190 struct page *dst_page;
6191 struct page *src_page;
47ad8475 6192
14455eab 6193 for (i = 0; i < pages_per_huge_page; i++) {
c0e8150e
Z
6194 dst_page = folio_page(dst, i);
6195 src_page = folio_page(src, i);
14455eab 6196
47ad8475 6197 cond_resched();
1cb9dc4b
LS
6198 if (copy_mc_user_highpage(dst_page, src_page,
6199 addr + i*PAGE_SIZE, vma)) {
6200 memory_failure_queue(page_to_pfn(src_page), 0);
6201 return -EHWPOISON;
6202 }
47ad8475 6203 }
1cb9dc4b 6204 return 0;
47ad8475
AA
6205}
6206
c9f4cd71
HY
6207struct copy_subpage_arg {
6208 struct page *dst;
6209 struct page *src;
6210 struct vm_area_struct *vma;
6211};
6212
1cb9dc4b 6213static int copy_subpage(unsigned long addr, int idx, void *arg)
c9f4cd71
HY
6214{
6215 struct copy_subpage_arg *copy_arg = arg;
6216
1cb9dc4b
LS
6217 if (copy_mc_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
6218 addr, copy_arg->vma)) {
6219 memory_failure_queue(page_to_pfn(copy_arg->src + idx), 0);
6220 return -EHWPOISON;
6221 }
6222 return 0;
c9f4cd71
HY
6223}
6224
1cb9dc4b
LS
6225int copy_user_large_folio(struct folio *dst, struct folio *src,
6226 unsigned long addr_hint, struct vm_area_struct *vma)
47ad8475 6227{
c0e8150e 6228 unsigned int pages_per_huge_page = folio_nr_pages(dst);
c9f4cd71
HY
6229 unsigned long addr = addr_hint &
6230 ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
6231 struct copy_subpage_arg arg = {
c0e8150e
Z
6232 .dst = &dst->page,
6233 .src = &src->page,
c9f4cd71
HY
6234 .vma = vma,
6235 };
47ad8475 6236
1cb9dc4b
LS
6237 if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
6238 return copy_user_gigantic_page(dst, src, addr, vma,
6239 pages_per_huge_page);
47ad8475 6240
1cb9dc4b 6241 return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
47ad8475 6242}
fa4d75c1 6243
e87340ca
Z
6244long copy_folio_from_user(struct folio *dst_folio,
6245 const void __user *usr_src,
6246 bool allow_pagefault)
fa4d75c1 6247{
e87340ca 6248 void *kaddr;
fa4d75c1 6249 unsigned long i, rc = 0;
e87340ca
Z
6250 unsigned int nr_pages = folio_nr_pages(dst_folio);
6251 unsigned long ret_val = nr_pages * PAGE_SIZE;
14455eab 6252 struct page *subpage;
fa4d75c1 6253
e87340ca
Z
6254 for (i = 0; i < nr_pages; i++) {
6255 subpage = folio_page(dst_folio, i);
6256 kaddr = kmap_local_page(subpage);
0d508c1f
Z
6257 if (!allow_pagefault)
6258 pagefault_disable();
e87340ca 6259 rc = copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE);
0d508c1f
Z
6260 if (!allow_pagefault)
6261 pagefault_enable();
e87340ca 6262 kunmap_local(kaddr);
fa4d75c1
MK
6263
6264 ret_val -= (PAGE_SIZE - rc);
6265 if (rc)
6266 break;
6267
e763243c
MS
6268 flush_dcache_page(subpage);
6269
fa4d75c1
MK
6270 cond_resched();
6271 }
6272 return ret_val;
6273}
47ad8475 6274#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
49076ec2 6275
40b64acd 6276#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
b35f1819
KS
6277
6278static struct kmem_cache *page_ptl_cachep;
6279
6280void __init ptlock_cache_init(void)
6281{
6282 page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
6283 SLAB_PANIC, NULL);
6284}
6285
f5ecca06 6286bool ptlock_alloc(struct ptdesc *ptdesc)
49076ec2
KS
6287{
6288 spinlock_t *ptl;
6289
b35f1819 6290 ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
49076ec2
KS
6291 if (!ptl)
6292 return false;
f5ecca06 6293 ptdesc->ptl = ptl;
49076ec2
KS
6294 return true;
6295}
6296
6ed1b8a0 6297void ptlock_free(struct ptdesc *ptdesc)
49076ec2 6298{
6ed1b8a0 6299 kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
49076ec2
KS
6300}
6301#endif