mm, hugetlb: fix racy resv_huge_pages underflow on UFFDIO_COPY
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

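/*
 * Allocate an anonymous page and copy the source page into it, then map
 * it at dst_addr.  If the atomic copy_from_user() faults, return -ENOENT
 * with *pagep set so the caller can redo the copy outside mmap_lock and
 * call again.
 */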
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;
	pgoff_t offset, max_off;
	struct inode *inode;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
		goto out_release;

	_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
	if (dst_vma->vm_flags & VM_WRITE) {
		if (wp_copy)
			_dst_pte = pte_mkuffd_wp(_dst_pte);
		else
			_dst_pte = pte_mkwrite(_dst_pte);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_release_uncharge_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	lru_cache_add_inactive_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out_release:
	put_page(page);
	goto out;
}

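/*
 * Map the zero page read-only at dst_addr.  No page is allocated: the
 * pte is installed as a special pte pointing at the shared zero pfn.
 */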
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

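/*
 * Walk the destination page tables, allocating the intermediate levels
 * as needed, and return the pmd for @address (or NULL on allocation
 * failure).
 */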
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we aren't called only when the pmd is missing:
	 * *pmd may already be established, and it may even be a
	 * trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held, it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mode)
{
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist, as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		if (mode != MCOPY_ATOMIC_CONTINUE &&
		    !huge_pte_none(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, mode, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      enum mcopy_atomic_mode mode);
#endif /* CONFIG_HUGETLB_PAGE */

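/*
 * Route one page worth of work to the anonymous or shmem implementation,
 * depending on the type of the destination vma.
 */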
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						bool zeropage,
						bool wp_copy)
{
	ssize_t err;

	/*
	 * The normal page fault path for a shmem mapping will invoke
	 * the fault, fill the hole in the file and COW it right away.
	 * The result generates plain anonymous memory.  So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole.  For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		VM_WARN_ON_ONCE(wp_copy);
		if (!zeropage)
			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
						     dst_vma, dst_addr,
						     src_addr, page);
		else
			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
						       dst_vma, dst_addr);
	}

	return err;
}

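/*
 * Common implementation behind mcopy_atomic(), mfill_zeropage() and
 * mcopy_continue(): validate the destination range, then fill it one
 * page at a time, dropping mmap_lock to copy the source page from
 * userspace and retrying whenever the atomic copy returns -ENOENT.
 */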
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mcopy_mode,
					      bool *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;
	bool zeropage = (mcopy_mode == MCOPY_ATOMIC_ZEROPAGE);

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
						src_start, len, mcopy_mode);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (mcopy_mode == MCOPY_ATOMIC_CONTINUE)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, zeropage, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

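/*
 * Entry points called from the userfaultfd ioctl handlers, backing
 * UFFDIO_COPY, UFFDIO_ZEROPAGE and UFFDIO_CONTINUE respectively.
 */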
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     bool *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
			      mmap_changing, 0);
}

ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, bool *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
			      mmap_changing, 0);
}

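/*
 * Enable or disable userfaultfd write protection (UFFDIO_WRITEPROTECT)
 * on a range of anonymous memory registered with VM_UFFD_WP.
 */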
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp, bool *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	pgprot_t newprot;
	int err;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && READ_ONCE(*mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);
	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	change_protection(dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);

	err = 0;
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}