// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
                                    unsigned long dst_start,
                                    unsigned long len)
{
        /*
         * Make sure that the dst range is both valid and fully within a
         * single existing vma.
         */
        struct vm_area_struct *dst_vma;

        dst_vma = find_vma(dst_mm, dst_start);
        if (!dst_vma)
                return NULL;

        if (dst_start < dst_vma->vm_start ||
            dst_start + len > dst_vma->vm_end)
                return NULL;

        /*
         * Check that the vma is registered with userfaultfd; this is
         * required to enforce the VM_MAYWRITE check done at uffd
         * registration time.
         */
        if (!dst_vma->vm_userfaultfd_ctx.ctx)
                return NULL;

        return dst_vma;
}
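/*
 * Example of the containment check in find_dst_vma(): for a vma covering
 * [0x1000, 0x5000), a request with dst_start == 0x3000 and len == 0x3000
 * is rejected, because dst_start + len == 0x6000 lies beyond vm_end.  The
 * whole destination range must fit inside a single vma.
 */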

/*
 * Install PTEs to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
                             struct vm_area_struct *dst_vma,
                             unsigned long dst_addr, struct page *page,
                             bool newly_allocated, bool wp_copy)
{
        int ret;
        pte_t _dst_pte, *dst_pte;
        bool writable = dst_vma->vm_flags & VM_WRITE;
        bool vm_shared = dst_vma->vm_flags & VM_SHARED;
        bool page_in_cache = page_mapping(page);
        spinlock_t *ptl;
        struct folio *folio;
        struct inode *inode;
        pgoff_t offset, max_off;

        _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
        _dst_pte = pte_mkdirty(_dst_pte);
        if (page_in_cache && !vm_shared)
                writable = false;

        /*
         * Always mark a PTE as write-protected when needed, regardless of
         * VM_WRITE, which the user might change later.
         */
        if (wp_copy) {
                _dst_pte = pte_mkuffd_wp(_dst_pte);
                writable = false;
        }

        if (writable)
                _dst_pte = pte_mkwrite(_dst_pte);
        else
                /*
                 * We need this to make sure the write bit is removed, as
                 * mk_pte() could return a pte with the write bit set.
                 */
                _dst_pte = pte_wrprotect(_dst_pte);
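
        /*
         * At this point the new PTE is writable only if the vma is
         * writable (VM_WRITE), the page is not a page cache page mapped
         * privately, and no uffd-wp protection was requested; in every
         * other case the write bit has been cleared above.
         */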

        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

        if (vma_is_shmem(dst_vma)) {
                /* serialize against truncate with the page table lock */
                inode = dst_vma->vm_file->f_inode;
                offset = linear_page_index(dst_vma, dst_addr);
                max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
                ret = -EFAULT;
                if (unlikely(offset >= max_off))
                        goto out_unlock;
        }
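        /*
         * For example, if i_size is 5 bytes, max_off is 1, so only page
         * offset 0 is acceptable; installing a PTE at any higher offset
         * would map memory beyond EOF that a concurrent truncate is
         * entitled to reclaim.
         */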

        ret = -EEXIST;
        /*
         * We allow overwriting a pte marker: consider the case where both
         * MISSING and WP are registered, and we first write-protect a none
         * pte (which has no page cache page backing it) before the page is
         * accessed.  pte_none_mostly() treats such marker ptes as "none".
         */
        if (!pte_none_mostly(*dst_pte))
                goto out_unlock;

        folio = page_folio(page);
        if (page_in_cache) {
                /* Usually, cache pages are already added to the LRU */
                if (newly_allocated)
                        folio_add_lru(folio);
                page_add_file_rmap(page, dst_vma, false);
        } else {
                page_add_new_anon_rmap(page, dst_vma, dst_addr);
                folio_add_lru_vma(folio, dst_vma);
        }

        /*
         * Must happen after rmap, as mm_counter() checks mapping (via
         * PageAnon()), which is set by __page_set_anon_rmap().
         */
        inc_mm_counter(dst_mm, mm_counter(page));

        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

        /* No need to invalidate - it was non-present before */
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
        ret = 0;
out_unlock:
        pte_unmap_unlock(dst_pte, ptl);
        return ret;
}

static int mcopy_atomic_pte(struct mm_struct *dst_mm,
                            pmd_t *dst_pmd,
                            struct vm_area_struct *dst_vma,
                            unsigned long dst_addr,
                            unsigned long src_addr,
                            struct page **pagep,
                            bool wp_copy)
{
        void *page_kaddr;
        int ret;
        struct page *page;

        if (!*pagep) {
                ret = -ENOMEM;
                page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
                if (!page)
                        goto out;

                page_kaddr = kmap_local_page(page);
                /*
                 * The read mmap_lock is held here.  Despite the mmap_lock
                 * being read-recursive, a deadlock is still possible if a
                 * writer has taken a lock.  For example:
                 *
                 * process A thread 1 takes read lock on own mmap_lock
                 * process A thread 2 calls mmap, blocks taking write lock
                 * process B thread 1 takes page fault, read lock on own mmap lock
                 * process B thread 2 calls mmap, blocks taking write lock
                 * process A thread 1 blocks taking read lock on process B
                 * process B thread 1 blocks taking read lock on process A
                 *
                 * Disable page faults to prevent this potential deadlock
                 * and retry the copy outside the mmap_lock.
                 */
                pagefault_disable();
                ret = copy_from_user(page_kaddr,
                                     (const void __user *) src_addr,
                                     PAGE_SIZE);
                pagefault_enable();
                kunmap_local(page_kaddr);

                /* fallback to copy_from_user outside mmap_lock */
                if (unlikely(ret)) {
                        ret = -ENOENT;
                        *pagep = page;
                        /* don't free the page */
                        goto out;
                }

                flush_dcache_page(page);
        } else {
                page = *pagep;
                *pagep = NULL;
        }
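        /*
         * In the retry case above (*pagep already set), the page contents
         * were filled in by the caller with copy_from_user() outside
         * mmap_lock, so no further copying is needed here.
         */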

        /*
         * The memory barrier inside __SetPageUptodate makes sure that
         * preceding stores to the page contents become visible before
         * the set_pte_at() write.
         */
        __SetPageUptodate(page);

        ret = -ENOMEM;
        if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
                goto out_release;

        ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
                                       page, true, wp_copy);
        if (ret)
                goto out_release;
out:
        return ret;
out_release:
        put_page(page);
        goto out;
}

static int mfill_zeropage_pte(struct mm_struct *dst_mm,
                              pmd_t *dst_pmd,
                              struct vm_area_struct *dst_vma,
                              unsigned long dst_addr)
{
        pte_t _dst_pte, *dst_pte;
        spinlock_t *ptl;
        int ret;
        pgoff_t offset, max_off;
        struct inode *inode;

        _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
                                         dst_vma->vm_page_prot));
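        /*
         * The zero page is a global, read-only page; pte_mkspecial()
         * marks the entry as having no struct page refcounting, so no
         * rmap, LRU or memcg accounting is needed for it.
         */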
        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
        if (dst_vma->vm_file) {
                /* the shmem MAP_PRIVATE case requires checking the i_size */
                inode = dst_vma->vm_file->f_inode;
                offset = linear_page_index(dst_vma, dst_addr);
                max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
                ret = -EFAULT;
                if (unlikely(offset >= max_off))
                        goto out_unlock;
        }
        ret = -EEXIST;
        if (!pte_none(*dst_pte))
                goto out_unlock;
        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
        ret = 0;
out_unlock:
        pte_unmap_unlock(dst_pte, ptl);
        return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
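/*
 * UFFDIO_CONTINUE resolves a minor fault: the page is expected to already
 * exist in the shmem page cache (e.g. populated through another mapping),
 * so no data is copied here; we only look the folio up and install PTEs
 * for it in the faulting mapping.
 */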
static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
                                pmd_t *dst_pmd,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr,
                                bool wp_copy)
{
        struct inode *inode = file_inode(dst_vma->vm_file);
        pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
        struct folio *folio;
        struct page *page;
        int ret;

        ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
        /* Our caller expects us to return -EFAULT if we failed to find the folio */
        if (ret == -ENOENT)
                ret = -EFAULT;
        if (ret)
                goto out;
        if (!folio) {
                ret = -EFAULT;
                goto out;
        }

        page = folio_file_page(folio, pgoff);
        if (PageHWPoison(page)) {
                ret = -EIO;
                goto out_release;
        }

        ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
                                       page, false, wp_copy);
        if (ret)
                goto out_release;

        folio_unlock(folio);
        ret = 0;
out:
        return ret;
out_release:
        folio_unlock(folio);
        folio_put(folio);
        goto out;
}

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        pgd = pgd_offset(mm, address);
        p4d = p4d_alloc(mm, pgd, address);
        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, address);
        if (!pud)
                return NULL;
        /*
         * Note that we don't necessarily get here because the pmd is
         * missing: *pmd may already be established, and it may even be a
         * trans_huge_pmd, so the caller must re-check what it points to.
         */
        return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                                              struct vm_area_struct *dst_vma,
                                              unsigned long dst_start,
                                              unsigned long src_start,
                                              unsigned long len,
                                              enum mcopy_atomic_mode mode,
                                              bool wp_copy)
{
        int vm_shared = dst_vma->vm_flags & VM_SHARED;
        ssize_t err;
        pte_t *dst_pte;
        unsigned long src_addr, dst_addr;
        long copied;
        struct page *page;
        unsigned long vma_hpagesize;
        pgoff_t idx;
        u32 hash;
        struct address_space *mapping;

        /*
         * hugetlb does not provide a default zero huge page for every
         * supported huge page size; only a PMD_SIZE huge zero page may
         * exist, as used by THP.  Since we cannot reliably insert a zero
         * page, this feature is not supported.
         */
        if (mode == MCOPY_ATOMIC_ZEROPAGE) {
                mmap_read_unlock(dst_mm);
                return -EINVAL;
        }

        src_addr = src_start;
        dst_addr = dst_start;
        copied = 0;
        page = NULL;
        vma_hpagesize = vma_kernel_pagesize(dst_vma);

        /*
         * Validate alignment based on the huge page size
         */
        err = -EINVAL;
        if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
                goto out_unlock;

retry:
        /*
         * On routine entry dst_vma is set.  If we had to drop mmap_lock and
         * retry, dst_vma will be set to NULL and we must look it up again.
         */
        if (!dst_vma) {
                err = -ENOENT;
                dst_vma = find_dst_vma(dst_mm, dst_start, len);
                if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
                        goto out_unlock;

                err = -EINVAL;
                if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
                        goto out_unlock;

                vm_shared = dst_vma->vm_flags & VM_SHARED;
        }

        /*
         * If not shared, ensure the dst_vma has an anon_vma.
         */
        err = -ENOMEM;
        if (!vm_shared) {
                if (unlikely(anon_vma_prepare(dst_vma)))
                        goto out_unlock;
        }

        while (src_addr < src_start + len) {
                BUG_ON(dst_addr >= dst_start + len);

                /*
                 * Serialize via the vma_lock and hugetlb_fault_mutex.
                 * The vma_lock ensures the dst_pte remains valid even in
                 * the case of shared pmds.  The fault mutex prevents
                 * races with other faulting threads.
                 */
                idx = linear_page_index(dst_vma, dst_addr);
                mapping = dst_vma->vm_file->f_mapping;
                hash = hugetlb_fault_mutex_hash(mapping, idx);
                mutex_lock(&hugetlb_fault_mutex_table[hash]);
                hugetlb_vma_lock_read(dst_vma);

                err = -ENOMEM;
                dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
                if (!dst_pte) {
                        hugetlb_vma_unlock_read(dst_vma);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        goto out_unlock;
                }

                if (mode != MCOPY_ATOMIC_CONTINUE &&
                    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
                        err = -EEXIST;
                        hugetlb_vma_unlock_read(dst_vma);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        goto out_unlock;
                }

                err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
                                               dst_addr, src_addr, mode, &page,
                                               wp_copy);

                hugetlb_vma_unlock_read(dst_vma);
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);

                cond_resched();
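
                /*
                 * -ENOENT from hugetlb_mcopy_atomic_pte() means the source
                 * data could not be copied with page faults disabled: drop
                 * mmap_lock, copy into the already-allocated huge page with
                 * faults allowed, then retake the lock and retry with
                 * dst_vma cleared so it is looked up afresh.
                 */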

                if (unlikely(err == -ENOENT)) {
                        mmap_read_unlock(dst_mm);
                        BUG_ON(!page);

                        err = copy_huge_page_from_user(page,
                                                (const void __user *)src_addr,
                                                vma_hpagesize / PAGE_SIZE,
                                                true);
                        if (unlikely(err)) {
                                err = -EFAULT;
                                goto out;
                        }
                        mmap_read_lock(dst_mm);

                        dst_vma = NULL;
                        goto retry;
                } else
                        BUG_ON(page);

                if (!err) {
                        dst_addr += vma_hpagesize;
                        src_addr += vma_hpagesize;
                        copied += vma_hpagesize;

                        if (fatal_signal_pending(current))
                                err = -EINTR;
                }
                if (err)
                        break;
        }

out_unlock:
        mmap_read_unlock(dst_mm);
out:
        if (page)
                put_page(page);
        BUG_ON(copied < 0);
        BUG_ON(err > 0);
        BUG_ON(!copied && !err);
        return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
                                      struct vm_area_struct *dst_vma,
                                      unsigned long dst_start,
                                      unsigned long src_start,
                                      unsigned long len,
                                      enum mcopy_atomic_mode mode,
                                      bool wp_copy);
#endif /* CONFIG_HUGETLB_PAGE */

static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
                                                pmd_t *dst_pmd,
                                                struct vm_area_struct *dst_vma,
                                                unsigned long dst_addr,
                                                unsigned long src_addr,
                                                struct page **page,
                                                enum mcopy_atomic_mode mode,
                                                bool wp_copy)
{
        ssize_t err;

        if (mode == MCOPY_ATOMIC_CONTINUE) {
                return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
                                            wp_copy);
        }

        /*
         * The normal page fault path for a shmem mapping will invoke the
         * fault, fill the hole in the file and COW it right away.  The
         * result is plain anonymous memory.  So when we are asked to fill
         * a hole in a MAP_PRIVATE shmem mapping, we'll generate anonymous
         * memory directly without actually filling the hole.  For the
         * MAP_PRIVATE case the robustness check only happens in the
         * pagetable (to verify it's still none) and not in the radix tree.
         */
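        /*
         * Dispatch summary: private (non-VM_SHARED) vmas are handled here
         * directly, with mcopy_atomic_pte() for UFFDIO_COPY and
         * mfill_zeropage_pte() for UFFDIO_ZEROPAGE; shared vmas are backed
         * by shmem at this point and are handled by
         * shmem_mfill_atomic_pte(), which fills the page cache itself.
         */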
        if (!(dst_vma->vm_flags & VM_SHARED)) {
                if (mode == MCOPY_ATOMIC_NORMAL)
                        err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
                                               dst_addr, src_addr, page,
                                               wp_copy);
                else
                        err = mfill_zeropage_pte(dst_mm, dst_pmd,
                                                 dst_vma, dst_addr);
        } else {
                err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
                                             dst_addr, src_addr,
                                             mode != MCOPY_ATOMIC_NORMAL,
                                             wp_copy, page);
        }

        return err;
}

static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
                                              unsigned long dst_start,
                                              unsigned long src_start,
                                              unsigned long len,
                                              enum mcopy_atomic_mode mcopy_mode,
                                              atomic_t *mmap_changing,
                                              __u64 mode)
{
        struct vm_area_struct *dst_vma;
        ssize_t err;
        pmd_t *dst_pmd;
        unsigned long src_addr, dst_addr;
        long copied;
        struct page *page;
        bool wp_copy;

        /*
         * Sanitize the command parameters:
         */
        BUG_ON(dst_start & ~PAGE_MASK);
        BUG_ON(len & ~PAGE_MASK);

        /* Does the address range wrap, or is the span zero-sized? */
        BUG_ON(src_start + len <= src_start);
        BUG_ON(dst_start + len <= dst_start);
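        /*
         * A single unsigned comparison catches both cases: if len is 0,
         * start + len == start; if the range wraps past the top of the
         * address space, start + len becomes smaller than start.
         */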

        src_addr = src_start;
        dst_addr = dst_start;
        copied = 0;
        page = NULL;
retry:
        mmap_read_lock(dst_mm);

        /*
         * If memory mappings are changing because of a non-cooperative
         * operation (e.g. mremap) running in parallel, bail out and
         * request the user to retry later.
         */
        err = -EAGAIN;
        if (mmap_changing && atomic_read(mmap_changing))
                goto out_unlock;

        /*
         * Make sure the dst range is both valid and fully within a
         * single existing, userfaultfd-registered vma.
         */
        err = -ENOENT;
        dst_vma = find_dst_vma(dst_mm, dst_start, len);
        if (!dst_vma)
                goto out_unlock;

        err = -EINVAL;
        /*
         * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
         * it will overwrite vm_ops, so vma_is_anonymous must return false.
         */
        if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
            dst_vma->vm_flags & VM_SHARED))
                goto out_unlock;

        /*
         * Validate 'mode' now that we know the dst_vma: don't allow
         * a wrprotect copy if the userfaultfd didn't register as WP.
         */
        wp_copy = mode & UFFDIO_COPY_MODE_WP;
        if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
                goto out_unlock;

        /*
         * If this is a HUGETLB vma, pass off to the appropriate routine
         */
        if (is_vm_hugetlb_page(dst_vma))
                return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
                                              src_start, len, mcopy_mode,
                                              wp_copy);

        if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
                goto out_unlock;
        if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
                goto out_unlock;

        /*
         * Ensure the dst_vma has an anon_vma or this page
         * would get a NULL anon_vma when mapped into the
         * dst_vma.
         */
        err = -ENOMEM;
        if (!(dst_vma->vm_flags & VM_SHARED) &&
            unlikely(anon_vma_prepare(dst_vma)))
                goto out_unlock;

        while (src_addr < src_start + len) {
                pmd_t dst_pmdval;

                BUG_ON(dst_addr >= dst_start + len);

                dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
                if (unlikely(!dst_pmd)) {
                        err = -ENOMEM;
                        break;
                }

                dst_pmdval = pmdp_get_lockless(dst_pmd);
                /*
                 * If the dst_pmd is mapped as THP, don't override it and
                 * just be strict.
                 */
                if (unlikely(pmd_trans_huge(dst_pmdval))) {
                        err = -EEXIST;
                        break;
                }
                if (unlikely(pmd_none(dst_pmdval)) &&
                    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
                        err = -ENOMEM;
                        break;
                }
                /* If a huge pmd materialized from under us, fail */
                if (unlikely(pmd_trans_huge(*dst_pmd))) {
                        err = -EFAULT;
                        break;
                }

                BUG_ON(pmd_none(*dst_pmd));
                BUG_ON(pmd_trans_huge(*dst_pmd));

                err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
                                       src_addr, &page, mcopy_mode, wp_copy);
                cond_resched();
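
                /*
                 * Like the hugetlb path: -ENOENT means the source page
                 * could not be copied with page faults disabled.  Drop
                 * mmap_lock, copy with faults enabled into the page that
                 * mcopy_atomic_pte() handed back in 'page', then retry;
                 * the next pass consumes it through *pagep.
                 */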

                if (unlikely(err == -ENOENT)) {
                        void *page_kaddr;

                        mmap_read_unlock(dst_mm);
                        BUG_ON(!page);

                        page_kaddr = kmap_local_page(page);
                        err = copy_from_user(page_kaddr,
                                             (const void __user *) src_addr,
                                             PAGE_SIZE);
                        kunmap_local(page_kaddr);
                        if (unlikely(err)) {
                                err = -EFAULT;
                                goto out;
                        }
                        flush_dcache_page(page);
                        goto retry;
                } else
                        BUG_ON(page);

                if (!err) {
                        dst_addr += PAGE_SIZE;
                        src_addr += PAGE_SIZE;
                        copied += PAGE_SIZE;

                        if (fatal_signal_pending(current))
                                err = -EINTR;
                }
                if (err)
                        break;
        }

out_unlock:
        mmap_read_unlock(dst_mm);
out:
        if (page)
                put_page(page);
        BUG_ON(copied < 0);
        BUG_ON(err > 0);
        BUG_ON(!copied && !err);
        return copied ? copied : err;
}
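
/*
 * The wrappers below are the entry points used by the UFFDIO_COPY,
 * UFFDIO_ZEROPAGE and UFFDIO_CONTINUE ioctl handlers in fs/userfaultfd.c;
 * each returns the number of bytes actually filled, or a negative errno
 * if nothing was copied.
 */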

ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
                     unsigned long src_start, unsigned long len,
                     atomic_t *mmap_changing, __u64 mode)
{
        return __mcopy_atomic(dst_mm, dst_start, src_start, len,
                              MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
                       unsigned long len, atomic_t *mmap_changing)
{
        return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
                              mmap_changing, 0);
}

ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
                       unsigned long len, atomic_t *mmap_changing)
{
        return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
                              mmap_changing, 0);
}

void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
                   unsigned long start, unsigned long len, bool enable_wp)
{
        struct mmu_gather tlb;
        pgprot_t newprot;

        if (enable_wp)
                newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
        else
                newprot = vm_get_page_prot(dst_vma->vm_flags);
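
        /*
         * When enabling write protection, the page protection is computed
         * as if the vma were not writable, so present ptes lose their
         * write bit; MM_CP_UFFD_WP / MM_CP_UFFD_WP_RESOLVE additionally
         * tell change_protection() to set or clear the uffd-wp bit on the
         * affected ptes.
         */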

        tlb_gather_mmu(&tlb, dst_mm);
        change_protection(&tlb, dst_vma, start, start + len, newprot,
                          enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
        tlb_finish_mmu(&tlb);
}

int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
                        unsigned long len, bool enable_wp,
                        atomic_t *mmap_changing)
{
        struct vm_area_struct *dst_vma;
        unsigned long page_mask;
        int err;

        /*
         * Sanitize the command parameters:
         */
        BUG_ON(start & ~PAGE_MASK);
        BUG_ON(len & ~PAGE_MASK);

        /* Does the address range wrap, or is the span zero-sized? */
        BUG_ON(start + len <= start);

        mmap_read_lock(dst_mm);

        /*
         * If memory mappings are changing because of a non-cooperative
         * operation (e.g. mremap) running in parallel, bail out and
         * request the user to retry later.
         */
        err = -EAGAIN;
        if (mmap_changing && atomic_read(mmap_changing))
                goto out_unlock;

        err = -ENOENT;
        dst_vma = find_dst_vma(dst_mm, start, len);

        if (!dst_vma)
                goto out_unlock;
        if (!userfaultfd_wp(dst_vma))
                goto out_unlock;
        if (!vma_can_userfault(dst_vma, dst_vma->vm_flags))
                goto out_unlock;

        if (is_vm_hugetlb_page(dst_vma)) {
                err = -EINVAL;
                page_mask = vma_kernel_pagesize(dst_vma) - 1;
                if ((start & page_mask) || (len & page_mask))
                        goto out_unlock;
        }
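        /*
         * For example, on a 2MB hugetlb vma page_mask is 0x1fffff, so both
         * start and len must be 2MB aligned; unaligned requests cannot be
         * write-protected at huge page granularity and are rejected with
         * -EINVAL above.
         */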

        uffd_wp_range(dst_mm, dst_vma, start, len, enable_wp);

        err = 0;
out_unlock:
        mmap_read_unlock(dst_mm);
        return err;
}