// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"
static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!range_in_vma(dst_vma, dst_start, dst_start + len))
		return NULL;

	/*
	 * Check that the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}
/* Check if dst_addr is outside of the file's size. Must be called with ptl held. */
static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
				 unsigned long dst_addr)
{
	struct inode *inode;
	pgoff_t offset, max_off;

	if (!dst_vma->vm_file)
		return false;

	inode = dst_vma->vm_file->f_inode;
	offset = linear_page_index(dst_vma, dst_addr);
	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	return offset >= max_off;
}
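/*
 * Illustrative example (not from the original source): with a 4096-byte
 * PAGE_SIZE and i_size_read() returning 5000 bytes, max_off is
 * DIV_ROUND_UP(5000, 4096) = 2, so page offsets 0 and 1 lie inside the file
 * and any dst_addr that maps to offset 2 or beyond is rejected as past EOF.
 */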
/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page_mapping(page);
	spinlock_t *ptl;
	struct folio *folio;
	int ret;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte, dst_vma);
	if (flags & MFILL_ATOMIC_WP)
		_dst_pte = pte_mkuffd_wp(_dst_pte);
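	/*
	 * Note on the checks above: a page-cache page installed into a
	 * private (!VM_SHARED) mapping is never made writable here, so a
	 * later write fault will COW it; anonymous and shared destinations
	 * keep their VM_WRITE permission, and MFILL_ATOMIC_WP additionally
	 * marks the PTE as uffd write-protected.
	 */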
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow overwriting a pte marker: consider the case where both
	 * MISSING and WP are registered; we first wr-protect a none pte which
	 * has no page cache page backing it, and then access the page.
	 */
	if (!pte_none_mostly(ptep_get(dst_pte)))
		goto out_unlock;

	folio = page_folio(page);
	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			folio_add_lru(folio);
		folio_add_file_rmap_pte(folio, page, dst_vma);
	} else {
		folio_add_new_anon_rmap(folio, dst_vma, dst_addr);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}
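/*
 * Callers pass newly_allocated == true only for a page they have just
 * allocated themselves (e.g. the UFFDIO_COPY path below), so the folio still
 * has to be added to the LRU here; UFFDIO_CONTINUE installs an existing
 * page-cache page and passes false.
 */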
static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{
	void *kaddr;
	int ret;
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
				dst_addr, false);

	kaddr = kmap_local_folio(folio, 0);
	/*
	 * The read mmap_lock is held here. Although the mmap_lock can be
	 * taken for read recursively, a deadlock is still possible if a
	 * writer has taken the lock. For example:
	 *
	 * process A thread 1 takes read lock on own mmap_lock
	 * process A thread 2 calls mmap, blocks taking write lock
	 * process B thread 1 takes page fault, read lock on own mmap lock
	 * process B thread 2 calls mmap, blocks taking write lock
	 * process A thread 1 blocks taking read lock on process B
	 * process B thread 1 blocks taking read lock on process A
	 *
	 * Disable page faults to prevent potential deadlock
	 * and retry the copy outside the mmap_lock.
	 */
	pagefault_disable();
	ret = copy_from_user(kaddr, (const void __user *) src_addr,
			     PAGE_SIZE);
	pagefault_enable();

	/* fallback to copy_from_user outside mmap_lock */
	if (unlikely(ret)) {
		ret = -ENOENT;
		*foliop = folio;
		/* don't free the page */
		goto out;
	}
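	/*
	 * On a failed in-atomic copy this helper returns -ENOENT with *foliop
	 * set; the callers (mfill_atomic() and mfill_atomic_hugetlb() below)
	 * then drop mmap_lock, redo the copy outside the lock and retry the
	 * whole operation.
	 */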
	flush_dcache_folio(folio);

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, flags);
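/*
 * Handles UFFDIO_ZEROPAGE for private (!VM_SHARED) destinations: the shared
 * zero page is mapped read-only through a special PTE, so no memory is
 * allocated until the address is actually written to.
 */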
static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;

	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct folio *folio;
	struct page *page;
	int ret;

	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
	/* Our caller expects us to return -EFAULT if we failed to find the folio */

	page = folio_file_page(folio, pgoff);
	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, false, flags);
/* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
static int mfill_atomic_pte_poison(pmd_t *dst_pmd,
				   struct vm_area_struct *dst_vma,
				   unsigned long dst_addr,
				   uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	if (mfill_file_over_size(dst_vma, dst_addr)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	/* Refuse to overwrite any PTE, even a PTE marker (e.g. UFFD WP). */
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
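	/*
	 * PTE_MARKER_POISONED makes a later access to this address fault as
	 * if the backing page were hardware-poisoned (the faulting task gets
	 * SIGBUS), without allocating any memory for the range.
	 */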
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we aren't necessarily called because the pmd is missing:
	 * *pmd may already be established, and it may even be a
	 * trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}
#ifdef CONFIG_HUGETLB_PAGE
/*
 * mfill_atomic processing for HUGETLB vmas. Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      atomic_t *mmap_changing,
					      uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes
	 * supported by hugetlb. A PMD_SIZE huge page may exist as used
	 * by THP. Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;

	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on the huge page size.
	 */
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

	/*
	 * On routine entry dst_vma is set. If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
		goto out_unlock;

	if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
		goto out_unlock;

	vm_shared = dst_vma->vm_flags & VM_SHARED;

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	if (unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;
	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via vma_lock and hugetlb_fault_mutex.
		 * vma_lock ensures the dst_pte remains valid even
		 * in the case of shared pmds. The fault mutex prevents
		 * races with other faulting threads.
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		hugetlb_vma_lock_read(dst_vma);

		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
					       src_addr, flags, &folio);

		hugetlb_vma_unlock_read(dst_vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);

			err = copy_folio_from_user(folio,
						   (const void __user *)src_addr, true);

			mmap_read_lock(dst_mm);
			/*
			 * If memory mappings are changing because of a
			 * non-cooperative operation (e.g. mremap) running in
			 * parallel, bail out and request the user to retry
			 * later.
			 */
			if (mmap_changing && atomic_read(mmap_changing)) {
				err = -EAGAIN;
				break;
			}

		dst_addr += vma_hpagesize;
		src_addr += vma_hpagesize;
		copied += vma_hpagesize;

		if (fatal_signal_pending(current))
			err = -EINTR;

	mmap_read_unlock(dst_mm);

	BUG_ON(!copied && !err);
	return copied ? copied : err;
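/*
 * Retry protocol used above: when hugetlb_mfill_atomic_pte() returns -ENOENT
 * the huge page has been allocated but its contents still need to be copied
 * in, so the loop drops mmap_lock, copies the source data without the lock
 * held, re-takes the lock and retries; dst_vma has to be looked up again
 * because the mapping may have changed while the lock was dropped.
 */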
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    atomic_t *mmap_changing,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */
static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{
	ssize_t err;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, flags);
	} else if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
		return mfill_atomic_pte_poison(dst_pmd, dst_vma,
					       dst_addr, flags);
	}

	/*
	 * The normal page fault path for a shmem mapping will invoke the
	 * fault, fill the hole in the file and COW it right away. The result
	 * is plain anonymous memory. So when we are asked to fill a hole in a
	 * MAP_PRIVATE shmem mapping, we'll generate the anonymous memory
	 * directly without actually filling the hole. For the MAP_PRIVATE
	 * case the robustness check only happens in the pagetable (to verify
	 * it's still none) and not in the page cache.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr,
						    flags, foliop);
		else
			err = mfill_atomic_pte_zeropage(dst_pmd,
							dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     flags, foliop);
	}

	return err;
}
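/*
 * Summary of the dispatch above: CONTINUE and POISON are handled the same way
 * regardless of the VMA type; COPY and ZEROPAGE on a private (!VM_SHARED)
 * destination fill anonymous pages directly, while shared shmem destinations
 * go through shmem_mfill_atomic_pte().
 */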
static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    atomic_t *mmap_changing,
					    uffd_flags_t flags)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);
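	/*
	 * Illustrative arithmetic for the wrap check: on a 64-bit kernel, if
	 * dst_start == ULONG_MAX - PAGE_SIZE + 1 and len == 2 * PAGE_SIZE,
	 * then dst_start + len wraps around to PAGE_SIZE, which is
	 * <= dst_start, so the request is rejected; len == 0 trips the same
	 * test.
	 */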
	src_addr = src_start;
	dst_addr = dst_start;
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, and that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine.
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
					    len, mmap_changing, flags);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) &&
	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * parent after fork.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;
	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, flags, &folio);

		if (unlikely(err == -ENOENT)) {
			void *kaddr;

			mmap_read_unlock(dst_mm);

			kaddr = kmap_local_folio(folio, 0);
			err = copy_from_user(kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);

			flush_dcache_folio(folio);

		dst_addr += PAGE_SIZE;
		src_addr += PAGE_SIZE;
		copied += PAGE_SIZE;

		if (fatal_signal_pending(current))
			err = -EINTR;

	mmap_read_unlock(dst_mm);

	BUG_ON(!copied && !err);
	return copied ? copied : err;
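/*
 * Like a short write(), mfill_atomic() reports partial progress: if some
 * pages were filled before an error or a fatal signal, the byte count is
 * returned and the error is dropped; the error is only returned when no
 * progress was made at all (the BUG_ON above guarantees one of the two is
 * set).
 */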
ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
			  unsigned long src_start, unsigned long len,
			  atomic_t *mmap_changing, uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
}

ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
}

ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing,
			      uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
}

ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
			    unsigned long len, atomic_t *mmap_changing,
			    uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_POISON));
}
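/*
 * The wrappers above only select the MFILL_ATOMIC_* mode bits before calling
 * mfill_atomic(); the modes that take no source range (zeropage, continue,
 * poison) pass 0 as src_start.
 */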
long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
{
	unsigned int mm_cp_flags;
	struct mmu_gather tlb;
	long ret;

	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
			"The address range exceeds VMA boundary.\n");
	if (enable_wp)
		mm_cp_flags = MM_CP_UFFD_WP;
	else
		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;

	/*
	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
	 * to be write-protected by default whenever protection changes.
	 * Try upgrading write permissions manually.
	 */
	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
	tlb_finish_mmu(&tlb);

	return ret;
}
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	unsigned long end = start + len;
	unsigned long _start, _end;
	struct vm_area_struct *dst_vma;
	unsigned long page_mask;
	long err;
	VMA_ITERATOR(vmi, dst_mm, start);

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	for_each_vma_range(vmi, dst_vma, end) {

		if (!userfaultfd_wp(dst_vma)) {
			err = -ENOENT;
			break;
		}

		if (is_vm_hugetlb_page(dst_vma)) {
			err = -EINVAL;
			page_mask = vma_kernel_pagesize(dst_vma) - 1;
			if ((start & page_mask) || (len & page_mask))
				break;
		}

		_start = max(dst_vma->vm_start, start);
		_end = min(dst_vma->vm_end, end);

		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);

		/* Return 0 on success, <0 on failure */
		if (err < 0)
			break;
		err = 0;
	}
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}
void double_pt_lock(spinlock_t *ptl1,
		    spinlock_t *ptl2)
{
	/* exchange ptl1 and ptl2 */

	/* lock in virtual address order to avoid lock inversion */
	spin_lock(ptl1);
	if (ptl1 != ptl2)
		spin_lock_nested(ptl2, SINGLE_DEPTH_NESTING);

void double_pt_unlock(spinlock_t *ptl1,
		      spinlock_t *ptl2)
static int move_present_pte(struct mm_struct *mm,
			    struct vm_area_struct *dst_vma,
			    struct vm_area_struct *src_vma,
			    unsigned long dst_addr, unsigned long src_addr,
			    pte_t *dst_pte, pte_t *src_pte,
			    pte_t orig_dst_pte, pte_t orig_src_pte,
			    spinlock_t *dst_ptl, spinlock_t *src_ptl,
			    struct folio *src_folio)
{
	int err = 0;

	double_pt_lock(dst_ptl, src_ptl);

	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
		err = -EAGAIN;
		goto out;
	}
	if (folio_test_large(src_folio) ||
	    folio_maybe_dma_pinned(src_folio) ||
	    !PageAnonExclusive(&src_folio->page)) {
		err = -EBUSY;
		goto out;
	}

	folio_move_anon_rmap(src_folio, dst_vma);
	WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));

	orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
	/* Folio got pinned from under us. Put it back and fail the move. */
	if (folio_maybe_dma_pinned(src_folio)) {
		set_pte_at(mm, src_addr, src_pte, orig_src_pte);
		err = -EBUSY;
		goto out;
	}

	orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
	/* Follow mremap() behavior and treat the entry as dirty after the move */
	orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);

	set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
out:
	double_pt_unlock(dst_ptl, src_ptl);
	return err;
}
static int move_swap_pte(struct mm_struct *mm,
			 unsigned long dst_addr, unsigned long src_addr,
			 pte_t *dst_pte, pte_t *src_pte,
			 pte_t orig_dst_pte, pte_t orig_src_pte,
			 spinlock_t *dst_ptl, spinlock_t *src_ptl)
{
	if (!pte_swp_exclusive(orig_src_pte))
		return -EBUSY;

	double_pt_lock(dst_ptl, src_ptl);

	if (!pte_same(ptep_get(src_pte), orig_src_pte) ||
	    !pte_same(ptep_get(dst_pte), orig_dst_pte)) {
		double_pt_unlock(dst_ptl, src_ptl);
		return -EAGAIN;
	}

	orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
	set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
	double_pt_unlock(dst_ptl, src_ptl);

	return 0;
}
/*
 * The mmap_lock for reading is held by the caller. Just move the page
 * from src_pmd to dst_pmd if possible, and return 0 if it succeeded
 * in moving the page.
 */
static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd,
			  struct vm_area_struct *dst_vma,
			  struct vm_area_struct *src_vma,
			  unsigned long dst_addr, unsigned long src_addr,
			  __u64 mode)
{
	swp_entry_t entry;
	pte_t orig_src_pte, orig_dst_pte;
	pte_t src_folio_pte;
	spinlock_t *src_ptl, *dst_ptl;
	pte_t *src_pte = NULL;
	pte_t *dst_pte = NULL;

	struct folio *src_folio = NULL;
	struct anon_vma *src_anon_vma = NULL;
	struct mmu_notifier_range range;
	int err = 0;

	flush_cache_range(src_vma, src_addr, src_addr + PAGE_SIZE);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				src_addr, src_addr + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);
retry:
	dst_pte = pte_offset_map_nolock(mm, dst_pmd, dst_addr, &dst_ptl);

	/* Retry if a huge pmd materialized from under us */
	if (unlikely(!dst_pte)) {
		err = -EAGAIN;
		goto out;
	}

	src_pte = pte_offset_map_nolock(mm, src_pmd, src_addr, &src_ptl);

	/*
	 * We held the mmap_lock for reading, so MADV_DONTNEED
	 * can zap transparent huge pages under us, or the
	 * transparent huge page fault can establish new
	 * transparent huge pages under us.
	 */
	if (unlikely(!src_pte)) {
		err = -EAGAIN;
		goto out;
	}

	/* Sanity checks before the operation */
	if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) ||
	    WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) {
		err = -EINVAL;
		goto out;
	}

	spin_lock(dst_ptl);
	orig_dst_pte = ptep_get(dst_pte);
	spin_unlock(dst_ptl);
	if (!pte_none(orig_dst_pte)) {
		err = -EEXIST;
		goto out;
	}

	spin_lock(src_ptl);
	orig_src_pte = ptep_get(src_pte);
	spin_unlock(src_ptl);
	if (pte_none(orig_src_pte)) {
		if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES))
			err = -ENOENT;
		else /* nothing to do to move a hole */
			err = 0;
		goto out;
	}

	/* If the PTE changed after we locked the folio then start over */
	if (src_folio && unlikely(!pte_same(src_folio_pte, orig_src_pte))) {
		err = -EAGAIN;
		goto out;
	}
	if (pte_present(orig_src_pte)) {
		/*
		 * Pin and lock both source folio and anon_vma. Since we are in
		 * RCU read section, we can't block, so on contention have to
		 * unmap the ptes, obtain the lock and retry.
		 */
		if (!src_folio) {
			struct folio *folio;

			/*
			 * Pin the page while holding the lock to be sure the
			 * page isn't freed under us
			 */
			spin_lock(src_ptl);
			if (!pte_same(orig_src_pte, ptep_get(src_pte))) {
				spin_unlock(src_ptl);
				err = -EAGAIN;
				goto out;
			}

			folio = vm_normal_folio(src_vma, src_addr, orig_src_pte);
			if (!folio || !PageAnonExclusive(&folio->page)) {
				spin_unlock(src_ptl);
				err = -EBUSY;
				goto out;
			}

			folio_get(folio);
			src_folio = folio;
			src_folio_pte = orig_src_pte;
			spin_unlock(src_ptl);

			if (!folio_trylock(src_folio)) {
				pte_unmap(&orig_src_pte);
				pte_unmap(&orig_dst_pte);
				src_pte = dst_pte = NULL;
				/* now we can block and wait */
				folio_lock(src_folio);
				goto retry;
			}

			if (WARN_ON_ONCE(!folio_test_anon(src_folio))) {
				err = -EBUSY;
				goto out;
			}
		}
		/* at this point we have src_folio locked */
		if (folio_test_large(src_folio)) {
			/* split_folio() can block */
			pte_unmap(&orig_src_pte);
			pte_unmap(&orig_dst_pte);
			src_pte = dst_pte = NULL;
			err = split_folio(src_folio);
			if (err)
				goto out;
			/* have to reacquire the folio after it got split */
			folio_unlock(src_folio);
			folio_put(src_folio);
			src_folio = NULL;
			goto retry;
		}

		if (!src_anon_vma) {
			/*
			 * folio_referenced walks the anon_vma chain
			 * without the folio lock. Serialize against it with
			 * the anon_vma lock; the folio lock is not enough.
			 */
			src_anon_vma = folio_get_anon_vma(src_folio);
			if (!src_anon_vma) {
				/* page was unmapped from under us */
				err = -EAGAIN;
				goto out;
			}
			if (!anon_vma_trylock_write(src_anon_vma)) {
				pte_unmap(&orig_src_pte);
				pte_unmap(&orig_dst_pte);
				src_pte = dst_pte = NULL;
				/* now we can block and wait */
				anon_vma_lock_write(src_anon_vma);
				goto retry;
			}
		}

		err = move_present_pte(mm, dst_vma, src_vma,
				       dst_addr, src_addr, dst_pte, src_pte,
				       orig_dst_pte, orig_src_pte,
				       dst_ptl, src_ptl, src_folio);
	} else {
		entry = pte_to_swp_entry(orig_src_pte);
		if (non_swap_entry(entry)) {
			if (is_migration_entry(entry)) {
				pte_unmap(&orig_src_pte);
				pte_unmap(&orig_dst_pte);
				src_pte = dst_pte = NULL;
				migration_entry_wait(mm, src_pmd, src_addr);
				err = -EAGAIN;
			} else
				err = -EFAULT;
			goto out;
		}

		err = move_swap_pte(mm, dst_addr, src_addr,
				    dst_pte, src_pte,
				    orig_dst_pte, orig_src_pte,
				    dst_ptl, src_ptl);
	}

out:
	if (src_anon_vma) {
		anon_vma_unlock_write(src_anon_vma);
		put_anon_vma(src_anon_vma);
	}
	if (src_folio) {
		folio_unlock(src_folio);
		folio_put(src_folio);
	}
	mmu_notifier_invalidate_range_end(&range);

	return err;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline bool move_splits_huge_pmd(unsigned long dst_addr,
					unsigned long src_addr,
					unsigned long src_end)
{
	return (src_addr & ~HPAGE_PMD_MASK) || (dst_addr & ~HPAGE_PMD_MASK) ||
		src_end - src_addr < HPAGE_PMD_SIZE;
}
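/*
 * Illustrative example: with a 2 MiB HPAGE_PMD_SIZE, moving from a 2 MiB
 * aligned src_addr to a dst_addr that is only 4 KiB aligned requires
 * splitting the source huge pmd, as does a remaining length smaller than
 * 2 MiB.
 */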
#else
static inline bool move_splits_huge_pmd(unsigned long dst_addr,
					unsigned long src_addr,
					unsigned long src_end)
{
	/* This is unreachable anyway, just to avoid warnings when HPAGE_PMD_SIZE==0 */
	return true;
}
#endif

static inline bool vma_move_compatible(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_PFNMAP | VM_IO | VM_HUGETLB |
				  VM_MIXEDMAP | VM_SHADOW_STACK));
}
static int validate_move_areas(struct userfaultfd_ctx *ctx,
			       struct vm_area_struct *src_vma,
			       struct vm_area_struct *dst_vma)
{
	/* Only allow moving if both have the same access and protection */
	if ((src_vma->vm_flags & VM_ACCESS_FLAGS) != (dst_vma->vm_flags & VM_ACCESS_FLAGS) ||
	    pgprot_val(src_vma->vm_page_prot) != pgprot_val(dst_vma->vm_page_prot))
		return -EINVAL;

	/* Only allow moving if both are mlocked or both aren't */
	if ((src_vma->vm_flags & VM_LOCKED) != (dst_vma->vm_flags & VM_LOCKED))
		return -EINVAL;

	/*
	 * For now, we keep it simple and only move between writable VMAs.
	 * Access flags are equal, therefore checking only the source is enough.
	 */
	if (!(src_vma->vm_flags & VM_WRITE))
		return -EINVAL;

	/* Check if vma flags indicate content which can be moved */
	if (!vma_move_compatible(src_vma) || !vma_move_compatible(dst_vma))
		return -EINVAL;

	/* Ensure dst_vma is registered with the uffd we are operating on */
	if (!dst_vma->vm_userfaultfd_ctx.ctx ||
	    dst_vma->vm_userfaultfd_ctx.ctx != ctx)
		return -EINVAL;

	/* Only allow moving across anonymous vmas */
	if (!vma_is_anonymous(src_vma) || !vma_is_anonymous(dst_vma))
		return -EINVAL;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * parent after fork.
	 */
	if (unlikely(anon_vma_prepare(dst_vma)))
		return -ENOMEM;

	return 0;
}
/**
 * move_pages - move arbitrary anonymous pages of an existing vma
 * @ctx: pointer to the userfaultfd context
 * @mm: the address space to move pages
 * @dst_start: start of the destination virtual memory range
 * @src_start: start of the source virtual memory range
 * @len: length of the virtual memory range
 * @mode: flags from uffdio_move.mode
 *
 * Must be called with mmap_lock held for read.
 *
 * move_pages() remaps arbitrary anonymous pages atomically in zero
 * copy. It only works on non-shared anonymous pages because those can
 * be relocated without generating non-linear anon_vmas in the rmap
 * code.
 *
 * It provides a zero copy mechanism to handle userspace page faults.
 * The source vma pages should have mapcount == 1, which can be
 * enforced by using madvise(MADV_DONTFORK) on the src vma.
 *
 * The thread receiving the page during the userland page fault
 * will receive the faulting page in the source vma through the network,
 * storage or any other I/O device (MADV_DONTFORK in the source vma
 * prevents move_pages() from failing with -EBUSY if the process forks
 * before move_pages() is called), then it will call move_pages() to map
 * the page in the faulting address in the destination vma.
 *
 * This userfaultfd command works purely via pagetables, so it's the
 * most efficient way to move physical non-shared anonymous pages
 * across different virtual addresses. Unlike mremap()/mmap()/munmap()
 * it does not create any new vmas. The mapping in the destination
 * address is atomic.
 *
 * It only works if the vma protection bits are identical from the
 * source and destination vma.
 *
 * It can remap non-shared anonymous pages within the same vma too.
 *
 * If the source virtual memory range has any unmapped holes, or if
 * the destination virtual memory range is not a whole unmapped hole,
 * move_pages() will fail respectively with -ENOENT or -EEXIST. This
 * provides a very strict behavior to avoid any chance of memory
 * corruption going unnoticed if there are userland race conditions.
 * Only one thread should resolve the userland page fault at any given
 * time for any given faulting address. This means that if two threads
 * try to both call move_pages() on the same destination address at the
 * same time, the second thread will get an explicit error from this
 * command.
 *
 * The command retval will be "len" if successful. The command
 * however can be interrupted by fatal signals or errors. If
 * interrupted it will return the number of bytes successfully
 * remapped before the interruption if any, or the negative error if
 * none. It will never return zero. Either it will return an error or
 * an amount of bytes successfully moved. If the retval reports a
 * "short" remap, the move_pages() command should be repeated by
 * userland with src+retval, dst+retval, len-retval if it wants to know
 * about the error that interrupted it.
 *
 * The UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES flag can be specified to
 * prevent -ENOENT errors from materializing if there are holes in the
 * source virtual range that is being remapped. The holes will be
 * accounted as successfully remapped in the retval of the
 * command. This is mostly useful to remap hugepage-aligned virtual
 * regions without knowing whether there are transparent hugepages
 * in the regions or not, but preventing the risk of having to split
 * the hugepmd during the remap.
 *
 * If there's any rmap walk that is taking the anon_vma locks without
 * first obtaining the folio lock (the only current instance is
 * folio_referenced), they will have to verify if the folio->mapping
 * has changed after taking the anon_vma lock. If it changed they
 * should release the lock and retry obtaining a new anon_vma, because
 * it means the anon_vma was changed by move_pages() before the lock
 * could be obtained. This is the only additional complexity added to
 * the rmap code to provide this anonymous page remapping functionality.
 */
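/*
 * Illustrative userspace sketch (not part of this file; the struct and ioctl
 * names below are from the userfaultfd UAPI and are assumed here only for
 * illustration): resuming after a "short" move as described above, using the
 * byte count the kernel writes back into the move field.
 *
 *	struct uffdio_move mv = {
 *		.dst = dst, .src = src, .len = len, .mode = 0,
 *	};
 *	if (ioctl(uffd, UFFDIO_MOVE, &mv) && errno == EAGAIN && mv.move > 0) {
 *		// short move: mv.move bytes were remapped, resume after them
 *		mv.dst += mv.move;
 *		mv.src += mv.move;
 *		mv.len -= mv.move;
 *		ioctl(uffd, UFFDIO_MOVE, &mv);
 *	}
 */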
ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
		   unsigned long dst_start, unsigned long src_start,
		   unsigned long len, __u64 mode)
{
	struct vm_area_struct *src_vma, *dst_vma;
	unsigned long src_addr, dst_addr;
	pmd_t *src_pmd, *dst_pmd;
	long err = -EINVAL;
	ssize_t moved = 0;

	/* Sanitize the command parameters. */
	if (WARN_ON_ONCE(src_start & ~PAGE_MASK) ||
	    WARN_ON_ONCE(dst_start & ~PAGE_MASK) ||
	    WARN_ON_ONCE(len & ~PAGE_MASK))
		goto out;

	/* Does the address range wrap, or is the span zero-sized? */
	if (WARN_ON_ONCE(src_start + len <= src_start) ||
	    WARN_ON_ONCE(dst_start + len <= dst_start))
		goto out;

	/*
	 * Make sure the vma is not shared, and that the src and dst remap
	 * ranges are both valid and fully within a single existing
	 * vma.
	 */
	src_vma = find_vma(mm, src_start);
	if (!src_vma || (src_vma->vm_flags & VM_SHARED))
		goto out;
	if (src_start < src_vma->vm_start ||
	    src_start + len > src_vma->vm_end)
		goto out;

	dst_vma = find_vma(mm, dst_start);
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out;
	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		goto out;

	err = validate_move_areas(ctx, src_vma, dst_vma);
	if (err)
		goto out;
	for (src_addr = src_start, dst_addr = dst_start;
	     src_addr < src_start + len;) {
		spinlock_t *ptl;
		pmd_t dst_pmdval;
		unsigned long step_size;

		/*
		 * The below works because an anonymous area would not have a
		 * transparent huge PUD. If file-backed support is added,
		 * that case would need to be handled here.
		 */
		src_pmd = mm_find_pmd(mm, src_addr);
		if (unlikely(!src_pmd)) {
			if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
				err = -ENOENT;
				break;
			}
			src_pmd = mm_alloc_pmd(mm, src_addr);
			if (unlikely(!src_pmd)) {
				err = -ENOMEM;
				break;
			}
		}
		dst_pmd = mm_alloc_pmd(mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't override it and just
		 * be strict. If dst_pmd changes into THP after this check,
		 * move_pages_huge_pmd() will detect the change and retry
		 * while move_pages_pte() will detect the change and fail.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}

		ptl = pmd_trans_huge_lock(src_pmd, src_vma);
		if (ptl) {
			if (pmd_devmap(*src_pmd)) {
				spin_unlock(ptl);
				err = -ENOENT;
				break;
			}
			/* Avoid moving zeropages for now */
			if (is_huge_zero_pmd(*src_pmd)) {
				spin_unlock(ptl);
				err = -EBUSY;
				break;
			}

			/* Check if we can move the pmd without splitting it. */
			if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
			    !pmd_none(dst_pmdval)) {
				struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));

				if (!folio || !PageAnonExclusive(&folio->page)) {
					spin_unlock(ptl);
					err = -EBUSY;
					break;
				}

				spin_unlock(ptl);
				split_huge_pmd(src_vma, src_pmd, src_addr);
				/* The folio will be split by move_pages_pte() */
				continue;
			}

			err = move_pages_huge_pmd(mm, dst_pmd, src_pmd,
						  dst_pmdval, dst_vma, src_vma,
						  dst_addr, src_addr);
			step_size = HPAGE_PMD_SIZE;
		} else {
			if (pmd_none(*src_pmd)) {
				if (!(mode & UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES)) {
					err = -ENOENT;
					break;
				}
				if (unlikely(__pte_alloc(mm, src_pmd))) {
					err = -ENOMEM;
					break;
				}
			}

			if (unlikely(pte_alloc(mm, dst_pmd))) {
				err = -ENOMEM;
				break;
			}

			err = move_pages_pte(mm, dst_pmd, src_pmd,
					     dst_vma, src_vma,
					     dst_addr, src_addr, mode);
			step_size = PAGE_SIZE;
		}

		if (fatal_signal_pending(current)) {
			/* Do not override an error */
			if (!err || err == -EAGAIN)
				err = -EINTR;
			break;
		}

		/* Proceed to the next page */
		dst_addr += step_size;
		src_addr += step_size;
		moved += step_size;
	}

out:
	VM_WARN_ON(moved < 0);
	VM_WARN_ON(err > 0);
	VM_WARN_ON(!moved && !err);
	return moved ? moved : err;
}