// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check that the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, bool wp_copy)
{
	int ret;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page->mapping;
	spinlock_t *ptl;
	struct inode *inode;
	pgoff_t offset, max_off;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable) {
		if (wp_copy)
			_dst_pte = pte_mkuffd_wp(_dst_pte);
		else
			_dst_pte = pte_mkwrite(_dst_pte);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;

	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			lru_cache_add(page);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
		lru_cache_add_inactive_or_unevictable(page, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}

		flush_dcache_page(page);
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true, wp_copy);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	put_page(page);
	goto out;
}

static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
				pmd_t *dst_pmd,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				bool wp_copy)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct page *page;
	int ret;

	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
	if (ret)
		goto out;
	if (!page) {
		ret = -EFAULT;
		goto out;
	}

	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, false, wp_copy);
	if (ret)
		goto out_release;

	unlock_page(page);
	ret = 0;
out:
	return ret;
out_release:
	unlock_page(page);
	put_page(page);
	goto out;
}

static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we don't get here only when the pmd is missing:
	 * the *pmd may already be established, and it may even be a
	 * trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mode)
{
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes
	 * supported by hugetlb.  A PMD_SIZE huge zero page may exist, as
	 * used by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  The fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		if (mode != MCOPY_ATOMIC_CONTINUE &&
		    !huge_pte_none(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, mode, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      enum mcopy_atomic_mode mode);
#endif /* CONFIG_HUGETLB_PAGE */

static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						enum mcopy_atomic_mode mode,
						bool wp_copy)
{
	ssize_t err;

	if (mode == MCOPY_ATOMIC_CONTINUE) {
		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
					    wp_copy);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (mode == MCOPY_ATOMIC_NORMAL)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		VM_WARN_ON_ONCE(wp_copy);
		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     mode != MCOPY_ATOMIC_NORMAL,
					     page);
	}

	return err;
}

static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mcopy_mode,
					      atomic_t *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * Validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to the appropriate routine.
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					       src_start, len, mcopy_mode);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP, don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, mcopy_mode, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_page(page);
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     atomic_t *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
			      mmap_changing, 0);
}

ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
			      mmap_changing, 0);
}
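
/*
 * Backend for the UFFDIO_WRITEPROTECT ioctl on anonymous, uffd-wp registered
 * vmas.  An illustrative userspace sketch (not part of this file), where
 * passing mode 0 instead resolves the write protection:
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = addr, .len = len },
 *		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
 *	};
 *	ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
 */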
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	pgprot_t newprot;
	int err;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);
	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	change_protection(dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);

	err = 0;
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}