// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma)
		return NULL;

	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		return NULL;

	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, bool wp_copy)
{
	int ret;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page->mapping;
	spinlock_t *ptl;
	struct inode *inode;
	pgoff_t offset, max_off;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable) {
		if (wp_copy)
			_dst_pte = pte_mkuffd_wp(_dst_pte);
		else
			_dst_pte = pte_mkwrite(_dst_pte);
	}

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;

	if (page_in_cache)
		page_add_file_rmap(page, false);
	else
		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	if (newly_allocated)
		lru_cache_add_inactive_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

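/*
 * Handle MCOPY_ATOMIC_NORMAL for anonymous (non-VM_SHARED) memory: allocate
 * a new page, copy the source data into it and install the PTE.  If the
 * atomic copy from userspace faults, the page is handed back in *pagep and
 * -ENOENT tells the caller to redo the copy outside mmap_lock and retry.
 */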
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep,
			    bool wp_copy)
{
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, true, wp_copy);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	put_page(page);
	goto out;
}

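/*
 * Handle MCOPY_ATOMIC_ZEROPAGE for anonymous memory: map dst_addr to the
 * shared zero page with a special, read-only PTE.
 */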
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
				pmd_t *dst_pmd,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				bool wp_copy)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct page *page;
	int ret;

	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
	if (ret)
		goto out;
	if (!page) {
		ret = -EFAULT;
		goto out;
	}

	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       page, false, wp_copy);
	if (ret)
		goto out_release;

	unlock_page(page);
	ret = 0;
out:
	return ret;
out_release:
	unlock_page(page);
	put_page(page);
	goto out;
}

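/*
 * Walk the page tables for @address, allocating the p4d, pud and pmd
 * levels as needed; returns the pmd, or NULL on allocation failure.
 */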
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we didn't run this because the pmd was
	 * missing, the *pmd may be already established and in
	 * turn it may also be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held, it will release mmap_lock before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mode)
{
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we can not reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (mode == MCOPY_ATOMIC_ZEROPAGE) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
		 * i_mmap_rwsem ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		mapping = dst_vma->vm_file->f_mapping;
		i_mmap_lock_read(mapping);
		idx = linear_page_index(dst_vma, dst_addr);
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		if (mode != MCOPY_ATOMIC_CONTINUE &&
		    !huge_pte_none(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			i_mmap_unlock_read(mapping);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
					       dst_addr, src_addr, mode, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		i_mmap_unlock_read(mapping);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						vma_hpagesize / PAGE_SIZE,
						true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
				      struct vm_area_struct *dst_vma,
				      unsigned long dst_start,
				      unsigned long src_start,
				      unsigned long len,
				      enum mcopy_atomic_mode mode);
#endif /* CONFIG_HUGETLB_PAGE */

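/*
 * Per-page dispatcher: route to the CONTINUE, copy or zeropage helper based
 * on the mcopy mode and on whether the destination VMA is private (filled as
 * anonymous memory) or a shared shmem mapping.
 */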
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
						pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						struct page **page,
						enum mcopy_atomic_mode mode,
						bool wp_copy)
{
	ssize_t err;

	if (mode == MCOPY_ATOMIC_CONTINUE) {
		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
					    wp_copy);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (mode == MCOPY_ATOMIC_NORMAL)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, page,
					       wp_copy);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd,
						 dst_vma, dst_addr);
	} else {
		VM_WARN_ON_ONCE(wp_copy);
		err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     mode != MCOPY_ATOMIC_NORMAL,
					     page);
	}

	return err;
}

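/*
 * Common implementation behind mcopy_atomic(), mfill_zeropage() and
 * mcopy_continue(): validate the destination range, look up the destination
 * VMA under mmap_lock and fill it one page at a time, dropping mmap_lock to
 * copy from userspace whenever the atomic copy faults.
 */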
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      enum mcopy_atomic_mode mcopy_mode,
					      atomic_t *mmap_changing,
					      __u64 mode)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	bool wp_copy;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	wp_copy = mode & UFFDIO_COPY_MODE_WP;
	if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return  __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
					       src_start, len, mcopy_mode);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
				       src_addr, &page, mcopy_mode, wp_copy);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *page_kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

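/*
 * Entry points for the UFFDIO_COPY, UFFDIO_ZEROPAGE and UFFDIO_CONTINUE
 * operations; each is a thin wrapper around __mcopy_atomic() with the
 * matching mcopy_atomic_mode.
 */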
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len,
		     atomic_t *mmap_changing, __u64 mode)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len,
			      MCOPY_ATOMIC_NORMAL, mmap_changing, mode);
}

ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
			      mmap_changing, 0);
}

ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len, atomic_t *mmap_changing)
{
	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_CONTINUE,
			      mmap_changing, 0);
}

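/*
 * Set or clear userfaultfd write protection (UFFDIO_WRITEPROTECT) on an
 * anonymous range by switching its protection via change_protection() with
 * MM_CP_UFFD_WP or MM_CP_UFFD_WP_RESOLVE.
 */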
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	struct vm_area_struct *dst_vma;
	pgprot_t newprot;
	int err;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, start, len);
	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (!userfaultfd_wp(dst_vma))
		goto out_unlock;
	if (!vma_is_anonymous(dst_vma))
		goto out_unlock;

	if (enable_wp)
		newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
	else
		newprot = vm_get_page_prot(dst_vma->vm_flags);

	change_protection(dst_vma, start, start + len, newprot,
			  enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);

	err = 0;
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}