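`mm/userfaultfd.c` is the fill and write-protect back-end of userfaultfd(2): it implements the kernel side of the `UFFDIO_COPY`, `UFFDIO_ZEROPAGE`, `UFFDIO_CONTINUE` and `UFFDIO_WRITEPROTECT` ioctls, whose entry points live in `fs/userfaultfd.c`. The short userspace sketches placed between the functions below are hedged illustrations of how each path is reached from the ioctl interface; they are not part of the kernel source itself.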
```c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"
```
```c
static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!range_in_vma(dst_vma, dst_start, dst_start + len))
		return NULL;

	/*
	 * Check the vma is registered in uffd, this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}
```
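The `vm_userfaultfd_ctx.ctx` check only passes for ranges that userspace has previously registered through the userfaultfd ioctl interface. Below is a minimal userspace sketch of that registration, assuming userfaultfd is available to the caller (depending on `vm.unprivileged_userfaultfd` this may require extra privileges); error handling is kept to the bare minimum.

```c
/* Userspace sketch: register a range so later fill ioctls can target it. */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int register_missing(void *addr, size_t len)
{
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)addr, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) ||
	    ioctl(uffd, UFFDIO_REGISTER, &reg))
		return -1;
	return uffd;	/* the fd the fill ioctls below operate on */
}
```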
```c
/*
 * Install PTEs, to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page_mapping(page);
	spinlock_t *ptl;
	struct folio *folio;
	struct inode *inode;
	pgoff_t offset, max_off;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte);
	if (flags & MFILL_ATOMIC_WP)
		_dst_pte = pte_mkuffd_wp(_dst_pte);

	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow to overwrite a pte marker: consider when both MISSING|WP
	 * registered, we firstly wr-protect a none pte which has no page cache
	 * page backing it, then access the page.
	 */
	if (!pte_none_mostly(ptep_get(dst_pte)))
		goto out_unlock;

	folio = page_folio(page);
	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			folio_add_lru(folio);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}
```
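The writability decision above follows a small rule: the new PTE is writable only if the VMA allows writes, and a page-cache page mapped into a private VMA is always installed read-only so that a later write still takes the copy-on-write path. The fragment below restates that rule outside the kernel purely for illustration; `can_map_writable()` is a hypothetical helper, not something this file defines.

```c
/* Illustration only: hypothetical restatement of the writability rule above. */
#include <stdbool.h>

static bool can_map_writable(bool vm_write, bool vm_shared, bool page_in_cache)
{
	/* A page-cache page in a MAP_PRIVATE mapping stays read-only so a
	 * later write still triggers copy-on-write. */
	if (page_in_cache && !vm_shared)
		return false;
	return vm_write;
}
```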
```c
static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{
	void *kaddr;
	int ret;
	struct folio *folio;

	if (!*foliop) {
		ret = -ENOMEM;
		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
					dst_addr, false);
		if (!folio)
			goto out;

		kaddr = kmap_local_folio(folio, 0);
		/*
		 * The read mmap_lock is held here.  Despite the
		 * mmap_lock being read recursive a deadlock is still
		 * possible if a writer has taken a lock.  For example:
		 *
		 * process A thread 1 takes read lock on own mmap_lock
		 * process A thread 2 calls mmap, blocks taking write lock
		 * process B thread 1 takes page fault, read lock on own mmap lock
		 * process B thread 2 calls mmap, blocks taking write lock
		 * process A thread 1 blocks taking read lock on process B
		 * process B thread 1 blocks taking read lock on process A
		 *
		 * Disable page faults to prevent potential deadlock
		 * and retry the copy outside the mmap_lock.
		 */
		pagefault_disable();
		ret = copy_from_user(kaddr, (const void __user *) src_addr,
				     PAGE_SIZE);
		pagefault_enable();
		kunmap_local(kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*foliop = folio;
			/* don't free the page */
			goto out;
		}

		flush_dcache_folio(folio);
	} else {
		folio = *foliop;
		*foliop = NULL;
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	ret = -ENOMEM;
	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, flags);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	folio_put(folio);
	goto out;
}
```
```c
static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EAGAIN;
	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
	if (!dst_pte)
		goto out;
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(ptep_get(dst_pte)))
		goto out_unlock;
	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
out:
	return ret;
}
```
```c
/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct folio *folio;
	struct page *page;
	int ret;

	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
	/* Our caller expects us to return -EFAULT if we failed to find folio */
	if (ret == -ENOENT)
		ret = -EFAULT;
	if (ret)
		goto out;
	if (!folio) {
		ret = -EFAULT;
		goto out;
	}

	page = folio_file_page(folio, pgoff);
	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, false, flags);
	if (ret)
		goto out_release;

	folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
```
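`UFFDIO_CONTINUE` resolves minor faults: the page-cache page already exists and only the PTE is missing, so nothing is copied. A minimal userspace sketch of the resolving ioctl, assuming the range was registered with `UFFDIO_REGISTER_MODE_MINOR` (which in turn needs the `UFFD_FEATURE_MINOR_SHMEM` or `UFFD_FEATURE_MINOR_HUGETLBFS` handshake) and the fault was read from the uffd with `UFFD_PAGEFAULT_FLAG_MINOR` set:

```c
/* Userspace sketch: resolve a minor fault on [addr, addr + len). */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

int resolve_minor_fault(int uffd, unsigned long addr, unsigned long len)
{
	struct uffdio_continue cont = {
		.range = { .start = addr, .len = len },
		.mode  = 0,	/* or UFFDIO_CONTINUE_MODE_DONTWAKE */
	};

	if (ioctl(uffd, UFFDIO_CONTINUE, &cont))
		return -1;	/* e.g. EFAULT if no page cache backs the range */
	return 0;
}
```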
```c
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we didn't run this because the pmd was
	 * missing, the *pmd may be already established and in
	 * turn it may also be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}
```
```c
#ifdef CONFIG_HUGETLB_PAGE
/*
 * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held, it will release mmap_lock before returning.
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge pages may exist as used
	 * by THP.  Since we can not reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has a anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via vma_lock and hugetlb_fault_mutex.
		 * vma_lock ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		hugetlb_vma_lock_read(dst_vma);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
					       src_addr, flags, &folio);

		hugetlb_vma_unlock_read(dst_vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			err = copy_folio_from_user(folio,
						   (const void __user *)src_addr, true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */
```
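The hugetlb path rejects `UFFDIO_ZEROPAGE` outright and insists that both the destination address and the length are multiples of the VMA's huge page size, otherwise the ioctl fails with `-EINVAL`. A hedged userspace sketch of a correctly aligned copy into a 2 MB hugetlb-backed mapping; it assumes the range on `dst_base` was registered on `uffd` as shown earlier and that `src` points at 2 MB of ordinary, readable memory (the source side needs no special alignment):

```c
/* Userspace sketch: UFFDIO_COPY into a hugetlb-backed mapping. */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

#define HPAGE_2M	(2UL << 20)

int fill_one_hugepage(int uffd, void *dst_base, const void *src)
{
	/* dst and len must both be multiples of the huge page size; see the
	 * alignment check in mfill_atomic_hugetlb() above. */
	struct uffdio_copy copy = {
		.dst  = (unsigned long)dst_base,
		.src  = (unsigned long)src,
		.len  = HPAGE_2M,
		.mode = 0,
	};

	return ioctl(uffd, UFFDIO_COPY, &copy) ? -1 : 0;
}
```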
```c
static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{
	ssize_t err;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, flags);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result generates plain anonymous memory. So when we are
	 * asked to fill an hole in a MAP_PRIVATE shmem mapping, we'll
	 * generate anonymous memory directly without actually filling
	 * the hole. For the MAP_PRIVATE case the robustness check
	 * only happens in the pagetable (to verify it's still none)
	 * and not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr,
						    flags, foliop);
		else
			err = mfill_atomic_pte_zeropage(dst_pmd,
							dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     flags, foliop);
	}

	return err;
}
```
```c
static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    atomic_t *mmap_changing,
					    uffd_flags_t flags)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * validate 'mode' now that we know the dst_vma: don't allow
	 * a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return mfill_atomic_hugetlb(dst_vma, dst_start,
					    src_start, len, flags);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) &&
	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has a anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If an huge pmd materialized from under us fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, flags, &folio);
		cond_resched();

		if (unlikely(err == -ENOENT)) {
			void *kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			kaddr = kmap_local_folio(folio, 0);
			err = copy_from_user(kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap_local(kaddr);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_folio(folio);
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
```
```c
ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
			  unsigned long src_start, unsigned long len,
			  atomic_t *mmap_changing, uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
}

ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
}

ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing,
			      uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
}
```
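These three wrappers are the back-ends for the `UFFDIO_COPY`, `UFFDIO_ZEROPAGE` and `UFFDIO_CONTINUE` ioctls; the ioctl entry points themselves live in `fs/userfaultfd.c`. Because `mfill_atomic()` returns the number of bytes actually filled (or a negative error when nothing was filled), the ioctl can complete partially or return `EAGAIN` when a non-cooperative event such as mremap races with it, and userspace is expected to retry. A hedged userspace sketch of that retry loop:

```c
/* Userspace sketch: fill a missing range and retry partial completions. */
#include <errno.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

int fill_range(int uffd, unsigned long dst, unsigned long src, unsigned long len)
{
	while (len) {
		struct uffdio_copy copy = {
			.dst = dst, .src = src, .len = len, .mode = 0,
		};

		if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
			return 0;
		if (errno != EAGAIN)
			return -1;
		if (copy.copy > 0) {
			/* partial completion: advance past the bytes filled */
			dst += copy.copy;
			src += copy.copy;
			len -= copy.copy;
		}
		/* else a non-cooperative event raced with us; just retry */
	}
	return 0;
}
```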
```c
long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
{
	unsigned int mm_cp_flags;
	struct mmu_gather tlb;
	long ret;

	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
			"The address range exceeds VMA boundary.\n");
	if (enable_wp)
		mm_cp_flags = MM_CP_UFFD_WP;
	else
		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;

	/*
	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
	 * to be write-protected as default whenever protection changes.
	 * Try upgrading write permissions manually.
	 */
	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
	tlb_finish_mmu(&tlb);

	return ret;
}
```
```c
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	unsigned long end = start + len;
	unsigned long _start, _end;
	struct vm_area_struct *dst_vma;
	unsigned long page_mask;
	long err;
	VMA_ITERATOR(vmi, dst_mm, start);

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	for_each_vma_range(vmi, dst_vma, end) {

		if (!userfaultfd_wp(dst_vma)) {
			err = -ENOENT;
			break;
		}

		if (is_vm_hugetlb_page(dst_vma)) {
			err = -EINVAL;
			page_mask = vma_kernel_pagesize(dst_vma) - 1;
			if ((start & page_mask) || (len & page_mask))
				break;
		}

		_start = max(dst_vma->vm_start, start);
		_end = min(dst_vma->vm_end, end);

		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);

		/* Return 0 on success, <0 on failures */
		if (err < 0)
			break;
		err = 0;
	}
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}
```
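`mwriteprotect_range()` backs the `UFFDIO_WRITEPROTECT` ioctl, which both arms write protection (`enable_wp == true`) and resolves a write-protect fault by dropping it again (`enable_wp == false`). A minimal userspace sketch, assuming the range was registered with `UFFDIO_REGISTER_MODE_WP`:

```c
/* Userspace sketch: toggle uffd write protection on [addr, addr + len). */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

int set_write_protect(int uffd, unsigned long addr, unsigned long len, int protect)
{
	struct uffdio_writeprotect wp = {
		.range = { .start = addr, .len = len },
		.mode  = protect ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	/* Clearing protection also wakes the faulting thread unless
	 * UFFDIO_WRITEPROTECT_MODE_DONTWAKE is set. */
	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) ? -1 : 0;
}
```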