// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/userfaultfd.c
 *
 * Copyright (C) 2015 Red Hat, Inc.
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "internal.h"

static __always_inline
struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
				    unsigned long dst_start,
				    unsigned long len)
{
	/*
	 * Make sure that the dst range is both valid and fully within a
	 * single existing vma.
	 */
	struct vm_area_struct *dst_vma;

	dst_vma = find_vma(dst_mm, dst_start);
	if (!range_in_vma(dst_vma, dst_start, dst_start + len))
		return NULL;

	/*
	 * Check that the vma is registered in uffd; this is required to
	 * enforce the VM_MAYWRITE check done at uffd registration time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		return NULL;

	return dst_vma;
}

/*
 * Install PTEs to map dst_addr (within dst_vma) to page.
 *
 * This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
 * and anon, and for both shared and private VMAs.
 */
int mfill_atomic_install_pte(pmd_t *dst_pmd,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr, struct page *page,
			     bool newly_allocated, uffd_flags_t flags)
{
	int ret;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	pte_t _dst_pte, *dst_pte;
	bool writable = dst_vma->vm_flags & VM_WRITE;
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page_mapping(page);
	spinlock_t *ptl;
	struct folio *folio;
	struct inode *inode;
	pgoff_t offset, max_off;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	_dst_pte = pte_mkdirty(_dst_pte);
	if (page_in_cache && !vm_shared)
		writable = false;
	if (writable)
		_dst_pte = pte_mkwrite(_dst_pte);
	if (flags & MFILL_ATOMIC_WP)
		_dst_pte = pte_mkuffd_wp(_dst_pte);

	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);

	if (vma_is_shmem(dst_vma)) {
		/* serialize against truncate with the page table lock */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}

	ret = -EEXIST;
	/*
	 * We allow overwriting a pte marker: consider the case where both
	 * MISSING|WP are registered; we first wr-protect a none pte (which
	 * has no page cache page backing it), then access the page.
	 */
	if (!pte_none_mostly(*dst_pte))
		goto out_unlock;

	folio = page_folio(page);
	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			folio_add_lru(folio);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*
	 * Must happen after rmap, as mm_counter() checks mapping (via
	 * PageAnon()), which is set by __page_set_anon_rmap().
	 */
	inc_mm_counter(dst_mm, mm_counter(page));

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

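/*
 * Handle an MFILL_ATOMIC_COPY request for an anonymous (or MAP_PRIVATE
 * shmem) pte: allocate a folio, copy the source data into it (falling
 * back to a copy outside mmap_lock via -ENOENT if the user copy faults),
 * mark it uptodate, charge it to the memcg and install the pte.
 */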
static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
				 struct vm_area_struct *dst_vma,
				 unsigned long dst_addr,
				 unsigned long src_addr,
				 uffd_flags_t flags,
				 struct folio **foliop)
{
	void *kaddr;
	int ret;
	struct folio *folio;

	if (!*foliop) {
		ret = -ENOMEM;
		folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
					dst_addr, false);
		if (!folio)
			goto out;

		kaddr = kmap_local_folio(folio, 0);
		/*
		 * The read mmap_lock is held here.  Despite the mmap_lock
		 * being read-recursive, a deadlock is still possible if a
		 * writer has taken the lock.  For example:
		 *
		 * process A thread 1 takes read lock on own mmap_lock
		 * process A thread 2 calls mmap, blocks taking write lock
		 * process B thread 1 takes page fault, read lock on own mmap lock
		 * process B thread 2 calls mmap, blocks taking write lock
		 * process A thread 1 blocks taking read lock on process B
		 * process B thread 1 blocks taking read lock on process A
		 *
		 * Disable page faults to prevent potential deadlock
		 * and retry the copy outside the mmap_lock.
		 */
		pagefault_disable();
		ret = copy_from_user(kaddr, (const void __user *) src_addr,
				     PAGE_SIZE);
		pagefault_enable();
		kunmap_local(kaddr);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			*foliop = folio;
			/* don't free the page */
			goto out;
		}

		flush_dcache_folio(folio);
	} else {
		folio = *foliop;
		*foliop = NULL;
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	ret = -ENOMEM;
	if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
		goto out_release;

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       &folio->page, true, flags);
	if (ret)
		goto out_release;
out:
	return ret;
out_release:
	folio_put(folio);
	goto out;
}

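/*
 * Handle an MFILL_ATOMIC_ZEROPAGE request for an anonymous (or
 * MAP_PRIVATE shmem) pte by installing a special mapping of the
 * shared zero page.
 */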
static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;
	pgoff_t offset, max_off;
	struct inode *inode;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
	if (dst_vma->vm_file) {
		/* the shmem MAP_PRIVATE case requires checking the i_size */
		inode = dst_vma->vm_file->f_inode;
		offset = linear_page_index(dst_vma, dst_addr);
		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
		ret = -EFAULT;
		if (unlikely(offset >= max_off))
			goto out_unlock;
	}
	ret = -EEXIST;
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
				     struct vm_area_struct *dst_vma,
				     unsigned long dst_addr,
				     uffd_flags_t flags)
{
	struct inode *inode = file_inode(dst_vma->vm_file);
	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
	struct folio *folio;
	struct page *page;
	int ret;

	ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
	/* Our caller expects us to return -EFAULT if we failed to find folio */
	if (ret == -ENOENT)
		ret = -EFAULT;
	if (ret)
		goto out;
	if (!folio) {
		ret = -EFAULT;
		goto out;
	}

	page = folio_file_page(folio, pgoff);
	if (PageHWPoison(page)) {
		ret = -EIO;
		goto out_release;
	}

	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
				       page, false, flags);
	if (ret)
		goto out_release;

	folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}

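/*
 * Walk (and allocate where necessary) the page table levels down to the
 * pmd covering @address, returning NULL if an allocation fails.
 */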
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, address);
	p4d = p4d_alloc(mm, pgd, address);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	/*
	 * Note that we are not necessarily here because the pmd was
	 * missing: *pmd may already be established, and it may even
	 * be a trans_huge_pmd.
	 */
	return pmd_alloc(mm, pud, address);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * mfill_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_lock held; it will release mmap_lock before returning.
 */
static __always_inline ssize_t mfill_atomic_hugetlb(
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      uffd_flags_t flags)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist, as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_ZEROPAGE)) {
		mmap_read_unlock(dst_mm);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_lock and
	 * retry, dst_vma will be set to NULL and we must lookup again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_dst_vma(dst_mm, dst_start, len);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	while (src_addr < src_start + len) {
		BUG_ON(dst_addr >= dst_start + len);

		/*
		 * Serialize via vma_lock and hugetlb_fault_mutex.
		 * vma_lock ensures the dst_pte remains valid even
		 * in the case of shared pmds.  fault mutex prevents
		 * races with other faulting threads.
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, idx);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);
		hugetlb_vma_lock_read(dst_vma);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
		if (!dst_pte) {
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		if (!uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE) &&
		    !huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
			err = -EEXIST;
			hugetlb_vma_unlock_read(dst_vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
					       src_addr, flags, &folio);

		hugetlb_vma_unlock_read(dst_vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		cond_resched();

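		/*
		 * -ENOENT means the source data still has to be copied into
		 * the already-allocated folio: drop mmap_lock, copy from
		 * userspace with faults enabled, then retry the loop and
		 * look dst_vma up again.
		 */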
		if (unlikely(err == -ENOENT)) {
			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			err = copy_folio_from_user(folio,
						   (const void __user *)src_addr, true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			mmap_read_lock(dst_mm);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
#else /* !CONFIG_HUGETLB_PAGE */
/* fail at build time if gcc attempts to use this */
extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
				    unsigned long dst_start,
				    unsigned long src_start,
				    unsigned long len,
				    uffd_flags_t flags);
#endif /* CONFIG_HUGETLB_PAGE */

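/*
 * Dispatch a single-pte fill to the right helper based on the requested
 * mode (CONTINUE, COPY or ZEROPAGE) and on whether the destination VMA
 * is anonymous, MAP_PRIVATE shmem or MAP_SHARED shmem.
 */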
static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						uffd_flags_t flags,
						struct folio **foliop)
{
	ssize_t err;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE)) {
		return mfill_atomic_pte_continue(dst_pmd, dst_vma,
						 dst_addr, flags);
	}

	/*
	 * The normal page fault path for a shmem will invoke the
	 * fault, fill the hole in the file and COW it right away. The
	 * result is plain anonymous memory. So when we are asked to
	 * fill a hole in a MAP_PRIVATE shmem mapping, we'll generate
	 * anonymous memory directly without actually filling the
	 * hole. For the MAP_PRIVATE case the robustness check only
	 * happens in the pagetable (to verify it's still none) and
	 * not in the radix tree.
	 */
	if (!(dst_vma->vm_flags & VM_SHARED)) {
		if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
			err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
						    dst_addr, src_addr,
						    flags, foliop);
		else
			err = mfill_atomic_pte_zeropage(dst_pmd,
							dst_vma, dst_addr);
	} else {
		err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
					     dst_addr, src_addr,
					     flags, foliop);
	}

	return err;
}

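/*
 * Core loop shared by the copy, zeropage and continue operations:
 * validate the destination range and VMA, then fill the range one page
 * at a time, dropping and re-taking mmap_lock whenever the source data
 * has to be copied with page faults enabled.
 */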
static __always_inline ssize_t mfill_atomic(struct mm_struct *dst_mm,
					    unsigned long dst_start,
					    unsigned long src_start,
					    unsigned long len,
					    atomic_t *mmap_changing,
					    uffd_flags_t flags)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct folio *folio;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	folio = NULL;
retry:
	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	/*
	 * Make sure the vma is not shared, and that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -ENOENT;
	dst_vma = find_dst_vma(dst_mm, dst_start, len);
	if (!dst_vma)
		goto out_unlock;

	err = -EINVAL;
	/*
	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
	    dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;

	/*
	 * Validate the request flags now that we know the dst_vma: don't
	 * allow a wrprotect copy if the userfaultfd didn't register as WP.
	 */
	if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
		goto out_unlock;

	/*
	 * If this is a HUGETLB vma, pass off to appropriate routine
	 */
	if (is_vm_hugetlb_page(dst_vma))
		return mfill_atomic_hugetlb(dst_vma, dst_start,
					    src_start, len, flags);

	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
		goto out_unlock;
	if (!vma_is_shmem(dst_vma) &&
	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma or this page
	 * would get a NULL anon_vma when moved in the
	 * dst_vma.
	 */
	err = -ENOMEM;
	if (!(dst_vma->vm_flags & VM_SHARED) &&
	    unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmdp_get_lockless(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
				       src_addr, flags, &folio);
		cond_resched();

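		/*
		 * -ENOENT means the copy into the newly allocated folio must
		 * be done with page faults enabled: drop mmap_lock, copy the
		 * source page from userspace, then retry from the top so the
		 * vma is looked up again.
		 */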
		if (unlikely(err == -ENOENT)) {
			void *kaddr;

			mmap_read_unlock(dst_mm);
			BUG_ON(!folio);

			kaddr = kmap_local_folio(folio, 0);
			err = copy_from_user(kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap_local(kaddr);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			flush_dcache_folio(folio);
			goto retry;
		} else
			BUG_ON(folio);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	mmap_read_unlock(dst_mm);
out:
	if (folio)
		folio_put(folio);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

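/*
 * Entry points for the copy, zeropage and continue operations: each
 * wrapper selects the corresponding MFILL_ATOMIC_* mode and calls
 * mfill_atomic().
 */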
ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
			  unsigned long src_start, unsigned long len,
			  atomic_t *mmap_changing, uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, dst_start, src_start, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_COPY));
}

ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(0, MFILL_ATOMIC_ZEROPAGE));
}

ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
			      unsigned long len, atomic_t *mmap_changing,
			      uffd_flags_t flags)
{
	return mfill_atomic(dst_mm, start, 0, len, mmap_changing,
			    uffd_flags_set_mode(flags, MFILL_ATOMIC_CONTINUE));
}

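/*
 * Apply or clear uffd write protection over a range that lies within
 * dst_vma, returning the number of pages changed or a negative error.
 */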
long uffd_wp_range(struct vm_area_struct *dst_vma,
		   unsigned long start, unsigned long len, bool enable_wp)
{
	unsigned int mm_cp_flags;
	struct mmu_gather tlb;
	long ret;

	VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
		     "The address range exceeds VMA boundary.\n");
	if (enable_wp)
		mm_cp_flags = MM_CP_UFFD_WP;
	else
		mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;

	/*
	 * vma->vm_page_prot already reflects that uffd-wp is enabled for this
	 * VMA (see userfaultfd_set_vm_flags()) and that all PTEs are supposed
	 * to be write-protected by default whenever protection changes.
	 * Try upgrading write permissions manually.
	 */
	if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	tlb_gather_mmu(&tlb, dst_vma->vm_mm);
	ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
	tlb_finish_mmu(&tlb);

	return ret;
}

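/*
 * Handle UFFDIO_WRITEPROTECT: walk every VMA in [start, start + len) and
 * toggle uffd write protection on the part of the range each VMA covers,
 * bailing out if a VMA is not registered for uffd-wp or if a hugetlb VMA
 * is not hugepage-aligned.
 */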
int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
			unsigned long len, bool enable_wp,
			atomic_t *mmap_changing)
{
	unsigned long end = start + len;
	unsigned long _start, _end;
	struct vm_area_struct *dst_vma;
	unsigned long page_mask;
	long err;
	VMA_ITERATOR(vmi, dst_mm, start);

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(start + len <= start);

	mmap_read_lock(dst_mm);

	/*
	 * If memory mappings are changing because of a non-cooperative
	 * operation (e.g. mremap) running in parallel, bail out and
	 * request the user to retry later.
	 */
	err = -EAGAIN;
	if (mmap_changing && atomic_read(mmap_changing))
		goto out_unlock;

	err = -ENOENT;
	for_each_vma_range(vmi, dst_vma, end) {

		if (!userfaultfd_wp(dst_vma)) {
			err = -ENOENT;
			break;
		}

		if (is_vm_hugetlb_page(dst_vma)) {
			err = -EINVAL;
			page_mask = vma_kernel_pagesize(dst_vma) - 1;
			if ((start & page_mask) || (len & page_mask))
				break;
		}

		_start = max(dst_vma->vm_start, start);
		_end = min(dst_vma->vm_end, end);

		err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);

		/* Return 0 on success, <0 on failures */
		if (err < 0)
			break;
		err = 0;
	}
out_unlock:
	mmap_read_unlock(dst_mm);
	return err;
}