// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/userfaultfd.c
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Some parts derived from fs/eventfd.c (anon inode setup) and
 * mm/ksm.c (mm hashing).
 */

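/*
 * Illustrative userspace flow (not kernel code; addr/len are placeholder
 * names): the interface implemented below is typically driven as:
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct uffdio_api api = { .api = UFFD_API };
 *	ioctl(uffd, UFFDIO_API, &api);
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)addr, .len = len },
 *		.mode = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 * after which the monitor poll()s/read()s struct uffd_msg events from
 * uffd and resolves them (e.g. with UFFDIO_COPY).
 */
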
#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swapops.h>
#include <linux/miscdevice.h>

static int sysctl_unprivileged_userfaultfd __read_mostly;

#ifdef CONFIG_SYSCTL
static struct ctl_table vm_userfaultfd_table[] = {
	{
		.procname	= "unprivileged_userfaultfd",
		.data		= &sysctl_unprivileged_userfaultfd,
		.maxlen		= sizeof(sysctl_unprivileged_userfaultfd),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};
#endif
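/*
 * Illustrative (assumes a CONFIG_SYSCTL kernel): the table above is
 * exposed as the vm.unprivileged_userfaultfd knob, e.g.:
 *
 *	sysctl -w vm.unprivileged_userfaultfd=1
 *
 * With the default of 0, unprivileged callers of userfaultfd() are
 * expected to restrict themselves to UFFD_USER_MODE_ONLY (the check
 * lives in the syscall entry point, outside this excerpt).
 */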

static struct kmem_cache *userfaultfd_ctx_cachep __ro_after_init;

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

/* internal indication that UFFD_API ioctl was successfully executed */
#define UFFD_FEATURE_INITIALIZED		(1u << 31)

static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
{
	return ctx->features & UFFD_FEATURE_INITIALIZED;
}

static bool userfaultfd_wp_async_ctx(struct userfaultfd_ctx *ctx)
{
	return ctx && (ctx->features & UFFD_FEATURE_WP_ASYNC);
}

/*
 * Whether WP_UNPOPULATED is enabled on the uffd context.  It is only
 * meaningful when userfaultfd_wp()==true on the vma and when it's
 * anonymous.
 */
bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
{
	struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return false;

	return ctx->features & UFFD_FEATURE_WP_UNPOPULATED;
}

static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
				     vm_flags_t flags)
{
	const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;

	vm_flags_reset(vma, flags);
	/*
	 * For shared mappings, we want to enable writenotify while
	 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
	 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
	 */
	if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
		vma_set_page_prot(vma);
}

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the other
		 * CPUs, the waitqueue may disappear from under us, see the
		 * !list_empty_careful() in handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern function
153 | * "wake_up_state" (which in turns calls try_to_wake_up). | |
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the paddings or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned long real_address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;

	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;

	msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
				    real_address : address;

	/*
	 * These flags indicate why the userfault occurred:
	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
	 * - Neither of these flags being set indicates a MISSING fault.
	 *
	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
	 * fault. Otherwise, it was a read fault.
	 */
	if (flags & FAULT_FLAG_WRITE)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (reason & VM_UFFD_MINOR)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}

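/*
 * Illustrative (not kernel code): userspace receives the message built
 * above via read(2) on the uffd, e.g.:
 *
 *	struct uffd_msg msg;
 *	read(uffd, &msg, sizeof(msg));
 *	if (msg.event == UFFD_EVENT_PAGEFAULT) {
 *		unsigned long addr = msg.arg.pagefault.address;
 *		int wr = msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE;
 *		// resolve the fault, e.g. with UFFDIO_COPY
 *	}
 */
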
#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_fault *vmf,
					      unsigned long reason)
{
	struct vm_area_struct *vma = vmf->vma;
	pte_t *ptep, pte;
	bool ret = true;

	assert_fault_locked(vmf);

	ptep = hugetlb_walk(vma, vmf->address, vma_mmu_pagesize(vma));
	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.  PTE markers should be handled the same as none
	 * ptes here.
	 */
	if (huge_pte_none_mostly(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_fault *vmf,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 struct vm_fault *vmf,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	unsigned long address = vmf->address;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pte_t ptent;
	bool ret = true;

	assert_fault_locked(vmf);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
again:
	_pmd = pmdp_get_lockless(pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd) || pmd_devmap(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd)) {
		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
			ret = true;
		goto out;
	}

	pte = pte_offset_map(pmd, address);
	if (!pte) {
		ret = true;
		goto again;
	}
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.  PTE markers should be handled the same as none
	 * ptes here.
	 */
	ptent = ptep_get(pte);
	if (pte_none_mostly(ptent))
		ret = true;
	if (!pte_write(ptent) && (reason & VM_UFFD_WP))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return TASK_INTERRUPTIBLE;

	if (flags & FAULT_FLAG_KILLABLE)
		return TASK_KILLABLE;

	return TASK_UNINTERRUPTIBLE;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_lock must be released before
 * returning it.
 */
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	bool must_wait;
	unsigned int blocking_state;

	/*
	 * We don't do userfault handling for the final child pid update.
	 *
	 * We also don't do userfault handling during
	 * coredumping. hugetlbfs has the special
	 * hugetlb_follow_page_mask() to skip missing pages in the
	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
	 * the no_page_table() helper in follow_page_mask(), but the
	 * shmem_vm_ops->fault method is invoked even during
	 * coredumping and it ends up here.
	 */
	if (current->flags & (PF_EXITING|PF_DUMPCORE))
		goto out;

	assert_fault_locked(vmf);

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	/* Any unrecognized flag is a bug. */
	VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
	/* 0 or > 1 flags set is a bug; we expect exactly 1. */
	VM_BUG_ON(!reason || (reason & (reason - 1)));

	if (ctx->features & UFFD_FEATURE_SIGBUS)
		goto out;
	if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
		goto out;

	/*
	 * If it's already released don't get it. This avoids to loop
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_lock.
	 */
	if (unlikely(READ_ONCE(ctx->released))) {
		/*
		 * Don't return VM_FAULT_SIGBUS in this case, so a non
		 * cooperative manager can close the uffd after the
		 * last UFFDIO_COPY, without risking to trigger an
		 * involuntary SIGBUS if the process was starting the
		 * userfaultfd while the userfaultfd was still armed
		 * (but after the last UFFDIO_COPY). If the uffd
		 * wasn't already closed when the userfault reached
		 * this point, that would normally be solved by
		 * userfaultfd_must_wait returning 'false'.
		 *
		 * If we were to return VM_FAULT_SIGBUS here, the non
		 * cooperative manager would be instead forced to
		 * always call UFFDIO_UNREGISTER before it can safely
		 * close the uffd.
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_lock */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
				reason, ctx->features);
	uwq.ctx = ctx;
	uwq.waken = false;

	blocking_state = userfaultfd_get_blocking_state(vmf->flags);

	/*
	 * Take the vma lock now, in order to safely call
	 * userfaultfd_huge_must_wait() later. Since acquiring the
	 * (sleepable) vma lock can modify the current task state, that
	 * must be before explicitly calling set_current_state().
	 */
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_lock_read(vma);

	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock to happen before the list_add in
	 * __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vma))
		must_wait = userfaultfd_must_wait(ctx, vmf, reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vmf, reason);
	if (is_vm_hugetlb_page(vma))
		hugetlb_vma_unlock_read(vma);
	release_fault_lock(vmf);

	if (likely(must_wait && !READ_ONCE(ctx->released))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
	}

	__set_current_state(TASK_RUNNING);

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self. list_add also
	 * would never let any of the two pointers to point to
	 * self. So list_empty_careful won't risk to see both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock_irq(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.entry);
		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}

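/*
 * Illustrative (not kernel code; fault_addr/page_size/src_buf are
 * placeholder names): a MISSING fault parked in handle_userfault() above
 * is typically resolved by the monitor with UFFDIO_COPY, whose implicit
 * wakeup (absent UFFDIO_COPY_MODE_DONTWAKE) is what lets the schedule()
 * above return:
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(page_size - 1),
 *		.src = (unsigned long)src_buf,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */
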
static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
{
	struct userfaultfd_ctx *release_new_ctx;

	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
	release_new_ctx = NULL;

	spin_lock_irq(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (READ_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			/*
			 * &ewq->wq may be queued in fork_event, but
			 * __remove_wait_queue ignores the head
			 * parameter. It would be a problem if it
			 * didn't.
			 */
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			if (ewq->msg.event == UFFD_EVENT_FORK) {
				struct userfaultfd_ctx *new;

				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
				release_new_ctx = new;
			}
			break;
		}

		spin_unlock_irq(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();

		spin_lock_irq(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->event_wqh.lock);

	if (release_new_ctx) {
		struct vm_area_struct *vma;
		struct mm_struct *mm = release_new_ctx->mm;
		VMA_ITERATOR(vmi, mm, 0);

		/* the various vma->vm_userfaultfd_ctx still points to it */
		mmap_write_lock(mm);
		for_each_vma(vmi, vma) {
			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
				vma_start_write(vma);
				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
				userfaultfd_set_vm_flags(vma,
							 vma->vm_flags & ~__VM_UFFD_FLAGS);
			}
		}
		mmap_write_unlock(mm);

		userfaultfd_ctx_put(release_new_ctx);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
out:
	atomic_dec(&ctx->mmap_changing);
	VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
	userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma_start_write(vma);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		refcount_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->features = octx->features;
		ctx->released = false;
		init_rwsem(&ctx->map_changing_lock);
		atomic_set(&ctx->mmap_changing, 0);
		ctx->mm = vma->vm_mm;
		mmgrab(ctx->mm);

		userfaultfd_ctx_get(octx);
		down_write(&octx->map_changing_lock);
		atomic_inc(&octx->mmap_changing);
		up_write(&octx->map_changing_lock);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}

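/*
 * Illustrative (not kernel code): with UFFD_FEATURE_EVENT_FORK enabled,
 * the context duplicated above reaches the monitor as a brand new file
 * descriptor carried inside the event message:
 *
 *	if (msg.event == UFFD_EVENT_FORK) {
 *		int child_uffd = msg.arg.fork.ufd;
 *		// faults in the child's mm now arrive on child_uffd
 *	}
 */
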
void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return;

	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
		down_write(&ctx->map_changing_lock);
		atomic_inc(&ctx->mmap_changing);
		up_write(&ctx->map_changing_lock);
	} else {
		/* Drop uffd context if remap feature not enabled */
		vma_start_write(vma);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

bool userfaultfd_remove(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
		return true;

	userfaultfd_ctx_get(ctx);
	down_write(&ctx->map_changing_lock);
	atomic_inc(&ctx->mmap_changing);
	up_write(&ctx->map_changing_lock);
	mmap_read_unlock(mm);

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMOVE;
	ewq.msg.arg.remove.start = start;
	ewq.msg.arg.remove.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
			  unsigned long start, unsigned long end)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;

	list_for_each_entry(unmap_ctx, unmaps, list)
		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
		    unmap_ctx->end == end)
			return true;

	return false;
}

int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, struct list_head *unmaps)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;
	struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
	    has_unmap_ctx(ctx, unmaps, start, end))
		return 0;

	unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
	if (!unmap_ctx)
		return -ENOMEM;

	userfaultfd_ctx_get(ctx);
	down_write(&ctx->map_changing_lock);
	atomic_inc(&ctx->mmap_changing);
	up_write(&ctx->map_changing_lock);
	unmap_ctx->ctx = ctx;
	unmap_ctx->start = start;
	unmap_ctx->end = end;
	list_add_tail(&unmap_ctx->list, unmaps);

	return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
	struct userfaultfd_unmap_ctx *ctx, *n;
	struct userfaultfd_wait_queue ewq;

	list_for_each_entry_safe(ctx, n, uf, list) {
		msg_init(&ewq.msg);

		ewq.msg.event = UFFD_EVENT_UNMAP;
		ewq.msg.arg.remove.start = ctx->start;
		ewq.msg.arg.remove.end = ctx->end;

		userfaultfd_event_wait_completion(ctx->ctx, &ewq);

		list_del(&ctx->list);
		kfree(ctx);
	}
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;
	VMA_ITERATOR(vmi, mm, 0);

	WRITE_ONCE(ctx->released, true);

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
	 * changes while handle_userfault released the mmap_lock. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_lock for writing.
	 */
	mmap_write_lock(mm);
	prev = NULL;
	for_each_vma(vmi, vma) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		/* Reset ptes for the whole vma range if wr-protected */
		if (userfaultfd_wp(vma))
			uffd_wp_range(vma, vma->vm_start,
				      vma->vm_end - vma->vm_start, false);
		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
		vma = vma_modify_flags_uffd(&vmi, prev, vma, vma->vm_start,
					    vma->vm_end, new_flags,
					    NULL_VM_UFFD_CTX);

		vma_start_write(vma);
		userfaultfd_set_vm_flags(vma, new_flags);
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;

		prev = vma;
	}
	mmap_write_unlock(mm);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	lockdep_assert_held(&wqh->lock);

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}

static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	__poll_t ret;

	poll_wait(file, &ctx->fd_wqh, wait);

	if (!userfaultfd_is_initialized(ctx))
		return EPOLLERR;

	/*
	 * poll() never guarantees that read won't block.
	 * userfaults can be woken before they're read().
	 */
	if (unlikely(!(file->f_flags & O_NONBLOCK)))
		return EPOLLERR;
	/*
	 * lockless access to see if there are pending faults
	 * __pollwait last action is the add_wait_queue but
	 * the spin_unlock would allow the waitqueue_active to
	 * pass above the actual list_add inside
	 * add_wait_queue critical section. So use a full
	 * memory barrier to serialize the list_add write of
	 * add_wait_queue() with the waitqueue_active read
	 * below.
	 */
	ret = 0;
	smp_mb();
	if (waitqueue_active(&ctx->fault_pending_wqh))
		ret = EPOLLIN;
	else if (waitqueue_active(&ctx->event_wqh))
		ret = EPOLLIN;

	return ret;
}

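/*
 * Illustrative (not kernel code): given the O_NONBLOCK requirement
 * enforced above, a pollable uffd has to be created non-blocking:
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 */
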
static const struct file_operations userfaultfd_fops;

static int resolve_userfault_fork(struct userfaultfd_ctx *new,
				  struct inode *inode,
				  struct uffd_msg *msg)
{
	int fd;

	fd = anon_inode_create_getfd("[userfaultfd]", &userfaultfd_fops, new,
			O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
	if (fd < 0)
		return fd;

	msg->arg.reserved.reserved1 = 0;
	msg->arg.fork.ufd = fd;
	return 0;
}

static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
				    struct uffd_msg *msg, struct inode *inode)
{
	ssize_t ret;
	DECLARE_WAITQUEUE(wait, current);
	struct userfaultfd_wait_queue *uwq;
	/*
	 * Handling fork event requires sleeping operations, so
	 * we drop the event_wqh lock, then do these ops, then
	 * lock it back and wake up the waiter. While the lock is
	 * dropped the ewq may go away so we keep track of it
	 * carefully.
	 */
	LIST_HEAD(fork_event);
	struct userfaultfd_ctx *fork_nctx = NULL;

	/* always take the fd_wqh lock before the fault_pending_wqh lock */
	spin_lock_irq(&ctx->fd_wqh.lock);
	__add_wait_queue(&ctx->fd_wqh, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&ctx->fault_pending_wqh.lock);
		uwq = find_userfault(ctx);
		if (uwq) {
			/*
			 * Use a seqcount to repeat the lockless check
			 * in wake_userfault() to avoid missing
			 * wakeups because during the refile both
			 * waitqueue could become empty if this is the
			 * only userfault.
			 */
			write_seqcount_begin(&ctx->refile_seq);

			/*
			 * The fault_pending_wqh.lock prevents the uwq
			 * to disappear from under us.
			 *
			 * Refile this userfault from
			 * fault_pending_wqh to fault_wqh, it's not
			 * pending anymore after we read it.
			 *
			 * Use list_del() by hand (as
			 * userfaultfd_wake_function also uses
			 * list_del_init() by hand) to be sure nobody
			 * changes __remove_wait_queue() to use
			 * list_del_init() in turn breaking the
			 * !list_empty_careful() check in
			 * handle_userfault(). The uwq->wq.head list
			 * must never be empty at any time during the
			 * refile, or the waitqueue could disappear
			 * from under us. The "wait_queue_head_t"
			 * parameter of __remove_wait_queue() is unused
			 * anyway.
			 */
			list_del(&uwq->wq.entry);
			add_wait_queue(&ctx->fault_wqh, &uwq->wq);

			write_seqcount_end(&ctx->refile_seq);

			/* careful to always initialize msg if ret == 0 */
			*msg = uwq->msg;
			spin_unlock(&ctx->fault_pending_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->fault_pending_wqh.lock);

		spin_lock(&ctx->event_wqh.lock);
		uwq = find_userfault_evt(ctx);
		if (uwq) {
			*msg = uwq->msg;

			if (uwq->msg.event == UFFD_EVENT_FORK) {
				fork_nctx = (struct userfaultfd_ctx *)
					(unsigned long)
					uwq->msg.arg.reserved.reserved1;
				list_move(&uwq->wq.entry, &fork_event);
				/*
				 * fork_nctx can be freed as soon as
				 * we drop the lock, unless we take a
				 * reference on it.
				 */
				userfaultfd_ctx_get(fork_nctx);
				spin_unlock(&ctx->event_wqh.lock);
				ret = 0;
				break;
			}

			userfaultfd_event_complete(ctx, uwq);
			spin_unlock(&ctx->event_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->event_wqh.lock);

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (no_wait) {
			ret = -EAGAIN;
			break;
		}
		spin_unlock_irq(&ctx->fd_wqh.lock);
		schedule();
		spin_lock_irq(&ctx->fd_wqh.lock);
	}
	__remove_wait_queue(&ctx->fd_wqh, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&ctx->fd_wqh.lock);

	if (!ret && msg->event == UFFD_EVENT_FORK) {
		ret = resolve_userfault_fork(fork_nctx, inode, msg);
		spin_lock_irq(&ctx->event_wqh.lock);
		if (!list_empty(&fork_event)) {
			/*
			 * The fork thread didn't abort, so we can
			 * drop the temporary refcount.
			 */
			userfaultfd_ctx_put(fork_nctx);

			uwq = list_first_entry(&fork_event,
					       typeof(*uwq),
					       wq.entry);
			/*
			 * If fork_event list wasn't empty and in turn
			 * the event wasn't already released by fork
			 * (the event is allocated on fork kernel
			 * stack), put the event back to its place in
			 * the event_wq. fork_event head will be freed
			 * as soon as we return so the event cannot
			 * stay queued there no matter the current
			 * "ret" value.
			 */
			list_del(&uwq->wq.entry);
			__add_wait_queue(&ctx->event_wqh, &uwq->wq);

			/*
			 * Leave the event in the waitqueue and report
			 * error to userland if we failed to resolve
			 * the userfault fork.
			 */
			if (likely(!ret))
				userfaultfd_event_complete(ctx, uwq);
		} else {
			/*
			 * Here the fork thread aborted and the
			 * refcount from the fork thread on fork_nctx
			 * has already been released. We still hold
			 * the reference we took before releasing the
			 * lock above. If resolve_userfault_fork
			 * failed we've to drop it because the
			 * fork_nctx has to be freed in such case. If
			 * it succeeded we'll hold it because the new
			 * uffd references it.
			 */
			if (ret)
				userfaultfd_ctx_put(fork_nctx);
		}
		spin_unlock_irq(&ctx->event_wqh.lock);
	}

	return ret;
}

static ssize_t userfaultfd_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	ssize_t _ret, ret = 0;
	struct uffd_msg msg;
	int no_wait = file->f_flags & O_NONBLOCK;
	struct inode *inode = file_inode(file);

	if (!userfaultfd_is_initialized(ctx))
		return -EINVAL;

	for (;;) {
		if (count < sizeof(msg))
			return ret ? ret : -EINVAL;
		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
		if (_ret < 0)
			return ret ? ret : _ret;
		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
			return ret ? ret : -EFAULT;
		ret += sizeof(msg);
		buf += sizeof(msg);
		count -= sizeof(msg);
		/*
		 * Allow to read more than one fault at time but only
		 * block if waiting for the very first one.
		 */
		no_wait = O_NONBLOCK;
	}
}

static void __wake_userfault(struct userfaultfd_ctx *ctx,
			     struct userfaultfd_wake_range *range)
{
	spin_lock_irq(&ctx->fault_pending_wqh.lock);
	/* wake all in the range and autoremove */
	if (waitqueue_active(&ctx->fault_pending_wqh))
		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
				     range);
	if (waitqueue_active(&ctx->fault_wqh))
		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
}

static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
					   struct userfaultfd_wake_range *range)
{
	unsigned seq;
	bool need_wakeup;

	/*
	 * To be sure waitqueue_active() is not reordered by the CPU
	 * before the pagetable update, use an explicit SMP memory
	 * barrier here. PT lock release or mmap_read_unlock(mm) still
	 * have release semantics that can allow the
	 * waitqueue_active() to be reordered before the pte update.
	 */
	smp_mb();

	/*
	 * Use waitqueue_active because it's very frequent to
	 * change the address space atomically even if there are no
	 * userfaults yet. So we take the spinlock only when we're
	 * sure we've userfaults to wake.
	 */
	do {
		seq = read_seqcount_begin(&ctx->refile_seq);
		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
			waitqueue_active(&ctx->fault_wqh);
		cond_resched();
	} while (read_seqcount_retry(&ctx->refile_seq, seq));
	if (need_wakeup)
		__wake_userfault(ctx, range);
}

static __always_inline int validate_unaligned_range(
		struct mm_struct *mm, __u64 start, __u64 len)
{
	__u64 task_size = mm->task_size;

	if (len & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return -EINVAL;
	if (start < mmap_min_addr)
		return -EINVAL;
	if (start >= task_size)
		return -EINVAL;
	if (len > task_size - start)
		return -EINVAL;
	if (start + len <= start)
		return -EINVAL;
	return 0;
}

static __always_inline int validate_range(struct mm_struct *mm,
					  __u64 start, __u64 len)
{
	if (start & ~PAGE_MASK)
		return -EINVAL;

	return validate_unaligned_range(mm, start, len);
}

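/*
 * Illustrative: with 4KiB pages, validate_range() accepts e.g.
 * (start = 0x7f0000000000, len = 0x2000), and rejects an unaligned
 * start (0x7f0000000001), an unaligned or zero len, a range that wraps
 * around, one starting below mmap_min_addr, or one extending past
 * mm->task_size.
 */
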
86039bd3 AA |
1288 | static int userfaultfd_register(struct userfaultfd_ctx *ctx, |
1289 | unsigned long arg) | |
1290 | { | |
1291 | struct mm_struct *mm = ctx->mm; | |
1292 | struct vm_area_struct *vma, *prev, *cur; | |
1293 | int ret; | |
1294 | struct uffdio_register uffdio_register; | |
1295 | struct uffdio_register __user *user_uffdio_register; | |
1296 | unsigned long vm_flags, new_flags; | |
1297 | bool found; | |
ce53e8e6 | 1298 | bool basic_ioctls; |
86039bd3 | 1299 | unsigned long start, end, vma_end; |
11a9b902 | 1300 | struct vma_iterator vmi; |
d61ea1cb | 1301 | bool wp_async = userfaultfd_wp_async_ctx(ctx); |
86039bd3 AA |
1302 | |
1303 | user_uffdio_register = (struct uffdio_register __user *) arg; | |
1304 | ||
1305 | ret = -EFAULT; | |
1306 | if (copy_from_user(&uffdio_register, user_uffdio_register, | |
1307 | sizeof(uffdio_register)-sizeof(__u64))) | |
1308 | goto out; | |
1309 | ||
1310 | ret = -EINVAL; | |
1311 | if (!uffdio_register.mode) | |
1312 | goto out; | |
7677f7fd | 1313 | if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES) |
86039bd3 AA |
1314 | goto out; |
1315 | vm_flags = 0; | |
1316 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING) | |
1317 | vm_flags |= VM_UFFD_MISSING; | |
00b151f2 PX |
1318 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) { |
1319 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP | |
1320 | goto out; | |
1321 | #endif | |
86039bd3 | 1322 | vm_flags |= VM_UFFD_WP; |
00b151f2 | 1323 | } |
7677f7fd AR |
1324 | if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) { |
1325 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR | |
1326 | goto out; | |
1327 | #endif | |
1328 | vm_flags |= VM_UFFD_MINOR; | |
1329 | } | |
86039bd3 | 1330 | |
e71e2ace | 1331 | ret = validate_range(mm, uffdio_register.range.start, |
86039bd3 AA |
1332 | uffdio_register.range.len); |
1333 | if (ret) | |
1334 | goto out; | |
1335 | ||
1336 | start = uffdio_register.range.start; | |
1337 | end = start + uffdio_register.range.len; | |
1338 | ||
d2005e3f ON |
1339 | ret = -ENOMEM; |
1340 | if (!mmget_not_zero(mm)) | |
1341 | goto out; | |
1342 | ||
11a9b902 | 1343 | ret = -EINVAL; |
d8ed45c5 | 1344 | mmap_write_lock(mm); |
11a9b902 LH |
1345 | vma_iter_init(&vmi, mm, start); |
1346 | vma = vma_find(&vmi, end); | |
86039bd3 AA |
1347 | if (!vma) |
1348 | goto out_unlock; | |
1349 | ||
cab350af MK |
1350 | /* |
1351 | * If the first vma contains huge pages, make sure start address | |
1352 | * is aligned to huge page size. | |
1353 | */ | |
1354 | if (is_vm_hugetlb_page(vma)) { | |
1355 | unsigned long vma_hpagesize = vma_kernel_pagesize(vma); | |
1356 | ||
1357 | if (start & (vma_hpagesize - 1)) | |
1358 | goto out_unlock; | |
1359 | } | |
1360 | ||
86039bd3 AA |
1361 | /* |
1362 | * Search for not compatible vmas. | |
86039bd3 AA |
1363 | */ |
1364 | found = false; | |
ce53e8e6 | 1365 | basic_ioctls = false; |
11a9b902 LH |
1366 | cur = vma; |
1367 | do { | |
86039bd3 AA |
1368 | cond_resched(); |
1369 | ||
1370 | BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ | |
7677f7fd | 1371 | !!(cur->vm_flags & __VM_UFFD_FLAGS)); |
86039bd3 AA |
1372 | |
1373 | /* check not compatible vmas */ | |
1374 | ret = -EINVAL; | |
d61ea1cb | 1375 | if (!vma_can_userfault(cur, vm_flags, wp_async)) |
86039bd3 | 1376 | goto out_unlock; |
29ec9066 AA |
1377 | |
1378 | /* | |
1379 | * UFFDIO_COPY will fill file holes even without | |
1380 | * PROT_WRITE. This check enforces that if this is a | |
1381 | * MAP_SHARED, the process has write permission to the backing | |
1382 | * file. If VM_MAYWRITE is set it also enforces that on a | |
1383 | * MAP_SHARED vma: there is no F_WRITE_SEAL and no further | |
1384 | * F_WRITE_SEAL can be taken until the vma is destroyed. | |
1385 | */ | |
1386 | ret = -EPERM; | |
1387 | if (unlikely(!(cur->vm_flags & VM_MAYWRITE))) | |
1388 | goto out_unlock; | |
1389 | ||
cab350af MK |
1390 | /* |
1391 | * If this vma contains ending address, and huge pages | |
1392 | * check alignment. | |
1393 | */ | |
1394 | if (is_vm_hugetlb_page(cur) && end <= cur->vm_end && | |
1395 | end > cur->vm_start) { | |
1396 | unsigned long vma_hpagesize = vma_kernel_pagesize(cur); | |
1397 | ||
1398 | ret = -EINVAL; | |
1399 | ||
1400 | if (end & (vma_hpagesize - 1)) | |
1401 | goto out_unlock; | |
1402 | } | |
63b2d417 AA |
1403 | if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE)) |
1404 | goto out_unlock; | |
86039bd3 AA |
1405 | |
1406 | /* | |
1407 | * Check that this vma isn't already owned by a | |
1408 | * different userfaultfd. We can't allow more than one | |
1409 | * userfaultfd to own a single vma simultaneously or we | |
1410 | * wouldn't know which one to deliver the userfaults to. | |
1411 | */ | |
1412 | ret = -EBUSY; | |
1413 | if (cur->vm_userfaultfd_ctx.ctx && | |
1414 | cur->vm_userfaultfd_ctx.ctx != ctx) | |
1415 | goto out_unlock; | |
1416 | ||
cab350af MK |
1417 | /* |
1418 | * Note vmas containing huge pages | |
1419 | */ | |
ce53e8e6 MR |
1420 | if (is_vm_hugetlb_page(cur)) |
1421 | basic_ioctls = true; | |
cab350af | 1422 | |
86039bd3 | 1423 | found = true; |
11a9b902 | 1424 | } for_each_vma_range(vmi, cur, end); |
86039bd3 AA |
1425 | BUG_ON(!found); |
1426 | ||
11a9b902 LH |
1427 | vma_iter_set(&vmi, start); |
1428 | prev = vma_prev(&vmi); | |
270aa010 PX |
1429 | if (vma->vm_start < start) |
1430 | prev = vma; | |
86039bd3 AA |
1431 | |
1432 | ret = 0; | |
11a9b902 | 1433 | for_each_vma_range(vmi, vma, end) { |
86039bd3 AA |
1434 | cond_resched(); |
1435 | ||
d61ea1cb | 1436 | BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async)); |
86039bd3 AA |
1437 | BUG_ON(vma->vm_userfaultfd_ctx.ctx && |
1438 | vma->vm_userfaultfd_ctx.ctx != ctx); | |
29ec9066 | 1439 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); |
86039bd3 AA |
1440 | |
1441 | /* | |
1442 | * Nothing to do: this vma is already registered into this | |
1443 | * userfaultfd and with the right tracking mode too. | |
1444 | */ | |
1445 | if (vma->vm_userfaultfd_ctx.ctx == ctx && | |
1446 | (vma->vm_flags & vm_flags) == vm_flags) | |
1447 | goto skip; | |
1448 | ||
1449 | if (vma->vm_start > start) | |
1450 | start = vma->vm_start; | |
1451 | vma_end = min(end, vma->vm_end); | |
1452 | ||
7677f7fd | 1453 | new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags; |
94d7d923 LS |
1454 | vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end, |
1455 | new_flags, | |
1456 | (struct vm_userfaultfd_ctx){ctx}); | |
1457 | if (IS_ERR(vma)) { | |
1458 | ret = PTR_ERR(vma); | |
1459 | break; | |
86039bd3 | 1460 | } |
94d7d923 | 1461 | |
86039bd3 AA |
1462 | /* |
1463 | * In the vma_merge() successful mprotect-like case 8: | |
1464 | * the next vma was merged into the current one and | |
1465 | * the current one has not been updated yet. | |
1466 | */ | |
60081bf1 | 1467 | vma_start_write(vma); |
51d3d5eb | 1468 | userfaultfd_set_vm_flags(vma, new_flags); |
86039bd3 AA |
1469 | vma->vm_userfaultfd_ctx.ctx = ctx; |
1470 | ||
6dfeaff9 PX |
1471 | if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma)) |
1472 | hugetlb_unshare_all_pmds(vma); | |
1473 | ||
86039bd3 AA |
1474 | skip: |
1475 | prev = vma; | |
1476 | start = vma->vm_end; | |
11a9b902 LH |
1477 | } |
1478 | ||
86039bd3 | 1479 | out_unlock: |
d8ed45c5 | 1480 | mmap_write_unlock(mm); |
d2005e3f | 1481 | mmput(mm); |
86039bd3 | 1482 | if (!ret) { |
14819305 PX |
1483 | __u64 ioctls_out; |
1484 | ||
1485 | ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC : | |
1486 | UFFD_API_RANGE_IOCTLS; | |
1487 | ||
1488 | /* | |
1489 | * Declare the WP ioctl only if the WP mode is | |
1490 | * specified and all checks passed for the range. | |
1491 | */ | |
1492 | if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP)) | |
1493 | ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT); | |
1494 | ||
f6191471 AR |
1495 | /* CONTINUE ioctl is only supported for MINOR ranges. */ |
1496 | if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR)) | |
1497 | ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE); | |
1498 | ||
86039bd3 AA |
1499 | /* |
1500 | * Now that we scanned all vmas we can already tell | |
1501 | * userland which ioctl methods are guaranteed to | |
1502 | * succeed on this range. | |
1503 | */ | |
14819305 | 1504 | if (put_user(ioctls_out, &user_uffdio_register->ioctls)) |
86039bd3 AA |
1505 | ret = -EFAULT; |
1506 | } | |
1507 | out: | |
1508 | return ret; | |
1509 | } | |
1510 | ||
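/*
 * A minimal userspace sketch (not part of this file) of driving
 * UFFDIO_REGISTER and checking the ioctls bitmask that the handler
 * above fills in. The helper name register_missing() is hypothetical;
 * error handling is abbreviated, and the uffd is assumed to have
 * already completed the UFFDIO_API handshake.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stdio.h>

static int register_missing(int uffd, void *addr, unsigned long len)
{
	struct uffdio_register reg = {
		/* start and len must be page-aligned (validate_range()) */
		.range = { .start = (unsigned long)addr, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};

	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
		return -1;
	/* The kernel reports which range ioctls will work on this range. */
	if (!(reg.ioctls & ((__u64)1 << _UFFDIO_COPY)))
		fprintf(stderr, "UFFDIO_COPY unavailable on this range\n");
	return 0;
}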
1511 | static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, | |
1512 | unsigned long arg) | |
1513 | { | |
1514 | struct mm_struct *mm = ctx->mm; | |
1515 | struct vm_area_struct *vma, *prev, *cur; | |
1516 | int ret; | |
1517 | struct uffdio_range uffdio_unregister; | |
1518 | unsigned long new_flags; | |
1519 | bool found; | |
1520 | unsigned long start, end, vma_end; | |
1521 | const void __user *buf = (void __user *)arg; | |
11a9b902 | 1522 | struct vma_iterator vmi; |
d61ea1cb | 1523 | bool wp_async = userfaultfd_wp_async_ctx(ctx); |
86039bd3 AA |
1524 | |
1525 | ret = -EFAULT; | |
1526 | if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) | |
1527 | goto out; | |
1528 | ||
e71e2ace | 1529 | ret = validate_range(mm, uffdio_unregister.start, |
86039bd3 AA |
1530 | uffdio_unregister.len); |
1531 | if (ret) | |
1532 | goto out; | |
1533 | ||
1534 | start = uffdio_unregister.start; | |
1535 | end = start + uffdio_unregister.len; | |
1536 | ||
d2005e3f ON |
1537 | ret = -ENOMEM; |
1538 | if (!mmget_not_zero(mm)) | |
1539 | goto out; | |
1540 | ||
d8ed45c5 | 1541 | mmap_write_lock(mm); |
86039bd3 | 1542 | ret = -EINVAL; |
11a9b902 LH |
1543 | vma_iter_init(&vmi, mm, start); |
1544 | vma = vma_find(&vmi, end); | |
1545 | if (!vma) | |
86039bd3 AA |
1546 | goto out_unlock; |
1547 | ||
cab350af MK |
1548 | /* |
1549 | * If the first vma contains huge pages, make sure start address | |
1550 | * is aligned to huge page size. | |
1551 | */ | |
1552 | if (is_vm_hugetlb_page(vma)) { | |
1553 | unsigned long vma_hpagesize = vma_kernel_pagesize(vma); | |
1554 | ||
1555 | if (start & (vma_hpagesize - 1)) | |
1556 | goto out_unlock; | |
1557 | } | |
1558 | ||
86039bd3 AA |
1559 | /* |
1560 | * Search for incompatible vmas. | |
86039bd3 AA |
1561 | */ |
1562 | found = false; | |
11a9b902 LH |
1563 | cur = vma; |
1564 | do { | |
86039bd3 AA |
1565 | cond_resched(); |
1566 | ||
1567 | BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^ | |
7677f7fd | 1568 | !!(cur->vm_flags & __VM_UFFD_FLAGS)); |
86039bd3 AA |
1569 | |
1570 | /* | |
1571 | * Check for incompatible vmas. Not strictly required | |
1572 | * here, as incompatible vmas cannot have a | |
1573 | * userfaultfd_ctx registered on them, but it | |
1574 | * provides stricter behavior so that unregistration | |
1575 | * errors are noticed. | |
1576 | */ | |
d61ea1cb | 1577 | if (!vma_can_userfault(cur, cur->vm_flags, wp_async)) |
86039bd3 AA |
1578 | goto out_unlock; |
1579 | ||
1580 | found = true; | |
11a9b902 | 1581 | } for_each_vma_range(vmi, cur, end); |
86039bd3 AA |
1582 | BUG_ON(!found); |
1583 | ||
11a9b902 LH |
1584 | vma_iter_set(&vmi, start); |
1585 | prev = vma_prev(&vmi); | |
270aa010 PX |
1586 | if (vma->vm_start < start) |
1587 | prev = vma; | |
1588 | ||
86039bd3 | 1589 | ret = 0; |
11a9b902 | 1590 | for_each_vma_range(vmi, vma, end) { |
86039bd3 AA |
1591 | cond_resched(); |
1592 | ||
d61ea1cb | 1593 | BUG_ON(!vma_can_userfault(vma, vma->vm_flags, wp_async)); |
86039bd3 AA |
1594 | |
1595 | /* | |
1596 | * Nothing to do: this vma is not registered with any | |
1597 | * userfaultfd, so there is nothing to unregister. | |
1598 | */ | |
1599 | if (!vma->vm_userfaultfd_ctx.ctx) | |
1600 | goto skip; | |
1601 | ||
01e881f5 AA |
1602 | WARN_ON(!(vma->vm_flags & VM_MAYWRITE)); |
1603 | ||
86039bd3 AA |
1604 | if (vma->vm_start > start) |
1605 | start = vma->vm_start; | |
1606 | vma_end = min(end, vma->vm_end); | |
1607 | ||
09fa5296 AA |
1608 | if (userfaultfd_missing(vma)) { |
1609 | /* | |
1610 | * Wake any concurrent pending userfault while | |
1611 | * we unregister, so they will not hang | |
1612 | * permanently, and to spare userland an explicit | |
1613 | * UFFDIO_WAKE call. | |
1614 | */ | |
1615 | struct userfaultfd_wake_range range; | |
1616 | range.start = start; | |
1617 | range.len = vma_end - start; | |
1618 | wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range); | |
1619 | } | |
1620 | ||
f369b07c PX |
1621 | /* Reset ptes for the whole vma range if wr-protected */ |
1622 | if (userfaultfd_wp(vma)) | |
61c50040 | 1623 | uffd_wp_range(vma, start, vma_end - start, false); |
f369b07c | 1624 | |
7677f7fd | 1625 | new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; |
94d7d923 LS |
1626 | vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end, |
1627 | new_flags, NULL_VM_UFFD_CTX); | |
1628 | if (IS_ERR(vma)) { | |
1629 | ret = PTR_ERR(vma); | |
1630 | break; | |
86039bd3 | 1631 | } |
94d7d923 | 1632 | |
86039bd3 AA |
1633 | /* |
1634 | * In the vma_merge() successful mprotect-like case 8: | |
1635 | * the next vma was merged into the current one and | |
1636 | * the current one has not been updated yet. | |
1637 | */ | |
60081bf1 | 1638 | vma_start_write(vma); |
51d3d5eb | 1639 | userfaultfd_set_vm_flags(vma, new_flags); |
86039bd3 AA |
1640 | vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; |
1641 | ||
1642 | skip: | |
1643 | prev = vma; | |
1644 | start = vma->vm_end; | |
11a9b902 LH |
1645 | } |
1646 | ||
86039bd3 | 1647 | out_unlock: |
d8ed45c5 | 1648 | mmap_write_unlock(mm); |
d2005e3f | 1649 | mmput(mm); |
86039bd3 AA |
1650 | out: |
1651 | return ret; | |
1652 | } | |
1653 | ||
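/*
 * A minimal userspace sketch (not part of this file) of the matching
 * unregister call. As the code above shows, unregistering also wakes
 * any userfaults still pending on the range, so no explicit
 * UFFDIO_WAKE is needed afterwards. unregister_range() is a
 * hypothetical helper name.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int unregister_range(int uffd, unsigned long start, unsigned long len)
{
	struct uffdio_range range = { .start = start, .len = len };

	return ioctl(uffd, UFFDIO_UNREGISTER, &range);
}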
1654 | /* | |
ba85c702 AA |
1655 | * userfaultfd_wake may be used in combination with the |
1656 | * UFFDIO_*_MODE_DONTWAKE flags to wake up userfaults in batches. | |
86039bd3 AA |
1657 | */ |
1658 | static int userfaultfd_wake(struct userfaultfd_ctx *ctx, | |
1659 | unsigned long arg) | |
1660 | { | |
1661 | int ret; | |
1662 | struct uffdio_range uffdio_wake; | |
1663 | struct userfaultfd_wake_range range; | |
1664 | const void __user *buf = (void __user *)arg; | |
1665 | ||
1666 | ret = -EFAULT; | |
1667 | if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) | |
1668 | goto out; | |
1669 | ||
e71e2ace | 1670 | ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); |
86039bd3 AA |
1671 | if (ret) |
1672 | goto out; | |
1673 | ||
1674 | range.start = uffdio_wake.start; | |
1675 | range.len = uffdio_wake.len; | |
1676 | ||
1677 | /* | |
1678 | * len == 0 means wake all, and we don't want to wake all | |
1679 | * here, so check it again to be sure. | |
1680 | */ | |
1681 | VM_BUG_ON(!range.len); | |
1682 | ||
1683 | wake_userfault(ctx, &range); | |
1684 | ret = 0; | |
1685 | ||
1686 | out: | |
1687 | return ret; | |
1688 | } | |
1689 | ||
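/*
 * A minimal userspace sketch (not part of this file) of how
 * UFFDIO_WAKE pairs with the *_MODE_DONTWAKE flags, per the comment
 * above: resolve a batch of faults without waking, then wake the whole
 * range once. wake_range() is a hypothetical helper name.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int wake_range(int uffd, unsigned long start, unsigned long len)
{
	struct uffdio_range range = { .start = start, .len = len };

	/* len must be non-zero: len == 0 would mean "wake all". */
	return ioctl(uffd, UFFDIO_WAKE, &range);
}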
ad465cae AA |
1690 | static int userfaultfd_copy(struct userfaultfd_ctx *ctx, |
1691 | unsigned long arg) | |
1692 | { | |
1693 | __s64 ret; | |
1694 | struct uffdio_copy uffdio_copy; | |
1695 | struct uffdio_copy __user *user_uffdio_copy; | |
1696 | struct userfaultfd_wake_range range; | |
d9712937 | 1697 | uffd_flags_t flags = 0; |
ad465cae AA |
1698 | |
1699 | user_uffdio_copy = (struct uffdio_copy __user *) arg; | |
1700 | ||
df2cc96e | 1701 | ret = -EAGAIN; |
a759a909 | 1702 | if (atomic_read(&ctx->mmap_changing)) |
df2cc96e MR |
1703 | goto out; |
1704 | ||
ad465cae AA |
1705 | ret = -EFAULT; |
1706 | if (copy_from_user(&uffdio_copy, user_uffdio_copy, | |
1707 | /* don't copy "copy" last field */ | |
1708 | sizeof(uffdio_copy)-sizeof(__s64))) | |
1709 | goto out; | |
1710 | ||
2ef5d724 AR |
1711 | ret = validate_unaligned_range(ctx->mm, uffdio_copy.src, |
1712 | uffdio_copy.len); | |
1713 | if (ret) | |
1714 | goto out; | |
e71e2ace | 1715 | ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); |
ad465cae AA |
1716 | if (ret) |
1717 | goto out; | |
2ef5d724 | 1718 | |
ad465cae | 1719 | ret = -EINVAL; |
72981e0e | 1720 | if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP)) |
ad465cae | 1721 | goto out; |
d9712937 AR |
1722 | if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP) |
1723 | flags |= MFILL_ATOMIC_WP; | |
d2005e3f | 1724 | if (mmget_not_zero(ctx->mm)) { |
5e4c24a5 LG |
1725 | ret = mfill_atomic_copy(ctx, uffdio_copy.dst, uffdio_copy.src, |
1726 | uffdio_copy.len, flags); | |
d2005e3f | 1727 | mmput(ctx->mm); |
96333187 | 1728 | } else { |
e86b298b | 1729 | return -ESRCH; |
d2005e3f | 1730 | } |
ad465cae AA |
1731 | if (unlikely(put_user(ret, &user_uffdio_copy->copy))) |
1732 | return -EFAULT; | |
1733 | if (ret < 0) | |
1734 | goto out; | |
1735 | BUG_ON(!ret); | |
1736 | /* len == 0 would wake all */ | |
1737 | range.len = ret; | |
1738 | if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) { | |
1739 | range.start = uffdio_copy.dst; | |
1740 | wake_userfault(ctx, &range); | |
1741 | } | |
1742 | ret = range.len == uffdio_copy.len ? 0 : -EAGAIN; | |
1743 | out: | |
1744 | return ret; | |
1745 | } | |
1746 | ||
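/*
 * A minimal userspace sketch (not part of this file) of resolving a
 * missing fault with UFFDIO_COPY from a fault-handling thread. On
 * success, .copy holds the number of bytes copied; a short copy makes
 * the ioctl path above return -EAGAIN. The helper name and page_size
 * parameter are illustrative only.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static long resolve_missing(int uffd, unsigned long fault_addr,
			    void *src_page, unsigned long page_size)
{
	struct uffdio_copy copy = {
		.dst  = fault_addr & ~(page_size - 1),	/* page-align dst */
		.src  = (unsigned long)src_page,
		.len  = page_size,
		.mode = 0,		/* 0 == wake the faulting thread */
	};

	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
		return -1;
	return copy.copy;		/* bytes actually copied */
}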
1747 | static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, | |
1748 | unsigned long arg) | |
1749 | { | |
1750 | __s64 ret; | |
1751 | struct uffdio_zeropage uffdio_zeropage; | |
1752 | struct uffdio_zeropage __user *user_uffdio_zeropage; | |
1753 | struct userfaultfd_wake_range range; | |
1754 | ||
1755 | user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg; | |
1756 | ||
df2cc96e | 1757 | ret = -EAGAIN; |
a759a909 | 1758 | if (atomic_read(&ctx->mmap_changing)) |
df2cc96e MR |
1759 | goto out; |
1760 | ||
ad465cae AA |
1761 | ret = -EFAULT; |
1762 | if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage, | |
1763 | /* don't copy "zeropage" last field */ | |
1764 | sizeof(uffdio_zeropage)-sizeof(__s64))) | |
1765 | goto out; | |
1766 | ||
e71e2ace | 1767 | ret = validate_range(ctx->mm, uffdio_zeropage.range.start, |
ad465cae AA |
1768 | uffdio_zeropage.range.len); |
1769 | if (ret) | |
1770 | goto out; | |
1771 | ret = -EINVAL; | |
1772 | if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) | |
1773 | goto out; | |
1774 | ||
d2005e3f | 1775 | if (mmget_not_zero(ctx->mm)) { |
5e4c24a5 LG |
1776 | ret = mfill_atomic_zeropage(ctx, uffdio_zeropage.range.start, |
1777 | uffdio_zeropage.range.len); | |
d2005e3f | 1778 | mmput(ctx->mm); |
9d95aa4b | 1779 | } else { |
e86b298b | 1780 | return -ESRCH; |
d2005e3f | 1781 | } |
ad465cae AA |
1782 | if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) |
1783 | return -EFAULT; | |
1784 | if (ret < 0) | |
1785 | goto out; | |
1786 | /* len == 0 would wake all */ | |
1787 | BUG_ON(!ret); | |
1788 | range.len = ret; | |
1789 | if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) { | |
1790 | range.start = uffdio_zeropage.range.start; | |
1791 | wake_userfault(ctx, &range); | |
1792 | } | |
1793 | ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN; | |
1794 | out: | |
1795 | return ret; | |
1796 | } | |
1797 | ||
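/*
 * A minimal userspace sketch (not part of this file): UFFDIO_ZEROPAGE
 * provides zero-filled pages for the range instead of copying data,
 * which is cheaper when the content is known to be zeros.
 * resolve_with_zeros() is a hypothetical helper name.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static long resolve_with_zeros(int uffd, unsigned long start,
			       unsigned long len)
{
	struct uffdio_zeropage zp = {
		.range = { .start = start, .len = len },
		.mode  = 0,		/* wake after zeroing */
	};

	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
		return -1;
	return zp.zeropage;		/* bytes actually zero-mapped */
}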
63b2d417 AA |
1798 | static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, |
1799 | unsigned long arg) | |
1800 | { | |
1801 | int ret; | |
1802 | struct uffdio_writeprotect uffdio_wp; | |
1803 | struct uffdio_writeprotect __user *user_uffdio_wp; | |
1804 | struct userfaultfd_wake_range range; | |
23080e27 | 1805 | bool mode_wp, mode_dontwake; |
63b2d417 | 1806 | |
a759a909 | 1807 | if (atomic_read(&ctx->mmap_changing)) |
63b2d417 AA |
1808 | return -EAGAIN; |
1809 | ||
1810 | user_uffdio_wp = (struct uffdio_writeprotect __user *) arg; | |
1811 | ||
1812 | if (copy_from_user(&uffdio_wp, user_uffdio_wp, | |
1813 | sizeof(struct uffdio_writeprotect))) | |
1814 | return -EFAULT; | |
1815 | ||
e71e2ace | 1816 | ret = validate_range(ctx->mm, uffdio_wp.range.start, |
63b2d417 AA |
1817 | uffdio_wp.range.len); |
1818 | if (ret) | |
1819 | return ret; | |
1820 | ||
1821 | if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE | | |
1822 | UFFDIO_WRITEPROTECT_MODE_WP)) | |
1823 | return -EINVAL; | |
23080e27 PX |
1824 | |
1825 | mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP; | |
1826 | mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE; | |
1827 | ||
1828 | if (mode_wp && mode_dontwake) | |
63b2d417 AA |
1829 | return -EINVAL; |
1830 | ||
cb185d5f | 1831 | if (mmget_not_zero(ctx->mm)) { |
5e4c24a5 LG |
1832 | ret = mwriteprotect_range(ctx, uffdio_wp.range.start, |
1833 | uffdio_wp.range.len, mode_wp); | |
cb185d5f NA |
1834 | mmput(ctx->mm); |
1835 | } else { | |
1836 | return -ESRCH; | |
1837 | } | |
1838 | ||
63b2d417 AA |
1839 | if (ret) |
1840 | return ret; | |
1841 | ||
23080e27 | 1842 | if (!mode_wp && !mode_dontwake) { |
63b2d417 AA |
1843 | range.start = uffdio_wp.range.start; |
1844 | range.len = uffdio_wp.range.len; | |
1845 | wake_userfault(ctx, &range); | |
1846 | } | |
1847 | return ret; | |
1848 | } | |
1849 | ||
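/*
 * A minimal userspace sketch (not part of this file) of toggling write
 * protection with UFFDIO_WRITEPROTECT. Note the constraints enforced
 * above: WP and DONTWAKE together are rejected, and clearing WP with
 * neither flag set also wakes waiters. set_wp() is a hypothetical
 * helper name.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stdbool.h>

static int set_wp(int uffd, unsigned long start, unsigned long len, bool wp)
{
	struct uffdio_writeprotect uwp = {
		.range = { .start = start, .len = len },
		.mode  = wp ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	return ioctl(uffd, UFFDIO_WRITEPROTECT, &uwp);
}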
f6191471 AR |
1850 | static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) |
1851 | { | |
1852 | __s64 ret; | |
1853 | struct uffdio_continue uffdio_continue; | |
1854 | struct uffdio_continue __user *user_uffdio_continue; | |
1855 | struct userfaultfd_wake_range range; | |
02891844 | 1856 | uffd_flags_t flags = 0; |
f6191471 AR |
1857 | |
1858 | user_uffdio_continue = (struct uffdio_continue __user *)arg; | |
1859 | ||
1860 | ret = -EAGAIN; | |
a759a909 | 1861 | if (atomic_read(&ctx->mmap_changing)) |
f6191471 AR |
1862 | goto out; |
1863 | ||
1864 | ret = -EFAULT; | |
1865 | if (copy_from_user(&uffdio_continue, user_uffdio_continue, | |
1866 | /* don't copy the output fields */ | |
1867 | sizeof(uffdio_continue) - (sizeof(__s64)))) | |
1868 | goto out; | |
1869 | ||
e71e2ace | 1870 | ret = validate_range(ctx->mm, uffdio_continue.range.start, |
f6191471 AR |
1871 | uffdio_continue.range.len); |
1872 | if (ret) | |
1873 | goto out; | |
1874 | ||
1875 | ret = -EINVAL; | |
02891844 AR |
1876 | if (uffdio_continue.mode & ~(UFFDIO_CONTINUE_MODE_DONTWAKE | |
1877 | UFFDIO_CONTINUE_MODE_WP)) | |
f6191471 | 1878 | goto out; |
02891844 AR |
1879 | if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP) |
1880 | flags |= MFILL_ATOMIC_WP; | |
f6191471 AR |
1881 | |
1882 | if (mmget_not_zero(ctx->mm)) { | |
5e4c24a5 LG |
1883 | ret = mfill_atomic_continue(ctx, uffdio_continue.range.start, |
1884 | uffdio_continue.range.len, flags); | |
f6191471 AR |
1885 | mmput(ctx->mm); |
1886 | } else { | |
1887 | return -ESRCH; | |
1888 | } | |
1889 | ||
1890 | if (unlikely(put_user(ret, &user_uffdio_continue->mapped))) | |
1891 | return -EFAULT; | |
1892 | if (ret < 0) | |
1893 | goto out; | |
1894 | ||
1895 | /* len == 0 would wake all */ | |
1896 | BUG_ON(!ret); | |
1897 | range.len = ret; | |
1898 | if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) { | |
1899 | range.start = uffdio_continue.range.start; | |
1900 | wake_userfault(ctx, &range); | |
1901 | } | |
1902 | ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN; | |
1903 | ||
1904 | out: | |
1905 | return ret; | |
1906 | } | |
1907 | ||
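/*
 * A minimal userspace sketch (not part of this file) of resolving a
 * minor fault with UFFDIO_CONTINUE. Unlike UFFDIO_COPY, the page cache
 * already holds the data; the ioctl only installs the page table
 * entries. The helper name and page_size parameter are illustrative.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static long resolve_minor(int uffd, unsigned long fault_addr,
			  unsigned long page_size)
{
	struct uffdio_continue cont = {
		.range = {
			.start = fault_addr & ~(page_size - 1),
			.len   = page_size,
		},
		.mode  = 0,
	};

	if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1)
		return -1;
	return cont.mapped;		/* bytes actually mapped */
}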
fc71884a AR |
1908 | static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg) |
1909 | { | |
1910 | __s64 ret; | |
1911 | struct uffdio_poison uffdio_poison; | |
1912 | struct uffdio_poison __user *user_uffdio_poison; | |
1913 | struct userfaultfd_wake_range range; | |
1914 | ||
1915 | user_uffdio_poison = (struct uffdio_poison __user *)arg; | |
1916 | ||
1917 | ret = -EAGAIN; | |
1918 | if (atomic_read(&ctx->mmap_changing)) | |
1919 | goto out; | |
1920 | ||
1921 | ret = -EFAULT; | |
1922 | if (copy_from_user(&uffdio_poison, user_uffdio_poison, | |
1923 | /* don't copy the output fields */ | |
1924 | sizeof(uffdio_poison) - (sizeof(__s64)))) | |
1925 | goto out; | |
1926 | ||
1927 | ret = validate_range(ctx->mm, uffdio_poison.range.start, | |
1928 | uffdio_poison.range.len); | |
1929 | if (ret) | |
1930 | goto out; | |
1931 | ||
1932 | ret = -EINVAL; | |
1933 | if (uffdio_poison.mode & ~UFFDIO_POISON_MODE_DONTWAKE) | |
1934 | goto out; | |
1935 | ||
1936 | if (mmget_not_zero(ctx->mm)) { | |
5e4c24a5 LG |
1937 | ret = mfill_atomic_poison(ctx, uffdio_poison.range.start, |
1938 | uffdio_poison.range.len, 0); | |
fc71884a AR |
1939 | mmput(ctx->mm); |
1940 | } else { | |
1941 | return -ESRCH; | |
1942 | } | |
1943 | ||
1944 | if (unlikely(put_user(ret, &user_uffdio_poison->updated))) | |
1945 | return -EFAULT; | |
1946 | if (ret < 0) | |
1947 | goto out; | |
1948 | ||
1949 | /* len == 0 would wake all */ | |
1950 | BUG_ON(!ret); | |
1951 | range.len = ret; | |
1952 | if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) { | |
1953 | range.start = uffdio_poison.range.start; | |
1954 | wake_userfault(ctx, &range); | |
1955 | } | |
1956 | ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN; | |
1957 | ||
1958 | out: | |
1959 | return ret; | |
1960 | } | |
1961 | ||
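/*
 * A minimal userspace sketch (not part of this file): UFFDIO_POISON
 * installs markers so that future accesses to the range fault with
 * SIGBUS, as if the memory were hardware-poisoned -- useful e.g. when a
 * migration source page has been lost. poison_range() is a
 * hypothetical helper name.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static long poison_range(int uffd, unsigned long start, unsigned long len)
{
	struct uffdio_poison poison = {
		.range = { .start = start, .len = len },
		.mode  = 0,
	};

	if (ioctl(uffd, UFFDIO_POISON, &poison) == -1)
		return -1;
	return poison.updated;		/* bytes actually marked */
}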
d61ea1cb PX |
1962 | bool userfaultfd_wp_async(struct vm_area_struct *vma) |
1963 | { | |
1964 | return userfaultfd_wp_async_ctx(vma->vm_userfaultfd_ctx.ctx); | |
1965 | } | |
1966 | ||
9cd75c3c PE |
1967 | static inline unsigned int uffd_ctx_features(__u64 user_features) |
1968 | { | |
1969 | /* | |
22e5fe2a NA |
1970 | * For the current set of features the bits just coincide. Set |
1971 | * UFFD_FEATURE_INITIALIZED to mark the features as enabled. | |
9cd75c3c | 1972 | */ |
22e5fe2a | 1973 | return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED; |
9cd75c3c PE |
1974 | } |
1975 | ||
adef4406 AA |
1976 | static int userfaultfd_move(struct userfaultfd_ctx *ctx, |
1977 | unsigned long arg) | |
1978 | { | |
1979 | __s64 ret; | |
1980 | struct uffdio_move uffdio_move; | |
1981 | struct uffdio_move __user *user_uffdio_move; | |
1982 | struct userfaultfd_wake_range range; | |
1983 | struct mm_struct *mm = ctx->mm; | |
1984 | ||
1985 | user_uffdio_move = (struct uffdio_move __user *) arg; | |
1986 | ||
1987 | if (atomic_read(&ctx->mmap_changing)) | |
1988 | return -EAGAIN; | |
1989 | ||
1990 | if (copy_from_user(&uffdio_move, user_uffdio_move, | |
1991 | /* don't copy "move" last field */ | |
1992 | sizeof(uffdio_move)-sizeof(__s64))) | |
1993 | return -EFAULT; | |
1994 | ||
1995 | /* Do not allow cross-mm moves. */ | |
1996 | if (mm != current->mm) | |
1997 | return -EINVAL; | |
1998 | ||
1999 | ret = validate_range(mm, uffdio_move.dst, uffdio_move.len); | |
2000 | if (ret) | |
2001 | return ret; | |
2002 | ||
2003 | ret = validate_range(mm, uffdio_move.src, uffdio_move.len); | |
2004 | if (ret) | |
2005 | return ret; | |
2006 | ||
2007 | if (uffdio_move.mode & ~(UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES| | |
2008 | UFFDIO_MOVE_MODE_DONTWAKE)) | |
2009 | return -EINVAL; | |
2010 | ||
2011 | if (mmget_not_zero(mm)) { | |
867a43a3 LG |
2012 | ret = move_pages(ctx, uffdio_move.dst, uffdio_move.src, |
2013 | uffdio_move.len, uffdio_move.mode); | |
adef4406 AA |
2014 | mmput(mm); |
2015 | } else { | |
2016 | return -ESRCH; | |
2017 | } | |
2018 | ||
2019 | if (unlikely(put_user(ret, &user_uffdio_move->move))) | |
2020 | return -EFAULT; | |
2021 | if (ret < 0) | |
2022 | goto out; | |
2023 | ||
2024 | /* len == 0 would wake all */ | |
2025 | VM_WARN_ON(!ret); | |
2026 | range.len = ret; | |
2027 | if (!(uffdio_move.mode & UFFDIO_MOVE_MODE_DONTWAKE)) { | |
2028 | range.start = uffdio_move.dst; | |
2029 | wake_userfault(ctx, &range); | |
2030 | } | |
2031 | ret = range.len == uffdio_move.len ? 0 : -EAGAIN; | |
2032 | ||
2033 | out: | |
2034 | return ret; | |
2035 | } | |
2036 | ||
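/*
 * A minimal userspace sketch (not part of this file): UFFDIO_MOVE
 * remaps pages from src to dst within the same mm (cross-mm moves are
 * rejected above) instead of copying them. move_range() is a
 * hypothetical helper name.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static long move_range(int uffd, unsigned long dst, unsigned long src,
		       unsigned long len)
{
	struct uffdio_move mv = {
		.dst  = dst,
		.src  = src,
		.len  = len,
		.mode = 0,	/* or UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES */
	};

	if (ioctl(uffd, UFFDIO_MOVE, &mv) == -1)
		return -1;
	return mv.move;		/* bytes actually moved */
}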
86039bd3 AA |
2037 | /* |
2038 | * Userland asks for a certain API version, and we return which bits | |
2039 | * and ioctl commands are implemented in this kernel for that API | |
2040 | * version, or -EINVAL if it is unknown. | |
2041 | */ | |
2042 | static int userfaultfd_api(struct userfaultfd_ctx *ctx, | |
2043 | unsigned long arg) | |
2044 | { | |
2045 | struct uffdio_api uffdio_api; | |
2046 | void __user *buf = (void __user *)arg; | |
22e5fe2a | 2047 | unsigned int ctx_features; |
86039bd3 | 2048 | int ret; |
65603144 | 2049 | __u64 features; |
86039bd3 | 2050 | |
86039bd3 | 2051 | ret = -EFAULT; |
a9b85f94 | 2052 | if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api))) |
86039bd3 | 2053 | goto out; |
2ff559f3 PX |
2054 | features = uffdio_api.features; |
2055 | ret = -EINVAL; | |
2056 | if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) | |
2057 | goto err_out; | |
3c1c24d9 MR |
2058 | ret = -EPERM; |
2059 | if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE)) | |
2060 | goto err_out; | |
d61ea1cb PX |
2061 | |
2062 | /* WP_ASYNC relies on WP_UNPOPULATED, choose it unconditionally */ | |
2063 | if (features & UFFD_FEATURE_WP_ASYNC) | |
2064 | features |= UFFD_FEATURE_WP_UNPOPULATED; | |
2065 | ||
65603144 AA |
2066 | /* report all available features and ioctls to userland */ |
2067 | uffdio_api.features = UFFD_API_FEATURES; | |
7677f7fd | 2068 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR |
964ab004 AR |
2069 | uffdio_api.features &= |
2070 | ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM); | |
00b151f2 PX |
2071 | #endif |
2072 | #ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP | |
2073 | uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP; | |
b1f9e876 PX |
2074 | #endif |
2075 | #ifndef CONFIG_PTE_MARKER_UFFD_WP | |
2076 | uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM; | |
2bad466c | 2077 | uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED; |
d61ea1cb | 2078 | uffdio_api.features &= ~UFFD_FEATURE_WP_ASYNC; |
7677f7fd | 2079 | #endif |
86039bd3 AA |
2080 | uffdio_api.ioctls = UFFD_API_IOCTLS; |
2081 | ret = -EFAULT; | |
2082 | if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) | |
2083 | goto out; | |
22e5fe2a | 2084 | |
65603144 | 2085 | /* only enable the requested features for this uffd context */ |
22e5fe2a NA |
2086 | ctx_features = uffd_ctx_features(features); |
2087 | ret = -EINVAL; | |
2088 | if (cmpxchg(&ctx->features, 0, ctx_features) != 0) | |
2089 | goto err_out; | |
2090 | ||
86039bd3 AA |
2091 | ret = 0; |
2092 | out: | |
2093 | return ret; | |
3c1c24d9 MR |
2094 | err_out: |
2095 | memset(&uffdio_api, 0, sizeof(uffdio_api)); | |
2096 | if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) | |
2097 | ret = -EFAULT; | |
2098 | goto out; | |
86039bd3 AA |
2099 | } |
2100 | ||
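/*
 * A minimal userspace sketch (not part of this file) of the UFFDIO_API
 * handshake that must precede every other ioctl (enforced via
 * UFFD_FEATURE_INITIALIZED above). On return, .features reports all
 * features the kernel supports, while only the requested ones are
 * latched into this context. uffd_handshake() is a hypothetical
 * helper name.
 */
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

static int uffd_handshake(int uffd, __u64 wanted_features)
{
	struct uffdio_api api = {
		.api      = UFFD_API,
		.features = wanted_features,	/* 0 == basics only */
	};

	return ioctl(uffd, UFFDIO_API, &api);
}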
2101 | static long userfaultfd_ioctl(struct file *file, unsigned cmd, | |
2102 | unsigned long arg) | |
2103 | { | |
2104 | int ret = -EINVAL; | |
2105 | struct userfaultfd_ctx *ctx = file->private_data; | |
2106 | ||
22e5fe2a | 2107 | if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx)) |
e6485a47 AA |
2108 | return -EINVAL; |
2109 | ||
86039bd3 AA |
2110 | switch(cmd) { |
2111 | case UFFDIO_API: | |
2112 | ret = userfaultfd_api(ctx, arg); | |
2113 | break; | |
2114 | case UFFDIO_REGISTER: | |
2115 | ret = userfaultfd_register(ctx, arg); | |
2116 | break; | |
2117 | case UFFDIO_UNREGISTER: | |
2118 | ret = userfaultfd_unregister(ctx, arg); | |
2119 | break; | |
2120 | case UFFDIO_WAKE: | |
2121 | ret = userfaultfd_wake(ctx, arg); | |
2122 | break; | |
ad465cae AA |
2123 | case UFFDIO_COPY: |
2124 | ret = userfaultfd_copy(ctx, arg); | |
2125 | break; | |
2126 | case UFFDIO_ZEROPAGE: | |
2127 | ret = userfaultfd_zeropage(ctx, arg); | |
2128 | break; | |
adef4406 AA |
2129 | case UFFDIO_MOVE: |
2130 | ret = userfaultfd_move(ctx, arg); | |
2131 | break; | |
63b2d417 AA |
2132 | case UFFDIO_WRITEPROTECT: |
2133 | ret = userfaultfd_writeprotect(ctx, arg); | |
2134 | break; | |
f6191471 AR |
2135 | case UFFDIO_CONTINUE: |
2136 | ret = userfaultfd_continue(ctx, arg); | |
2137 | break; | |
fc71884a AR |
2138 | case UFFDIO_POISON: |
2139 | ret = userfaultfd_poison(ctx, arg); | |
2140 | break; | |
86039bd3 AA |
2141 | } |
2142 | return ret; | |
2143 | } | |
2144 | ||
2145 | #ifdef CONFIG_PROC_FS | |
2146 | static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) | |
2147 | { | |
2148 | struct userfaultfd_ctx *ctx = f->private_data; | |
ac6424b9 | 2149 | wait_queue_entry_t *wq; |
86039bd3 AA |
2150 | unsigned long pending = 0, total = 0; |
2151 | ||
cbcfa130 | 2152 | spin_lock_irq(&ctx->fault_pending_wqh.lock); |
2055da97 | 2153 | list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { |
15b726ef AA |
2154 | pending++; |
2155 | total++; | |
2156 | } | |
2055da97 | 2157 | list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { |
86039bd3 AA |
2158 | total++; |
2159 | } | |
cbcfa130 | 2160 | spin_unlock_irq(&ctx->fault_pending_wqh.lock); |
86039bd3 AA |
2161 | |
2162 | /* | |
2163 | * If more protocols are added, they will all be shown, | |
2164 | * separated by a space, like this: | |
2165 | * protocols: aa:... bb:... | |
2166 | */ | |
2167 | seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n", | |
045098e9 | 2168 | pending, total, UFFD_API, ctx->features, |
86039bd3 AA |
2169 | UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS); |
2170 | } | |
2171 | #endif | |
2172 | ||
2173 | static const struct file_operations userfaultfd_fops = { | |
2174 | #ifdef CONFIG_PROC_FS | |
2175 | .show_fdinfo = userfaultfd_show_fdinfo, | |
2176 | #endif | |
2177 | .release = userfaultfd_release, | |
2178 | .poll = userfaultfd_poll, | |
2179 | .read = userfaultfd_read, | |
2180 | .unlocked_ioctl = userfaultfd_ioctl, | |
1832f2d8 | 2181 | .compat_ioctl = compat_ptr_ioctl, |
86039bd3 AA |
2182 | .llseek = noop_llseek, |
2183 | }; | |
2184 | ||
3004ec9c AA |
2185 | static void init_once_userfaultfd_ctx(void *mem) |
2186 | { | |
2187 | struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; | |
2188 | ||
2189 | init_waitqueue_head(&ctx->fault_pending_wqh); | |
2190 | init_waitqueue_head(&ctx->fault_wqh); | |
9cd75c3c | 2191 | init_waitqueue_head(&ctx->event_wqh); |
3004ec9c | 2192 | init_waitqueue_head(&ctx->fd_wqh); |
2ca97ac8 | 2193 | seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock); |
3004ec9c AA |
2194 | } |
2195 | ||
2d5de004 | 2196 | static int new_userfaultfd(int flags) |
86039bd3 | 2197 | { |
86039bd3 | 2198 | struct userfaultfd_ctx *ctx; |
284cd241 | 2199 | int fd; |
86039bd3 AA |
2200 | |
2201 | BUG_ON(!current->mm); | |
2202 | ||
2203 | /* Check the UFFD_* constants for consistency. */ | |
37cd0575 | 2204 | BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS); |
86039bd3 AA |
2205 | BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC); |
2206 | BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK); | |
2207 | ||
37cd0575 | 2208 | if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY)) |
284cd241 | 2209 | return -EINVAL; |
86039bd3 | 2210 | |
3004ec9c | 2211 | ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); |
86039bd3 | 2212 | if (!ctx) |
284cd241 | 2213 | return -ENOMEM; |
86039bd3 | 2214 | |
ca880420 | 2215 | refcount_set(&ctx->refcount, 1); |
86039bd3 | 2216 | ctx->flags = flags; |
9cd75c3c | 2217 | ctx->features = 0; |
86039bd3 | 2218 | ctx->released = false; |
5e4c24a5 | 2219 | init_rwsem(&ctx->map_changing_lock); |
a759a909 | 2220 | atomic_set(&ctx->mmap_changing, 0); |
86039bd3 AA |
2221 | ctx->mm = current->mm; |
2222 | /* prevent the mm struct from being freed */ | |
f1f10076 | 2223 | mmgrab(ctx->mm); |
86039bd3 | 2224 | |
4f0b9194 PB |
2225 | /* Create a new inode so that the LSM can block the creation. */ |
2226 | fd = anon_inode_create_getfd("[userfaultfd]", &userfaultfd_fops, ctx, | |
abec3d01 | 2227 | O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL); |
284cd241 | 2228 | if (fd < 0) { |
d2005e3f | 2229 | mmdrop(ctx->mm); |
3004ec9c | 2230 | kmem_cache_free(userfaultfd_ctx_cachep, ctx); |
c03e946f | 2231 | } |
86039bd3 | 2232 | return fd; |
86039bd3 | 2233 | } |
3004ec9c | 2234 | |
2d5de004 AR |
2235 | static inline bool userfaultfd_syscall_allowed(int flags) |
2236 | { | |
2237 | /* Userspace-only page faults are always allowed */ | |
2238 | if (flags & UFFD_USER_MODE_ONLY) | |
2239 | return true; | |
2240 | ||
2241 | /* | |
2242 | * The user is requesting a userfaultfd which can handle kernel faults. | |
2243 | * Privileged users are always allowed to do this. | |
2244 | */ | |
2245 | if (capable(CAP_SYS_PTRACE)) | |
2246 | return true; | |
2247 | ||
2248 | /* Otherwise, access to kernel fault handling is sysctl controlled. */ | |
2249 | return sysctl_unprivileged_userfaultfd; | |
2250 | } | |
2251 | ||
2252 | SYSCALL_DEFINE1(userfaultfd, int, flags) | |
2253 | { | |
2254 | if (!userfaultfd_syscall_allowed(flags)) | |
2255 | return -EPERM; | |
2256 | ||
2257 | return new_userfaultfd(flags); | |
2258 | } | |
2259 | ||
2260 | static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags) | |
2261 | { | |
2262 | if (cmd != USERFAULTFD_IOC_NEW) | |
2263 | return -EINVAL; | |
2264 | ||
2265 | return new_userfaultfd(flags); | |
2266 | } | |
2267 | ||
2268 | static const struct file_operations userfaultfd_dev_fops = { | |
2269 | .unlocked_ioctl = userfaultfd_dev_ioctl, | |
2270 | .compat_ioctl = userfaultfd_dev_ioctl, | |
2271 | .owner = THIS_MODULE, | |
2272 | .llseek = noop_llseek, | |
2273 | }; | |
2274 | ||
2275 | static struct miscdevice userfaultfd_misc = { | |
2276 | .minor = MISC_DYNAMIC_MINOR, | |
2277 | .name = "userfaultfd", | |
2278 | .fops = &userfaultfd_dev_fops | |
2279 | }; | |
2280 | ||
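/*
 * A minimal userspace sketch (not part of this file) of obtaining a
 * uffd through either entry point above: the syscall, or the
 * /dev/userfaultfd miscdevice, whose access is governed by permissions
 * on the device node rather than the sysctl. open_userfaultfd() is a
 * hypothetical helper name; error handling is abbreviated.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

static int open_userfaultfd(void)
{
	/* User-mode-only faults are allowed even without privilege. */
	int fd = syscall(__NR_userfaultfd,
			 O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
	if (fd >= 0)
		return fd;

	/* Fall back to the miscdevice path. */
	int dev = open("/dev/userfaultfd", O_RDONLY | O_CLOEXEC);
	if (dev < 0)
		return -1;
	fd = ioctl(dev, USERFAULTFD_IOC_NEW,
		   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
	close(dev);
	return fd;
}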
3004ec9c AA |
2281 | static int __init userfaultfd_init(void) |
2282 | { | |
2d5de004 AR |
2283 | int ret; |
2284 | ||
2285 | ret = misc_register(&userfaultfd_misc); | |
2286 | if (ret) | |
2287 | return ret; | |
2288 | ||
3004ec9c AA |
2289 | userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache", |
2290 | sizeof(struct userfaultfd_ctx), | |
2291 | 0, | |
2292 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, | |
2293 | init_once_userfaultfd_ctx); | |
2d337b71 Z |
2294 | #ifdef CONFIG_SYSCTL |
2295 | register_sysctl_init("vm", vm_userfaultfd_table); | |
2296 | #endif | |
3004ec9c AA |
2297 | return 0; |
2298 | } | |
2299 | __initcall(userfaultfd_init); |