[linux-2.6-block.git] / fs / userfaultfd.c
20c8ccb1 1// SPDX-License-Identifier: GPL-2.0-only
86039bd3
AA
2/*
3 * fs/userfaultfd.c
4 *
5 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
6 * Copyright (C) 2008-2009 Red Hat, Inc.
7 * Copyright (C) 2015 Red Hat, Inc.
8 *
86039bd3
AA
9 * Some part derived from fs/eventfd.c (anon inode setup) and
10 * mm/ksm.c (mm hashing).
11 */
12
9cd75c3c 13#include <linux/list.h>
86039bd3 14#include <linux/hashtable.h>
174cd4b1 15#include <linux/sched/signal.h>
6e84f315 16#include <linux/sched/mm.h>
86039bd3 17#include <linux/mm.h>
17fca131 18#include <linux/mm_inline.h>
6dfeaff9 19#include <linux/mmu_notifier.h>
86039bd3
AA
20#include <linux/poll.h>
21#include <linux/slab.h>
22#include <linux/seq_file.h>
23#include <linux/file.h>
24#include <linux/bug.h>
25#include <linux/anon_inodes.h>
26#include <linux/syscalls.h>
27#include <linux/userfaultfd_k.h>
28#include <linux/mempolicy.h>
29#include <linux/ioctl.h>
30#include <linux/security.h>
cab350af 31#include <linux/hugetlb.h>
5c041f5d 32#include <linux/swapops.h>
2d5de004 33#include <linux/miscdevice.h>
86039bd3 34
d0d4730a 35int sysctl_unprivileged_userfaultfd __read_mostly;
cefdca0a 36
3004ec9c
AA
37static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
38
3004ec9c
AA
39/*
40 * Start with fault_pending_wqh and fault_wqh so they're more likely
41 * to be in the same cacheline.
cbcfa130
EB
42 *
43 * Locking order:
44 * fd_wqh.lock
45 * fault_pending_wqh.lock
46 * fault_wqh.lock
47 * event_wqh.lock
48 *
49 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
50 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
51 * also taken in IRQ context.
3004ec9c 52 */
86039bd3 53struct userfaultfd_ctx {
15b726ef
AA
54 /* waitqueue head for the pending (i.e. not read) userfaults */
55 wait_queue_head_t fault_pending_wqh;
56 /* waitqueue head for the userfaults */
86039bd3
AA
57 wait_queue_head_t fault_wqh;
58 /* waitqueue head for the pseudo fd to wakeup poll/read */
59 wait_queue_head_t fd_wqh;
9cd75c3c
PE
60 /* waitqueue head for events */
61 wait_queue_head_t event_wqh;
2c5b7e1b 62 /* a refile sequence protected by fault_pending_wqh lock */
2ca97ac8 63 seqcount_spinlock_t refile_seq;
3004ec9c 64 /* pseudo fd refcounting */
ca880420 65 refcount_t refcount;
86039bd3
AA
66 /* userfaultfd syscall flags */
67 unsigned int flags;
9cd75c3c
PE
68 /* features requested from the userspace */
69 unsigned int features;
86039bd3
AA
70 /* released */
71 bool released;
df2cc96e 72 /* memory mappings are changing because of non-cooperative event */
a759a909 73 atomic_t mmap_changing;
86039bd3
AA
74 /* mm with one or more vmas attached to this userfaultfd_ctx */
75 struct mm_struct *mm;
76};
77
893e26e6
PE
78struct userfaultfd_fork_ctx {
79 struct userfaultfd_ctx *orig;
80 struct userfaultfd_ctx *new;
81 struct list_head list;
82};
83
897ab3e0
MR
84struct userfaultfd_unmap_ctx {
85 struct userfaultfd_ctx *ctx;
86 unsigned long start;
87 unsigned long end;
88 struct list_head list;
89};
90
86039bd3 91struct userfaultfd_wait_queue {
a9b85f94 92 struct uffd_msg msg;
ac6424b9 93 wait_queue_entry_t wq;
86039bd3 94 struct userfaultfd_ctx *ctx;
15a77c6f 95 bool waken;
86039bd3
AA
96};
97
98struct userfaultfd_wake_range {
99 unsigned long start;
100 unsigned long len;
101};
102
22e5fe2a
NA
103/* internal indication that UFFD_API ioctl was successfully executed */
104#define UFFD_FEATURE_INITIALIZED (1u << 31)
105
106static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
107{
108 return ctx->features & UFFD_FEATURE_INITIALIZED;
109}
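/*
 * Illustrative userspace sketch (not part of this file): UFFD_FEATURE_INITIALIZED
 * is set once the UFFDIO_API handshake has succeeded, and poll()/read() refuse to
 * work before that. A minimal monitor obtains and initializes the fd roughly as
 * below, assuming <linux/userfaultfd.h>, <sys/syscall.h>, <sys/ioctl.h>,
 * <fcntl.h> and <unistd.h>.
 */
static int uffd_open_and_init(void)
{
        struct uffdio_api api = { .api = UFFD_API, .features = 0 };
        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

        if (uffd < 0)
                return -1;
        /* until this succeeds the kernel reports EPOLLERR / -EINVAL on the fd */
        if (ioctl(uffd, UFFDIO_API, &api) < 0) {
                close(uffd);
                return -1;
        }
        return uffd;
}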
110
51d3d5eb
DH
111static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
112 vm_flags_t flags)
113{
114 const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
115
116 vma->vm_flags = flags;
117 /*
118 * For shared mappings, we want to enable writenotify while
119 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
120 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
121 */
122 if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
123 vma_set_page_prot(vma);
124}
125
ac6424b9 126static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
86039bd3
AA
127 int wake_flags, void *key)
128{
129 struct userfaultfd_wake_range *range = key;
130 int ret;
131 struct userfaultfd_wait_queue *uwq;
132 unsigned long start, len;
133
134 uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
135 ret = 0;
86039bd3
AA
136 /* len == 0 means wake all */
137 start = range->start;
138 len = range->len;
a9b85f94
AA
139 if (len && (start > uwq->msg.arg.pagefault.address ||
140 start + len <= uwq->msg.arg.pagefault.address))
86039bd3 141 goto out;
15a77c6f
AA
142 WRITE_ONCE(uwq->waken, true);
143 /*
a9668cd6
PZ
144 * The Program-Order guarantees provided by the scheduler
145 * ensure uwq->waken is visible before the task is woken.
15a77c6f 146 */
86039bd3 147 ret = wake_up_state(wq->private, mode);
a9668cd6 148 if (ret) {
86039bd3
AA
149 /*
150 * Wake only once, autoremove behavior.
151 *
a9668cd6
PZ
152 * After the effect of list_del_init is visible to the other
153 * CPUs, the waitqueue may disappear from under us, see the
154 * !list_empty_careful() in handle_userfault().
155 *
156 * try_to_wake_up() has an implicit smp_mb(), and the
157 * wq->private is read before calling the extern function
158 * "wake_up_state" (which in turns calls try_to_wake_up).
86039bd3 159 */
2055da97 160 list_del_init(&wq->entry);
a9668cd6 161 }
86039bd3
AA
162out:
163 return ret;
164}
165
166/**
167 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
168 * context.
169 * @ctx: [in] Pointer to the userfaultfd context.
86039bd3
AA
170 */
171static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
172{
ca880420 173 refcount_inc(&ctx->refcount);
86039bd3
AA
174}
175
176/**
177 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
178 * context.
179 * @ctx: [in] Pointer to userfaultfd context.
180 *
181 * The userfaultfd context reference must have been previously acquired either
182 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
183 */
184static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
185{
ca880420 186 if (refcount_dec_and_test(&ctx->refcount)) {
86039bd3
AA
187 VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
188 VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
189 VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
190 VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
9cd75c3c
PE
191 VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
192 VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
86039bd3
AA
193 VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
194 VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
d2005e3f 195 mmdrop(ctx->mm);
3004ec9c 196 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
86039bd3
AA
197 }
198}
199
a9b85f94 200static inline void msg_init(struct uffd_msg *msg)
86039bd3 201{
a9b85f94
AA
202 BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
203 /*
204 * Must use memset to zero out the paddings or kernel data is
205 * leaked to userland.
206 */
207 memset(msg, 0, sizeof(struct uffd_msg));
208}
209
210static inline struct uffd_msg userfault_msg(unsigned long address,
d172b1a3 211 unsigned long real_address,
a9b85f94 212 unsigned int flags,
9d4ac934
AP
213 unsigned long reason,
214 unsigned int features)
a9b85f94
AA
215{
216 struct uffd_msg msg;
d172b1a3 217
a9b85f94
AA
218 msg_init(&msg);
219 msg.event = UFFD_EVENT_PAGEFAULT;
824ddc60 220
d172b1a3
NA
221 msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
222 real_address : address;
223
7677f7fd
AR
224 /*
225 * These flags indicate why the userfault occurred:
226 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
227 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
228 * - Neither of these flags being set indicates a MISSING fault.
229 *
230 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
231 * fault. Otherwise, it was a read fault.
232 */
86039bd3 233 if (flags & FAULT_FLAG_WRITE)
a9b85f94 234 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
86039bd3 235 if (reason & VM_UFFD_WP)
a9b85f94 236 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
7677f7fd
AR
237 if (reason & VM_UFFD_MINOR)
238 msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
9d4ac934 239 if (features & UFFD_FEATURE_THREAD_ID)
a36985d3 240 msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
a9b85f94 241 return msg;
86039bd3
AA
242}
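/*
 * Illustrative userspace sketch (not part of this file): the monitor decodes the
 * flags set by userfault_msg() above as follows, assuming <stdio.h> and a
 * uffd_msg already read() from the userfaultfd.
 */
static void classify_pagefault(const struct uffd_msg *msg)
{
        __u64 addr  = msg->arg.pagefault.address;
        __u64 flags = msg->arg.pagefault.flags;

        if (msg->event != UFFD_EVENT_PAGEFAULT)
                return;
        if (flags & UFFD_PAGEFAULT_FLAG_WP)
                printf("write-protect fault at 0x%llx\n", (unsigned long long)addr);
        else if (flags & UFFD_PAGEFAULT_FLAG_MINOR)
                printf("minor fault at 0x%llx\n", (unsigned long long)addr);
        else
                printf("missing %s fault at 0x%llx\n",
                       (flags & UFFD_PAGEFAULT_FLAG_WRITE) ? "write" : "read",
                       (unsigned long long)addr);
}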
243
369cd212
MK
244#ifdef CONFIG_HUGETLB_PAGE
245/*
246 * Same functionality as userfaultfd_must_wait below with modifications for
247 * hugepmd ranges.
248 */
249static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
7868a208 250 struct vm_area_struct *vma,
369cd212
MK
251 unsigned long address,
252 unsigned long flags,
253 unsigned long reason)
254{
1e2c0436 255 pte_t *ptep, pte;
369cd212
MK
256 bool ret = true;
257
9c67a207 258 mmap_assert_locked(ctx->mm);
1e2c0436 259
9c67a207 260 ptep = hugetlb_walk(vma, address, vma_mmu_pagesize(vma));
1e2c0436 261 if (!ptep)
369cd212
MK
262 goto out;
263
264 ret = false;
1e2c0436 265 pte = huge_ptep_get(ptep);
369cd212
MK
266
267 /*
268 * Lockless access: we're in a wait_event so it's ok if it
5c041f5d
PX
269 * changes under us. PTE markers should be handled the same as none
270 * ptes here.
369cd212 271 */
5c041f5d 272 if (huge_pte_none_mostly(pte))
369cd212 273 ret = true;
1e2c0436 274 if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
369cd212
MK
275 ret = true;
276out:
277 return ret;
278}
279#else
280static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
7868a208 281 struct vm_area_struct *vma,
369cd212
MK
282 unsigned long address,
283 unsigned long flags,
284 unsigned long reason)
285{
286 return false; /* should never get here */
287}
288#endif /* CONFIG_HUGETLB_PAGE */
289
8d2afd96
AA
290/*
291 * Verify the pagetables are still not ok after having registered into
292 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
293 * userfault that has already been resolved, if userfaultfd_read and
294 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
295 * threads.
296 */
297static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
298 unsigned long address,
299 unsigned long flags,
300 unsigned long reason)
301{
302 struct mm_struct *mm = ctx->mm;
303 pgd_t *pgd;
c2febafc 304 p4d_t *p4d;
8d2afd96
AA
305 pud_t *pud;
306 pmd_t *pmd, _pmd;
307 pte_t *pte;
308 bool ret = true;
309
42fc5414 310 mmap_assert_locked(mm);
8d2afd96
AA
311
312 pgd = pgd_offset(mm, address);
313 if (!pgd_present(*pgd))
314 goto out;
c2febafc
KS
315 p4d = p4d_offset(pgd, address);
316 if (!p4d_present(*p4d))
317 goto out;
318 pud = pud_offset(p4d, address);
8d2afd96
AA
319 if (!pud_present(*pud))
320 goto out;
321 pmd = pmd_offset(pud, address);
322 /*
323 * READ_ONCE must function as a barrier with narrower scope
324 * and it must be equivalent to:
325 * _pmd = *pmd; barrier();
326 *
327 * This is to deal with the instability (as in
328 * pmd_trans_unstable) of the pmd.
329 */
330 _pmd = READ_ONCE(*pmd);
a365ac09 331 if (pmd_none(_pmd))
8d2afd96
AA
332 goto out;
333
334 ret = false;
a365ac09
HY
335 if (!pmd_present(_pmd))
336 goto out;
337
63b2d417
AA
338 if (pmd_trans_huge(_pmd)) {
339 if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
340 ret = true;
8d2afd96 341 goto out;
63b2d417 342 }
8d2afd96
AA
343
344 /*
345 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
346 * and use the standard pte_offset_map() instead of parsing _pmd.
347 */
348 pte = pte_offset_map(pmd, address);
349 /*
350 * Lockless access: we're in a wait_event so it's ok if it
5c041f5d
PX
351 * changes under us. PTE markers should be handled the same as none
352 * ptes here.
8d2afd96 353 */
5c041f5d 354 if (pte_none_mostly(*pte))
8d2afd96 355 ret = true;
63b2d417
AA
356 if (!pte_write(*pte) && (reason & VM_UFFD_WP))
357 ret = true;
8d2afd96
AA
358 pte_unmap(pte);
359
360out:
361 return ret;
362}
363
2f064a59 364static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
3e69ad08
PX
365{
366 if (flags & FAULT_FLAG_INTERRUPTIBLE)
367 return TASK_INTERRUPTIBLE;
368
369 if (flags & FAULT_FLAG_KILLABLE)
370 return TASK_KILLABLE;
371
372 return TASK_UNINTERRUPTIBLE;
373}
374
86039bd3
AA
375/*
376 * The locking rules involved in returning VM_FAULT_RETRY depending on
377 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
378 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
379 * recommendation in __lock_page_or_retry is not an understatement.
380 *
c1e8d7c6 381 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
86039bd3
AA
382 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
383 * not set.
384 *
385 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
386 * set, VM_FAULT_RETRY can still be returned if and only if there are
c1e8d7c6 387 * fatal_signal_pending()s, and the mmap_lock must be released before
86039bd3
AA
388 * returning it.
389 */
2b740303 390vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
86039bd3 391{
b8da2e46
PX
392 struct vm_area_struct *vma = vmf->vma;
393 struct mm_struct *mm = vma->vm_mm;
86039bd3
AA
394 struct userfaultfd_ctx *ctx;
395 struct userfaultfd_wait_queue uwq;
2b740303 396 vm_fault_t ret = VM_FAULT_SIGBUS;
3e69ad08 397 bool must_wait;
2f064a59 398 unsigned int blocking_state;
86039bd3 399
64c2b203
AA
400 /*
401 * We don't do userfault handling for the final child pid update.
402 *
403 * We also don't do userfault handling during
404 * coredumping. hugetlbfs has the special
405 * follow_hugetlb_page() to skip missing pages in the
406 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
407 * the no_page_table() helper in follow_page_mask(), but the
408 * shmem_vm_ops->fault method is invoked even during
c1e8d7c6 409 * coredumping without mmap_lock and it ends up here.
64c2b203
AA
410 */
411 if (current->flags & (PF_EXITING|PF_DUMPCORE))
412 goto out;
413
414 /*
c1e8d7c6
ML
415 * Coredumping runs without mmap_lock so we can only check that
416 * the mmap_lock is held, if PF_DUMPCORE was not set.
64c2b203 417 */
42fc5414 418 mmap_assert_locked(mm);
64c2b203 419
b8da2e46 420 ctx = vma->vm_userfaultfd_ctx.ctx;
86039bd3 421 if (!ctx)
ba85c702 422 goto out;
86039bd3
AA
423
424 BUG_ON(ctx->mm != mm);
425
7677f7fd
AR
426 /* Any unrecognized flag is a bug. */
427 VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
428 /* 0 or > 1 flags set is a bug; we expect exactly 1. */
429 VM_BUG_ON(!reason || (reason & (reason - 1)));
86039bd3 430
2d6d6f5a
PS
431 if (ctx->features & UFFD_FEATURE_SIGBUS)
432 goto out;
2d5de004 433 if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
37cd0575 434 goto out;
2d6d6f5a 435
86039bd3
AA
436 /*
437 * If it's already released don't get it. This avoids looping
438 * in __get_user_pages if userfaultfd_release waits on the
c1e8d7c6 439 * caller of handle_userfault to release the mmap_lock.
86039bd3 440 */
6aa7de05 441 if (unlikely(READ_ONCE(ctx->released))) {
656710a6
AA
442 /*
443 * Don't return VM_FAULT_SIGBUS in this case, so a non
444 * cooperative manager can close the uffd after the
445 * last UFFDIO_COPY, without risking to trigger an
446 * involuntary SIGBUS if the process was starting the
447 * userfaultfd while the userfaultfd was still armed
448 * (but after the last UFFDIO_COPY). If the uffd
449 * wasn't already closed when the userfault reached
450 * this point, that would normally be solved by
451 * userfaultfd_must_wait returning 'false'.
452 *
453 * If we were to return VM_FAULT_SIGBUS here, the non
454 * cooperative manager would be instead forced to
455 * always call UFFDIO_UNREGISTER before it can safely
456 * close the uffd.
457 */
458 ret = VM_FAULT_NOPAGE;
ba85c702 459 goto out;
656710a6 460 }
86039bd3
AA
461
462 /*
463 * Check that we can return VM_FAULT_RETRY.
464 *
465 * NOTE: it should become possible to return VM_FAULT_RETRY
466 * even if FAULT_FLAG_TRIED is set without leading to gup()
467 * -EBUSY failures, if the userfaultfd is to be extended for
468 * VM_UFFD_WP tracking and we intend to arm the userfault
469 * without first stopping userland access to the memory. For
470 * VM_UFFD_MISSING userfaults this is enough for now.
471 */
82b0f8c3 472 if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
86039bd3
AA
473 /*
474 * Validate the invariant that nowait must allow retry
475 * to be sure not to return SIGBUS erroneously on
476 * nowait invocations.
477 */
82b0f8c3 478 BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
86039bd3
AA
479#ifdef CONFIG_DEBUG_VM
480 if (printk_ratelimit()) {
481 printk(KERN_WARNING
82b0f8c3
JK
482 "FAULT_FLAG_ALLOW_RETRY missing %x\n",
483 vmf->flags);
86039bd3
AA
484 dump_stack();
485 }
486#endif
ba85c702 487 goto out;
86039bd3
AA
488 }
489
490 /*
491 * Handle nowait, not much to do other than tell it to retry
492 * and wait.
493 */
ba85c702 494 ret = VM_FAULT_RETRY;
82b0f8c3 495 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
ba85c702 496 goto out;
86039bd3 497
c1e8d7c6 498 /* take the reference before dropping the mmap_lock */
86039bd3
AA
499 userfaultfd_ctx_get(ctx);
500
86039bd3
AA
501 init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
502 uwq.wq.private = current;
d172b1a3
NA
503 uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
504 reason, ctx->features);
86039bd3 505 uwq.ctx = ctx;
15a77c6f 506 uwq.waken = false;
86039bd3 507
3e69ad08 508 blocking_state = userfaultfd_get_blocking_state(vmf->flags);
dfa37dc3 509
b8da2e46
PX
510 /*
511 * Take the vma lock now, in order to safely call
512 * userfaultfd_huge_must_wait() later. Since acquiring the
513 * (sleepable) vma lock can modify the current task state, that
514 * must be before explicitly calling set_current_state().
515 */
516 if (is_vm_hugetlb_page(vma))
517 hugetlb_vma_lock_read(vma);
518
cbcfa130 519 spin_lock_irq(&ctx->fault_pending_wqh.lock);
86039bd3
AA
520 /*
521 * After the __add_wait_queue the uwq is visible to userland
522 * through poll/read().
523 */
15b726ef
AA
524 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
525 /*
526 * The smp_mb() after __set_current_state prevents the reads
527 * following the spin_unlock to happen before the list_add in
528 * __add_wait_queue.
529 */
15a77c6f 530 set_current_state(blocking_state);
cbcfa130 531 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
86039bd3 532
b8da2e46 533 if (!is_vm_hugetlb_page(vma))
369cd212
MK
534 must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
535 reason);
536 else
b8da2e46 537 must_wait = userfaultfd_huge_must_wait(ctx, vma,
7868a208 538 vmf->address,
369cd212 539 vmf->flags, reason);
b8da2e46
PX
540 if (is_vm_hugetlb_page(vma))
541 hugetlb_vma_unlock_read(vma);
d8ed45c5 542 mmap_read_unlock(mm);
8d2afd96 543
f9bf3522 544 if (likely(must_wait && !READ_ONCE(ctx->released))) {
a9a08845 545 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
86039bd3 546 schedule();
ba85c702 547 }
86039bd3 548
ba85c702 549 __set_current_state(TASK_RUNNING);
15b726ef
AA
550
551 /*
552 * Here we race with the list_del; list_add in
553 * userfaultfd_ctx_read(), however because we don't ever run
554 * list_del_init() to refile across the two lists, the prev
555 * and next pointers will never point to self. list_add also
556 * would never let any of the two pointers to point to
557 * self. So list_empty_careful won't risk to see both pointers
558 * pointing to self at any time during the list refile. The
559 * only case where list_del_init() is called is the full
560 * removal in the wake function and there we don't re-list_add
561 * and it's fine not to block on the spinlock. The uwq on this
562 * kernel stack can be released after the list_del_init.
563 */
2055da97 564 if (!list_empty_careful(&uwq.wq.entry)) {
cbcfa130 565 spin_lock_irq(&ctx->fault_pending_wqh.lock);
15b726ef
AA
566 /*
567 * No need of list_del_init(), the uwq on the stack
568 * will be freed shortly anyway.
569 */
2055da97 570 list_del(&uwq.wq.entry);
cbcfa130 571 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
86039bd3 572 }
86039bd3
AA
573
574 /*
575 * ctx may go away after this if the userfault pseudo fd is
576 * already released.
577 */
578 userfaultfd_ctx_put(ctx);
579
ba85c702
AA
580out:
581 return ret;
86039bd3
AA
582}
583
8c9e7bb7
AA
584static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
585 struct userfaultfd_wait_queue *ewq)
9cd75c3c 586{
0cbb4b4f
AA
587 struct userfaultfd_ctx *release_new_ctx;
588
9a69a829
AA
589 if (WARN_ON_ONCE(current->flags & PF_EXITING))
590 goto out;
9cd75c3c
PE
591
592 ewq->ctx = ctx;
593 init_waitqueue_entry(&ewq->wq, current);
0cbb4b4f 594 release_new_ctx = NULL;
9cd75c3c 595
cbcfa130 596 spin_lock_irq(&ctx->event_wqh.lock);
9cd75c3c
PE
597 /*
598 * After the __add_wait_queue the uwq is visible to userland
599 * through poll/read().
600 */
601 __add_wait_queue(&ctx->event_wqh, &ewq->wq);
602 for (;;) {
603 set_current_state(TASK_KILLABLE);
604 if (ewq->msg.event == 0)
605 break;
6aa7de05 606 if (READ_ONCE(ctx->released) ||
9cd75c3c 607 fatal_signal_pending(current)) {
384632e6
AA
608 /*
609 * &ewq->wq may be queued in fork_event, but
610 * __remove_wait_queue ignores the head
611 * parameter. It would be a problem if it
612 * didn't.
613 */
9cd75c3c 614 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
7eb76d45
MR
615 if (ewq->msg.event == UFFD_EVENT_FORK) {
616 struct userfaultfd_ctx *new;
617
618 new = (struct userfaultfd_ctx *)
619 (unsigned long)
620 ewq->msg.arg.reserved.reserved1;
0cbb4b4f 621 release_new_ctx = new;
7eb76d45 622 }
9cd75c3c
PE
623 break;
624 }
625
cbcfa130 626 spin_unlock_irq(&ctx->event_wqh.lock);
9cd75c3c 627
a9a08845 628 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
9cd75c3c
PE
629 schedule();
630
cbcfa130 631 spin_lock_irq(&ctx->event_wqh.lock);
9cd75c3c
PE
632 }
633 __set_current_state(TASK_RUNNING);
cbcfa130 634 spin_unlock_irq(&ctx->event_wqh.lock);
9cd75c3c 635
0cbb4b4f
AA
636 if (release_new_ctx) {
637 struct vm_area_struct *vma;
638 struct mm_struct *mm = release_new_ctx->mm;
69dbe6da 639 VMA_ITERATOR(vmi, mm, 0);
0cbb4b4f
AA
640
641 /* the various vma->vm_userfaultfd_ctx still point to it */
d8ed45c5 642 mmap_write_lock(mm);
69dbe6da 643 for_each_vma(vmi, vma) {
31e810aa 644 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
0cbb4b4f 645 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
51d3d5eb
DH
646 userfaultfd_set_vm_flags(vma,
647 vma->vm_flags & ~__VM_UFFD_FLAGS);
31e810aa 648 }
69dbe6da 649 }
d8ed45c5 650 mmap_write_unlock(mm);
0cbb4b4f
AA
651
652 userfaultfd_ctx_put(release_new_ctx);
653 }
654
9cd75c3c
PE
655 /*
656 * ctx may go away after this if the userfault pseudo fd is
657 * already released.
658 */
9a69a829 659out:
a759a909
NA
660 atomic_dec(&ctx->mmap_changing);
661 VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
9cd75c3c 662 userfaultfd_ctx_put(ctx);
9cd75c3c
PE
663}
664
665static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
666 struct userfaultfd_wait_queue *ewq)
667{
668 ewq->msg.event = 0;
669 wake_up_locked(&ctx->event_wqh);
670 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
671}
672
893e26e6
PE
673int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
674{
675 struct userfaultfd_ctx *ctx = NULL, *octx;
676 struct userfaultfd_fork_ctx *fctx;
677
678 octx = vma->vm_userfaultfd_ctx.ctx;
679 if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
680 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
51d3d5eb 681 userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
893e26e6
PE
682 return 0;
683 }
684
685 list_for_each_entry(fctx, fcs, list)
686 if (fctx->orig == octx) {
687 ctx = fctx->new;
688 break;
689 }
690
691 if (!ctx) {
692 fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
693 if (!fctx)
694 return -ENOMEM;
695
696 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
697 if (!ctx) {
698 kfree(fctx);
699 return -ENOMEM;
700 }
701
ca880420 702 refcount_set(&ctx->refcount, 1);
893e26e6 703 ctx->flags = octx->flags;
893e26e6
PE
704 ctx->features = octx->features;
705 ctx->released = false;
a759a909 706 atomic_set(&ctx->mmap_changing, 0);
893e26e6 707 ctx->mm = vma->vm_mm;
00bb31fa 708 mmgrab(ctx->mm);
893e26e6
PE
709
710 userfaultfd_ctx_get(octx);
a759a909 711 atomic_inc(&octx->mmap_changing);
893e26e6
PE
712 fctx->orig = octx;
713 fctx->new = ctx;
714 list_add_tail(&fctx->list, fcs);
715 }
716
717 vma->vm_userfaultfd_ctx.ctx = ctx;
718 return 0;
719}
720
8c9e7bb7 721static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
893e26e6
PE
722{
723 struct userfaultfd_ctx *ctx = fctx->orig;
724 struct userfaultfd_wait_queue ewq;
725
726 msg_init(&ewq.msg);
727
728 ewq.msg.event = UFFD_EVENT_FORK;
729 ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
730
8c9e7bb7 731 userfaultfd_event_wait_completion(ctx, &ewq);
893e26e6
PE
732}
733
734void dup_userfaultfd_complete(struct list_head *fcs)
735{
893e26e6
PE
736 struct userfaultfd_fork_ctx *fctx, *n;
737
738 list_for_each_entry_safe(fctx, n, fcs, list) {
8c9e7bb7 739 dup_fctx(fctx);
893e26e6
PE
740 list_del(&fctx->list);
741 kfree(fctx);
742 }
743}
744
72f87654
PE
745void mremap_userfaultfd_prep(struct vm_area_struct *vma,
746 struct vm_userfaultfd_ctx *vm_ctx)
747{
748 struct userfaultfd_ctx *ctx;
749
750 ctx = vma->vm_userfaultfd_ctx.ctx;
3cfd22be
PX
751
752 if (!ctx)
753 return;
754
755 if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
72f87654
PE
756 vm_ctx->ctx = ctx;
757 userfaultfd_ctx_get(ctx);
a759a909 758 atomic_inc(&ctx->mmap_changing);
3cfd22be
PX
759 } else {
760 /* Drop uffd context if remap feature not enabled */
761 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
51d3d5eb 762 userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
72f87654
PE
763 }
764}
765
90794bf1 766void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
72f87654
PE
767 unsigned long from, unsigned long to,
768 unsigned long len)
769{
90794bf1 770 struct userfaultfd_ctx *ctx = vm_ctx->ctx;
72f87654
PE
771 struct userfaultfd_wait_queue ewq;
772
773 if (!ctx)
774 return;
775
776 if (to & ~PAGE_MASK) {
777 userfaultfd_ctx_put(ctx);
778 return;
779 }
780
781 msg_init(&ewq.msg);
782
783 ewq.msg.event = UFFD_EVENT_REMAP;
784 ewq.msg.arg.remap.from = from;
785 ewq.msg.arg.remap.to = to;
786 ewq.msg.arg.remap.len = len;
787
788 userfaultfd_event_wait_completion(ctx, &ewq);
789}
790
70ccb92f 791bool userfaultfd_remove(struct vm_area_struct *vma,
d811914d 792 unsigned long start, unsigned long end)
05ce7724
PE
793{
794 struct mm_struct *mm = vma->vm_mm;
795 struct userfaultfd_ctx *ctx;
796 struct userfaultfd_wait_queue ewq;
797
798 ctx = vma->vm_userfaultfd_ctx.ctx;
d811914d 799 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
70ccb92f 800 return true;
05ce7724
PE
801
802 userfaultfd_ctx_get(ctx);
a759a909 803 atomic_inc(&ctx->mmap_changing);
d8ed45c5 804 mmap_read_unlock(mm);
05ce7724 805
05ce7724
PE
806 msg_init(&ewq.msg);
807
d811914d
MR
808 ewq.msg.event = UFFD_EVENT_REMOVE;
809 ewq.msg.arg.remove.start = start;
810 ewq.msg.arg.remove.end = end;
05ce7724
PE
811
812 userfaultfd_event_wait_completion(ctx, &ewq);
813
70ccb92f 814 return false;
05ce7724
PE
815}
816
897ab3e0
MR
817static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
818 unsigned long start, unsigned long end)
819{
820 struct userfaultfd_unmap_ctx *unmap_ctx;
821
822 list_for_each_entry(unmap_ctx, unmaps, list)
823 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
824 unmap_ctx->end == end)
825 return true;
826
827 return false;
828}
829
69dbe6da
LH
830int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
831 unsigned long end, struct list_head *unmaps)
897ab3e0 832{
69dbe6da
LH
833 VMA_ITERATOR(vmi, mm, start);
834 struct vm_area_struct *vma;
835
836 for_each_vma_range(vmi, vma, end) {
897ab3e0
MR
837 struct userfaultfd_unmap_ctx *unmap_ctx;
838 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
839
840 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
841 has_unmap_ctx(ctx, unmaps, start, end))
842 continue;
843
844 unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
845 if (!unmap_ctx)
846 return -ENOMEM;
847
848 userfaultfd_ctx_get(ctx);
a759a909 849 atomic_inc(&ctx->mmap_changing);
897ab3e0
MR
850 unmap_ctx->ctx = ctx;
851 unmap_ctx->start = start;
852 unmap_ctx->end = end;
853 list_add_tail(&unmap_ctx->list, unmaps);
854 }
855
856 return 0;
857}
858
859void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
860{
861 struct userfaultfd_unmap_ctx *ctx, *n;
862 struct userfaultfd_wait_queue ewq;
863
864 list_for_each_entry_safe(ctx, n, uf, list) {
865 msg_init(&ewq.msg);
866
867 ewq.msg.event = UFFD_EVENT_UNMAP;
868 ewq.msg.arg.remove.start = ctx->start;
869 ewq.msg.arg.remove.end = ctx->end;
870
871 userfaultfd_event_wait_completion(ctx->ctx, &ewq);
872
873 list_del(&ctx->list);
874 kfree(ctx);
875 }
876}
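/*
 * Illustrative userspace sketch (not part of this file): the FORK/REMAP/
 * REMOVE/UNMAP events queued by the helpers above arrive as ordinary uffd_msg
 * records, so a non-cooperative monitor typically dispatches on msg->event.
 * The track_new_uffd()/move_tracking()/drop_tracking() helpers are
 * hypothetical placeholders.
 */
static void handle_nonpf_event(const struct uffd_msg *msg)
{
        switch (msg->event) {
        case UFFD_EVENT_FORK:
                /* a new userfaultfd for the child mm */
                track_new_uffd(msg->arg.fork.ufd);
                break;
        case UFFD_EVENT_REMAP:
                move_tracking(msg->arg.remap.from, msg->arg.remap.to,
                              msg->arg.remap.len);
                break;
        case UFFD_EVENT_REMOVE:
        case UFFD_EVENT_UNMAP:
                drop_tracking(msg->arg.remove.start, msg->arg.remove.end);
                break;
        }
}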
877
86039bd3
AA
878static int userfaultfd_release(struct inode *inode, struct file *file)
879{
880 struct userfaultfd_ctx *ctx = file->private_data;
881 struct mm_struct *mm = ctx->mm;
882 struct vm_area_struct *vma, *prev;
883 /* len == 0 means wake all */
884 struct userfaultfd_wake_range range = { .len = 0, };
885 unsigned long new_flags;
69dbe6da 886 MA_STATE(mas, &mm->mm_mt, 0, 0);
86039bd3 887
6aa7de05 888 WRITE_ONCE(ctx->released, true);
86039bd3 889
d2005e3f
ON
890 if (!mmget_not_zero(mm))
891 goto wakeup;
892
86039bd3
AA
893 /*
894 * Flush page faults out of all CPUs. NOTE: all page faults
895 * must be retried without returning VM_FAULT_SIGBUS if
896 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
c1e8d7c6 897 * changes while handle_userfault released the mmap_lock. So
86039bd3 898 * it's critical that released is set to true (above), before
c1e8d7c6 899 * taking the mmap_lock for writing.
86039bd3 900 */
d8ed45c5 901 mmap_write_lock(mm);
86039bd3 902 prev = NULL;
69dbe6da 903 mas_for_each(&mas, vma, ULONG_MAX) {
86039bd3
AA
904 cond_resched();
905 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
7677f7fd 906 !!(vma->vm_flags & __VM_UFFD_FLAGS));
86039bd3
AA
907 if (vma->vm_userfaultfd_ctx.ctx != ctx) {
908 prev = vma;
909 continue;
910 }
7677f7fd 911 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
4d45e75a
JH
912 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
913 new_flags, vma->anon_vma,
914 vma->vm_file, vma->vm_pgoff,
915 vma_policy(vma),
5c26f6ac 916 NULL_VM_UFFD_CTX, anon_vma_name(vma));
69dbe6da
LH
917 if (prev) {
918 mas_pause(&mas);
4d45e75a 919 vma = prev;
69dbe6da 920 } else {
4d45e75a 921 prev = vma;
69dbe6da
LH
922 }
923
51d3d5eb 924 userfaultfd_set_vm_flags(vma, new_flags);
86039bd3
AA
925 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
926 }
d8ed45c5 927 mmap_write_unlock(mm);
d2005e3f
ON
928 mmput(mm);
929wakeup:
86039bd3 930 /*
15b726ef 931 * After no new page faults can wait on this fault_*wqh, flush
86039bd3 932 * the last page faults that may have been already waiting on
15b726ef 933 * the fault_*wqh.
86039bd3 934 */
cbcfa130 935 spin_lock_irq(&ctx->fault_pending_wqh.lock);
ac5be6b4 936 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
c430d1e8 937 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
cbcfa130 938 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
86039bd3 939
5a18b64e
MR
940 /* Flush pending events that may still wait on event_wqh */
941 wake_up_all(&ctx->event_wqh);
942
a9a08845 943 wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
86039bd3
AA
944 userfaultfd_ctx_put(ctx);
945 return 0;
946}
947
15b726ef 948/* fault_pending_wqh.lock must be held by the caller */
6dcc27fd
PE
949static inline struct userfaultfd_wait_queue *find_userfault_in(
950 wait_queue_head_t *wqh)
86039bd3 951{
ac6424b9 952 wait_queue_entry_t *wq;
15b726ef 953 struct userfaultfd_wait_queue *uwq;
86039bd3 954
456a7378 955 lockdep_assert_held(&wqh->lock);
86039bd3 956
15b726ef 957 uwq = NULL;
6dcc27fd 958 if (!waitqueue_active(wqh))
15b726ef
AA
959 goto out;
960 /* walk in reverse to provide FIFO behavior to read userfaults */
2055da97 961 wq = list_last_entry(&wqh->head, typeof(*wq), entry);
15b726ef
AA
962 uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
963out:
964 return uwq;
86039bd3 965}
6dcc27fd
PE
966
967static inline struct userfaultfd_wait_queue *find_userfault(
968 struct userfaultfd_ctx *ctx)
969{
970 return find_userfault_in(&ctx->fault_pending_wqh);
971}
86039bd3 972
9cd75c3c
PE
973static inline struct userfaultfd_wait_queue *find_userfault_evt(
974 struct userfaultfd_ctx *ctx)
975{
976 return find_userfault_in(&ctx->event_wqh);
977}
978
076ccb76 979static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
86039bd3
AA
980{
981 struct userfaultfd_ctx *ctx = file->private_data;
076ccb76 982 __poll_t ret;
86039bd3
AA
983
984 poll_wait(file, &ctx->fd_wqh, wait);
985
22e5fe2a 986 if (!userfaultfd_is_initialized(ctx))
a9a08845 987 return EPOLLERR;
9cd75c3c 988
22e5fe2a
NA
989 /*
990 * poll() never guarantees that read won't block.
991 * userfaults can be woken before they're read().
992 */
993 if (unlikely(!(file->f_flags & O_NONBLOCK)))
a9a08845 994 return EPOLLERR;
22e5fe2a
NA
995 /*
996 * lockless access to see if there are pending faults
997 * __pollwait last action is the add_wait_queue but
998 * the spin_unlock would allow the waitqueue_active to
999 * pass above the actual list_add inside
1000 * add_wait_queue critical section. So use a full
1001 * memory barrier to serialize the list_add write of
1002 * add_wait_queue() with the waitqueue_active read
1003 * below.
1004 */
1005 ret = 0;
1006 smp_mb();
1007 if (waitqueue_active(&ctx->fault_pending_wqh))
1008 ret = EPOLLIN;
1009 else if (waitqueue_active(&ctx->event_wqh))
1010 ret = EPOLLIN;
1011
1012 return ret;
86039bd3
AA
1013}
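/*
 * Illustrative userspace sketch (not part of this file): userfaultfd_poll()
 * above returns EPOLLERR unless the fd was opened with O_NONBLOCK and the
 * UFFDIO_API handshake is done, so a monitor's wait loop looks roughly like
 * this, assuming <poll.h>; drain_uffd_messages() is sketched further below.
 */
static void wait_for_uffd_events(int uffd)
{
        struct pollfd pfd = { .fd = uffd, .events = POLLIN };

        while (poll(&pfd, 1, -1) > 0) {
                if (pfd.revents & (POLLERR | POLLHUP))
                        break;  /* misconfigured fd, or fd released (EPOLLHUP) */
                if (pfd.revents & POLLIN)
                        drain_uffd_messages(uffd);
        }
}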
1014
893e26e6
PE
1015static const struct file_operations userfaultfd_fops;
1016
b537900f
DC
1017static int resolve_userfault_fork(struct userfaultfd_ctx *new,
1018 struct inode *inode,
893e26e6
PE
1019 struct uffd_msg *msg)
1020{
1021 int fd;
893e26e6 1022
b537900f 1023 fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
abec3d01 1024 O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
893e26e6
PE
1025 if (fd < 0)
1026 return fd;
1027
893e26e6
PE
1028 msg->arg.reserved.reserved1 = 0;
1029 msg->arg.fork.ufd = fd;
893e26e6
PE
1030 return 0;
1031}
1032
86039bd3 1033static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
b537900f 1034 struct uffd_msg *msg, struct inode *inode)
86039bd3
AA
1035{
1036 ssize_t ret;
1037 DECLARE_WAITQUEUE(wait, current);
15b726ef 1038 struct userfaultfd_wait_queue *uwq;
893e26e6
PE
1039 /*
1040 * Handling fork event requires sleeping operations, so
1041 * we drop the event_wqh lock, then do these ops, then
1042 * lock it back and wake up the waiter. While the lock is
1043 * dropped the ewq may go away so we keep track of it
1044 * carefully.
1045 */
1046 LIST_HEAD(fork_event);
1047 struct userfaultfd_ctx *fork_nctx = NULL;
86039bd3 1048
15b726ef 1049 /* always take the fd_wqh lock before the fault_pending_wqh lock */
ae62c16e 1050 spin_lock_irq(&ctx->fd_wqh.lock);
86039bd3
AA
1051 __add_wait_queue(&ctx->fd_wqh, &wait);
1052 for (;;) {
1053 set_current_state(TASK_INTERRUPTIBLE);
15b726ef
AA
1054 spin_lock(&ctx->fault_pending_wqh.lock);
1055 uwq = find_userfault(ctx);
1056 if (uwq) {
2c5b7e1b
AA
1057 /*
1058 * Use a seqcount to repeat the lockless check
1059 * in wake_userfault() to avoid missing
1060 * wakeups because during the refile both
1061 * waitqueue could become empty if this is the
1062 * only userfault.
1063 */
1064 write_seqcount_begin(&ctx->refile_seq);
1065
86039bd3 1066 /*
15b726ef
AA
1067 * The fault_pending_wqh.lock prevents the uwq
1068 * to disappear from under us.
1069 *
1070 * Refile this userfault from
1071 * fault_pending_wqh to fault_wqh, it's not
1072 * pending anymore after we read it.
1073 *
1074 * Use list_del() by hand (as
1075 * userfaultfd_wake_function also uses
1076 * list_del_init() by hand) to be sure nobody
1077 * changes __remove_wait_queue() to use
1078 * list_del_init() in turn breaking the
1079 * !list_empty_careful() check in
2055da97 1080 * handle_userfault(). The uwq->wq.head list
15b726ef
AA
1081 * must never be empty at any time during the
1082 * refile, or the waitqueue could disappear
1083 * from under us. The "wait_queue_head_t"
1084 * parameter of __remove_wait_queue() is unused
1085 * anyway.
86039bd3 1086 */
2055da97 1087 list_del(&uwq->wq.entry);
c430d1e8 1088 add_wait_queue(&ctx->fault_wqh, &uwq->wq);
15b726ef 1089
2c5b7e1b
AA
1090 write_seqcount_end(&ctx->refile_seq);
1091
a9b85f94
AA
1092 /* careful to always initialize msg if ret == 0 */
1093 *msg = uwq->msg;
15b726ef 1094 spin_unlock(&ctx->fault_pending_wqh.lock);
86039bd3
AA
1095 ret = 0;
1096 break;
1097 }
15b726ef 1098 spin_unlock(&ctx->fault_pending_wqh.lock);
9cd75c3c
PE
1099
1100 spin_lock(&ctx->event_wqh.lock);
1101 uwq = find_userfault_evt(ctx);
1102 if (uwq) {
1103 *msg = uwq->msg;
1104
893e26e6
PE
1105 if (uwq->msg.event == UFFD_EVENT_FORK) {
1106 fork_nctx = (struct userfaultfd_ctx *)
1107 (unsigned long)
1108 uwq->msg.arg.reserved.reserved1;
2055da97 1109 list_move(&uwq->wq.entry, &fork_event);
384632e6
AA
1110 /*
1111 * fork_nctx can be freed as soon as
1112 * we drop the lock, unless we take a
1113 * reference on it.
1114 */
1115 userfaultfd_ctx_get(fork_nctx);
893e26e6
PE
1116 spin_unlock(&ctx->event_wqh.lock);
1117 ret = 0;
1118 break;
1119 }
1120
9cd75c3c
PE
1121 userfaultfd_event_complete(ctx, uwq);
1122 spin_unlock(&ctx->event_wqh.lock);
1123 ret = 0;
1124 break;
1125 }
1126 spin_unlock(&ctx->event_wqh.lock);
1127
86039bd3
AA
1128 if (signal_pending(current)) {
1129 ret = -ERESTARTSYS;
1130 break;
1131 }
1132 if (no_wait) {
1133 ret = -EAGAIN;
1134 break;
1135 }
ae62c16e 1136 spin_unlock_irq(&ctx->fd_wqh.lock);
86039bd3 1137 schedule();
ae62c16e 1138 spin_lock_irq(&ctx->fd_wqh.lock);
86039bd3
AA
1139 }
1140 __remove_wait_queue(&ctx->fd_wqh, &wait);
1141 __set_current_state(TASK_RUNNING);
ae62c16e 1142 spin_unlock_irq(&ctx->fd_wqh.lock);
86039bd3 1143
893e26e6 1144 if (!ret && msg->event == UFFD_EVENT_FORK) {
b537900f 1145 ret = resolve_userfault_fork(fork_nctx, inode, msg);
cbcfa130 1146 spin_lock_irq(&ctx->event_wqh.lock);
384632e6
AA
1147 if (!list_empty(&fork_event)) {
1148 /*
1149 * The fork thread didn't abort, so we can
1150 * drop the temporary refcount.
1151 */
1152 userfaultfd_ctx_put(fork_nctx);
1153
1154 uwq = list_first_entry(&fork_event,
1155 typeof(*uwq),
1156 wq.entry);
1157 /*
1158 * If fork_event list wasn't empty and in turn
1159 * the event wasn't already released by fork
1160 * (the event is allocated on fork kernel
1161 * stack), put the event back to its place in
1162 * the event_wq. fork_event head will be freed
1163 * as soon as we return so the event cannot
1164 * stay queued there no matter the current
1165 * "ret" value.
1166 */
1167 list_del(&uwq->wq.entry);
1168 __add_wait_queue(&ctx->event_wqh, &uwq->wq);
893e26e6 1169
384632e6
AA
1170 /*
1171 * Leave the event in the waitqueue and report
1172 * error to userland if we failed to resolve
1173 * the userfault fork.
1174 */
1175 if (likely(!ret))
893e26e6 1176 userfaultfd_event_complete(ctx, uwq);
384632e6
AA
1177 } else {
1178 /*
1179 * Here the fork thread aborted and the
1180 * refcount from the fork thread on fork_nctx
1181 * has already been released. We still hold
1182 * the reference we took before releasing the
1183 * lock above. If resolve_userfault_fork
1184 * failed we've to drop it because the
1185 * fork_nctx has to be freed in such case. If
1186 * it succeeded we'll hold it because the new
1187 * uffd references it.
1188 */
1189 if (ret)
1190 userfaultfd_ctx_put(fork_nctx);
893e26e6 1191 }
cbcfa130 1192 spin_unlock_irq(&ctx->event_wqh.lock);
893e26e6
PE
1193 }
1194
86039bd3
AA
1195 return ret;
1196}
1197
1198static ssize_t userfaultfd_read(struct file *file, char __user *buf,
1199 size_t count, loff_t *ppos)
1200{
1201 struct userfaultfd_ctx *ctx = file->private_data;
1202 ssize_t _ret, ret = 0;
a9b85f94 1203 struct uffd_msg msg;
86039bd3 1204 int no_wait = file->f_flags & O_NONBLOCK;
b537900f 1205 struct inode *inode = file_inode(file);
86039bd3 1206
22e5fe2a 1207 if (!userfaultfd_is_initialized(ctx))
86039bd3 1208 return -EINVAL;
86039bd3
AA
1209
1210 for (;;) {
a9b85f94 1211 if (count < sizeof(msg))
86039bd3 1212 return ret ? ret : -EINVAL;
b537900f 1213 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
86039bd3
AA
1214 if (_ret < 0)
1215 return ret ? ret : _ret;
a9b85f94 1216 if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
86039bd3 1217 return ret ? ret : -EFAULT;
a9b85f94
AA
1218 ret += sizeof(msg);
1219 buf += sizeof(msg);
1220 count -= sizeof(msg);
86039bd3
AA
1221 /*
1222 * Allow to read more than one fault at time but only
1223 * block if waiting for the very first one.
1224 */
1225 no_wait = O_NONBLOCK;
1226 }
1227}
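/*
 * Illustrative userspace sketch (not part of this file): userfaultfd_read()
 * above hands out whole uffd_msg records and only blocks for the first one,
 * so a monitor can drain several pending faults per read(). Assumes the
 * classify_pagefault() sketch shown earlier and <unistd.h>.
 */
static void drain_uffd_messages(int uffd)
{
        struct uffd_msg msgs[16];
        ssize_t n;

        for (;;) {
                n = read(uffd, msgs, sizeof(msgs));
                if (n <= 0)
                        break;  /* EAGAIN once the queue is empty (O_NONBLOCK) */
                for (size_t i = 0; i < (size_t)n / sizeof(msgs[0]); i++)
                        classify_pagefault(&msgs[i]);
        }
}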
1228
1229static void __wake_userfault(struct userfaultfd_ctx *ctx,
1230 struct userfaultfd_wake_range *range)
1231{
cbcfa130 1232 spin_lock_irq(&ctx->fault_pending_wqh.lock);
86039bd3 1233 /* wake all in the range and autoremove */
15b726ef 1234 if (waitqueue_active(&ctx->fault_pending_wqh))
ac5be6b4 1235 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
15b726ef
AA
1236 range);
1237 if (waitqueue_active(&ctx->fault_wqh))
c430d1e8 1238 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
cbcfa130 1239 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
86039bd3
AA
1240}
1241
1242static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
1243 struct userfaultfd_wake_range *range)
1244{
2c5b7e1b
AA
1245 unsigned seq;
1246 bool need_wakeup;
1247
86039bd3
AA
1248 /*
1249 * To be sure waitqueue_active() is not reordered by the CPU
1250 * before the pagetable update, use an explicit SMP memory
3e4e28c5 1251 * barrier here. PT lock release or mmap_read_unlock(mm) still
86039bd3
AA
1252 * have release semantics that can allow the
1253 * waitqueue_active() to be reordered before the pte update.
1254 */
1255 smp_mb();
1256
1257 /*
1258 * Use waitqueue_active because it's very frequent to
1259 * change the address space atomically even if there are no
1260 * userfaults yet. So we take the spinlock only when we're
1261 * sure we've userfaults to wake.
1262 */
2c5b7e1b
AA
1263 do {
1264 seq = read_seqcount_begin(&ctx->refile_seq);
1265 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
1266 waitqueue_active(&ctx->fault_wqh);
1267 cond_resched();
1268 } while (read_seqcount_retry(&ctx->refile_seq, seq));
1269 if (need_wakeup)
86039bd3
AA
1270 __wake_userfault(ctx, range);
1271}
1272
1273static __always_inline int validate_range(struct mm_struct *mm,
e71e2ace 1274 __u64 start, __u64 len)
86039bd3
AA
1275{
1276 __u64 task_size = mm->task_size;
1277
e71e2ace 1278 if (start & ~PAGE_MASK)
86039bd3
AA
1279 return -EINVAL;
1280 if (len & ~PAGE_MASK)
1281 return -EINVAL;
1282 if (!len)
1283 return -EINVAL;
e71e2ace 1284 if (start < mmap_min_addr)
86039bd3 1285 return -EINVAL;
e71e2ace 1286 if (start >= task_size)
86039bd3 1287 return -EINVAL;
e71e2ace 1288 if (len > task_size - start)
86039bd3
AA
1289 return -EINVAL;
1290 return 0;
1291}
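/*
 * Illustrative userspace sketch (not part of this file): validate_range()
 * above rejects unaligned or empty ranges, so callers normally round their
 * region to the page size (huge page size for hugetlbfs) before issuing the
 * register/copy ioctls. Assumes <unistd.h>.
 */
static void page_align_range(unsigned long *start, unsigned long *len)
{
        unsigned long page = (unsigned long)sysconf(_SC_PAGESIZE);

        *len  += *start & (page - 1);             /* cover the first page fully */
        *start &= ~(page - 1);
        *len   = (*len + page - 1) & ~(page - 1); /* round the length up */
}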
1292
1293static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1294 unsigned long arg)
1295{
1296 struct mm_struct *mm = ctx->mm;
1297 struct vm_area_struct *vma, *prev, *cur;
1298 int ret;
1299 struct uffdio_register uffdio_register;
1300 struct uffdio_register __user *user_uffdio_register;
1301 unsigned long vm_flags, new_flags;
1302 bool found;
ce53e8e6 1303 bool basic_ioctls;
86039bd3 1304 unsigned long start, end, vma_end;
69dbe6da 1305 MA_STATE(mas, &mm->mm_mt, 0, 0);
86039bd3
AA
1306
1307 user_uffdio_register = (struct uffdio_register __user *) arg;
1308
1309 ret = -EFAULT;
1310 if (copy_from_user(&uffdio_register, user_uffdio_register,
1311 sizeof(uffdio_register)-sizeof(__u64)))
1312 goto out;
1313
1314 ret = -EINVAL;
1315 if (!uffdio_register.mode)
1316 goto out;
7677f7fd 1317 if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
86039bd3
AA
1318 goto out;
1319 vm_flags = 0;
1320 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
1321 vm_flags |= VM_UFFD_MISSING;
00b151f2
PX
1322 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
1323#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1324 goto out;
1325#endif
86039bd3 1326 vm_flags |= VM_UFFD_WP;
00b151f2 1327 }
7677f7fd
AR
1328 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
1329#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
1330 goto out;
1331#endif
1332 vm_flags |= VM_UFFD_MINOR;
1333 }
86039bd3 1334
e71e2ace 1335 ret = validate_range(mm, uffdio_register.range.start,
86039bd3
AA
1336 uffdio_register.range.len);
1337 if (ret)
1338 goto out;
1339
1340 start = uffdio_register.range.start;
1341 end = start + uffdio_register.range.len;
1342
d2005e3f
ON
1343 ret = -ENOMEM;
1344 if (!mmget_not_zero(mm))
1345 goto out;
1346
d8ed45c5 1347 mmap_write_lock(mm);
69dbe6da
LH
1348 mas_set(&mas, start);
1349 vma = mas_find(&mas, ULONG_MAX);
86039bd3
AA
1350 if (!vma)
1351 goto out_unlock;
1352
1353 /* check that there's at least one vma in the range */
1354 ret = -EINVAL;
1355 if (vma->vm_start >= end)
1356 goto out_unlock;
1357
cab350af
MK
1358 /*
1359 * If the first vma contains huge pages, make sure start address
1360 * is aligned to huge page size.
1361 */
1362 if (is_vm_hugetlb_page(vma)) {
1363 unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1364
1365 if (start & (vma_hpagesize - 1))
1366 goto out_unlock;
1367 }
1368
86039bd3
AA
1369 /*
1370 * Search for not compatible vmas.
86039bd3
AA
1371 */
1372 found = false;
ce53e8e6 1373 basic_ioctls = false;
69dbe6da 1374 for (cur = vma; cur; cur = mas_next(&mas, end - 1)) {
86039bd3
AA
1375 cond_resched();
1376
1377 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
7677f7fd 1378 !!(cur->vm_flags & __VM_UFFD_FLAGS));
86039bd3
AA
1379
1380 /* check not compatible vmas */
1381 ret = -EINVAL;
63b2d417 1382 if (!vma_can_userfault(cur, vm_flags))
86039bd3 1383 goto out_unlock;
29ec9066
AA
1384
1385 /*
1386 * UFFDIO_COPY will fill file holes even without
1387 * PROT_WRITE. This check enforces that if this is a
1388 * MAP_SHARED, the process has write permission to the backing
1389 * file. If VM_MAYWRITE is set it also enforces that on a
1390 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
1391 * F_WRITE_SEAL can be taken until the vma is destroyed.
1392 */
1393 ret = -EPERM;
1394 if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
1395 goto out_unlock;
1396
cab350af
MK
1397 /*
1398 * If this vma contains ending address, and huge pages
1399 * check alignment.
1400 */
1401 if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
1402 end > cur->vm_start) {
1403 unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
1404
1405 ret = -EINVAL;
1406
1407 if (end & (vma_hpagesize - 1))
1408 goto out_unlock;
1409 }
63b2d417
AA
1410 if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
1411 goto out_unlock;
86039bd3
AA
1412
1413 /*
1414 * Check that this vma isn't already owned by a
1415 * different userfaultfd. We can't allow more than one
1416 * userfaultfd to own a single vma simultaneously or we
1417 * wouldn't know which one to deliver the userfaults to.
1418 */
1419 ret = -EBUSY;
1420 if (cur->vm_userfaultfd_ctx.ctx &&
1421 cur->vm_userfaultfd_ctx.ctx != ctx)
1422 goto out_unlock;
1423
cab350af
MK
1424 /*
1425 * Note vmas containing huge pages
1426 */
ce53e8e6
MR
1427 if (is_vm_hugetlb_page(cur))
1428 basic_ioctls = true;
cab350af 1429
86039bd3
AA
1430 found = true;
1431 }
1432 BUG_ON(!found);
1433
69dbe6da
LH
1434 mas_set(&mas, start);
1435 prev = mas_prev(&mas, 0);
1436 if (prev != vma)
1437 mas_next(&mas, ULONG_MAX);
86039bd3
AA
1438
1439 ret = 0;
1440 do {
1441 cond_resched();
1442
63b2d417 1443 BUG_ON(!vma_can_userfault(vma, vm_flags));
86039bd3
AA
1444 BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
1445 vma->vm_userfaultfd_ctx.ctx != ctx);
29ec9066 1446 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
86039bd3
AA
1447
1448 /*
1449 * Nothing to do: this vma is already registered into this
1450 * userfaultfd and with the right tracking mode too.
1451 */
1452 if (vma->vm_userfaultfd_ctx.ctx == ctx &&
1453 (vma->vm_flags & vm_flags) == vm_flags)
1454 goto skip;
1455
1456 if (vma->vm_start > start)
1457 start = vma->vm_start;
1458 vma_end = min(end, vma->vm_end);
1459
7677f7fd 1460 new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
86039bd3
AA
1461 prev = vma_merge(mm, prev, start, vma_end, new_flags,
1462 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
1463 vma_policy(vma),
9a10064f 1464 ((struct vm_userfaultfd_ctx){ ctx }),
5c26f6ac 1465 anon_vma_name(vma));
86039bd3 1466 if (prev) {
69dbe6da
LH
1467 /* vma_merge() invalidated the mas */
1468 mas_pause(&mas);
86039bd3
AA
1469 vma = prev;
1470 goto next;
1471 }
1472 if (vma->vm_start < start) {
1473 ret = split_vma(mm, vma, start, 1);
1474 if (ret)
1475 break;
69dbe6da
LH
1476 /* split_vma() invalidated the mas */
1477 mas_pause(&mas);
86039bd3
AA
1478 }
1479 if (vma->vm_end > end) {
1480 ret = split_vma(mm, vma, end, 0);
1481 if (ret)
1482 break;
69dbe6da
LH
1483 /* split_vma() invalidated the mas */
1484 mas_pause(&mas);
86039bd3
AA
1485 }
1486 next:
1487 /*
1488 * In the vma_merge() successful mprotect-like case 8:
1489 * the next vma was merged into the current one and
1490 * the current one has not been updated yet.
1491 */
51d3d5eb 1492 userfaultfd_set_vm_flags(vma, new_flags);
86039bd3
AA
1493 vma->vm_userfaultfd_ctx.ctx = ctx;
1494
6dfeaff9
PX
1495 if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
1496 hugetlb_unshare_all_pmds(vma);
1497
86039bd3
AA
1498 skip:
1499 prev = vma;
1500 start = vma->vm_end;
69dbe6da
LH
1501 vma = mas_next(&mas, end - 1);
1502 } while (vma);
86039bd3 1503out_unlock:
d8ed45c5 1504 mmap_write_unlock(mm);
d2005e3f 1505 mmput(mm);
86039bd3 1506 if (!ret) {
14819305
PX
1507 __u64 ioctls_out;
1508
1509 ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
1510 UFFD_API_RANGE_IOCTLS;
1511
1512 /*
1513 * Declare the WP ioctl only if the WP mode is
1514 * specified and all checks passed with the range
1515 */
1516 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
1517 ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);
1518
f6191471
AR
1519 /* CONTINUE ioctl is only supported for MINOR ranges. */
1520 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
1521 ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);
1522
86039bd3
AA
1523 /*
1524 * Now that we scanned all vmas we can already tell
1525 * userland which ioctls methods are guaranteed to
1526 * succeed on this range.
1527 */
14819305 1528 if (put_user(ioctls_out, &user_uffdio_register->ioctls))
86039bd3
AA
1529 ret = -EFAULT;
1530 }
1531out:
1532 return ret;
1533}
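/*
 * Illustrative userspace sketch (not part of this file): the register path
 * above is driven by an ioctl like the one below; on success the kernel fills
 * uffdio_register.ioctls with the range ioctls (COPY/ZEROPAGE/WRITEPROTECT/
 * CONTINUE) that are guaranteed to work on the registered range.
 */
static int uffd_register_missing(int uffd, void *addr, size_t len)
{
        struct uffdio_register reg = {
                .range = { .start = (unsigned long)addr, .len = len },
                .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };

        if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0)
                return -1;
        if (!(reg.ioctls & (1ULL << _UFFDIO_COPY)))
                return -1;      /* cannot resolve missing faults with UFFDIO_COPY */
        return 0;
}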
1534
1535static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1536 unsigned long arg)
1537{
1538 struct mm_struct *mm = ctx->mm;
1539 struct vm_area_struct *vma, *prev, *cur;
1540 int ret;
1541 struct uffdio_range uffdio_unregister;
1542 unsigned long new_flags;
1543 bool found;
1544 unsigned long start, end, vma_end;
1545 const void __user *buf = (void __user *)arg;
69dbe6da 1546 MA_STATE(mas, &mm->mm_mt, 0, 0);
86039bd3
AA
1547
1548 ret = -EFAULT;
1549 if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
1550 goto out;
1551
e71e2ace 1552 ret = validate_range(mm, uffdio_unregister.start,
86039bd3
AA
1553 uffdio_unregister.len);
1554 if (ret)
1555 goto out;
1556
1557 start = uffdio_unregister.start;
1558 end = start + uffdio_unregister.len;
1559
d2005e3f
ON
1560 ret = -ENOMEM;
1561 if (!mmget_not_zero(mm))
1562 goto out;
1563
d8ed45c5 1564 mmap_write_lock(mm);
69dbe6da
LH
1565 mas_set(&mas, start);
1566 vma = mas_find(&mas, ULONG_MAX);
86039bd3
AA
1567 if (!vma)
1568 goto out_unlock;
1569
1570 /* check that there's at least one vma in the range */
1571 ret = -EINVAL;
1572 if (vma->vm_start >= end)
1573 goto out_unlock;
1574
cab350af
MK
1575 /*
1576 * If the first vma contains huge pages, make sure start address
1577 * is aligned to huge page size.
1578 */
1579 if (is_vm_hugetlb_page(vma)) {
1580 unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1581
1582 if (start & (vma_hpagesize - 1))
1583 goto out_unlock;
1584 }
1585
86039bd3
AA
1586 /*
1587 * Search for not compatible vmas.
86039bd3
AA
1588 */
1589 found = false;
1590 ret = -EINVAL;
69dbe6da 1591 for (cur = vma; cur; cur = mas_next(&mas, end - 1)) {
86039bd3
AA
1592 cond_resched();
1593
1594 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
7677f7fd 1595 !!(cur->vm_flags & __VM_UFFD_FLAGS));
86039bd3
AA
1596
1597 /*
1598 * Check not compatible vmas, not strictly required
1599 * here as not compatible vmas cannot have an
1600 * userfaultfd_ctx registered on them, but this
1601 * provides for more strict behavior to notice
1602 * unregistration errors.
1603 */
63b2d417 1604 if (!vma_can_userfault(cur, cur->vm_flags))
86039bd3
AA
1605 goto out_unlock;
1606
1607 found = true;
1608 }
1609 BUG_ON(!found);
1610
69dbe6da
LH
1611 mas_set(&mas, start);
1612 prev = mas_prev(&mas, 0);
1613 if (prev != vma)
1614 mas_next(&mas, ULONG_MAX);
86039bd3
AA
1615
1616 ret = 0;
1617 do {
1618 cond_resched();
1619
63b2d417 1620 BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
86039bd3
AA
1621
1622 /*
1623 * Nothing to do: this vma is already registered into this
1624 * userfaultfd and with the right tracking mode too.
1625 */
1626 if (!vma->vm_userfaultfd_ctx.ctx)
1627 goto skip;
1628
01e881f5
AA
1629 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1630
86039bd3
AA
1631 if (vma->vm_start > start)
1632 start = vma->vm_start;
1633 vma_end = min(end, vma->vm_end);
1634
09fa5296
AA
1635 if (userfaultfd_missing(vma)) {
1636 /*
1637 * Wake any concurrent pending userfault while
1638 * we unregister, so they will not hang
1639 * permanently and it avoids userland having to call
1640 * UFFDIO_WAKE explicitly.
1641 */
1642 struct userfaultfd_wake_range range;
1643 range.start = start;
1644 range.len = vma_end - start;
1645 wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
1646 }
1647
f369b07c
PX
1648 /* Reset ptes for the whole vma range if wr-protected */
1649 if (userfaultfd_wp(vma))
1650 uffd_wp_range(mm, vma, start, vma_end - start, false);
1651
7677f7fd 1652 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
86039bd3
AA
1653 prev = vma_merge(mm, prev, start, vma_end, new_flags,
1654 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
1655 vma_policy(vma),
5c26f6ac 1656 NULL_VM_UFFD_CTX, anon_vma_name(vma));
86039bd3
AA
1657 if (prev) {
1658 vma = prev;
59f2f4b8 1659 mas_pause(&mas);
86039bd3
AA
1660 goto next;
1661 }
1662 if (vma->vm_start < start) {
1663 ret = split_vma(mm, vma, start, 1);
1664 if (ret)
1665 break;
59f2f4b8 1666 mas_pause(&mas);
86039bd3
AA
1667 }
1668 if (vma->vm_end > end) {
1669 ret = split_vma(mm, vma, end, 0);
1670 if (ret)
1671 break;
59f2f4b8 1672 mas_pause(&mas);
86039bd3
AA
1673 }
1674 next:
1675 /*
1676 * In the vma_merge() successful mprotect-like case 8:
1677 * the next vma was merged into the current one and
1678 * the current one has not been updated yet.
1679 */
51d3d5eb 1680 userfaultfd_set_vm_flags(vma, new_flags);
86039bd3
AA
1681 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1682
1683 skip:
1684 prev = vma;
1685 start = vma->vm_end;
69dbe6da
LH
1686 vma = mas_next(&mas, end - 1);
1687 } while (vma);
86039bd3 1688out_unlock:
d8ed45c5 1689 mmap_write_unlock(mm);
d2005e3f 1690 mmput(mm);
86039bd3
AA
1691out:
1692 return ret;
1693}
1694
1695/*
ba85c702
AA
1696 * userfaultfd_wake may be used in combination with the
1697 * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
86039bd3
AA
1698 */
1699static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
1700 unsigned long arg)
1701{
1702 int ret;
1703 struct uffdio_range uffdio_wake;
1704 struct userfaultfd_wake_range range;
1705 const void __user *buf = (void __user *)arg;
1706
1707 ret = -EFAULT;
1708 if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
1709 goto out;
1710
e71e2ace 1711 ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
86039bd3
AA
1712 if (ret)
1713 goto out;
1714
1715 range.start = uffdio_wake.start;
1716 range.len = uffdio_wake.len;
1717
1718 /*
1719 * len == 0 means wake all and we don't want to wake all here,
1720 * so check it again to be sure.
1721 */
1722 VM_BUG_ON(!range.len);
1723
1724 wake_userfault(ctx, &range);
1725 ret = 0;
1726
1727out:
1728 return ret;
1729}
1730
ad465cae
AA
1731static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1732 unsigned long arg)
1733{
1734 __s64 ret;
1735 struct uffdio_copy uffdio_copy;
1736 struct uffdio_copy __user *user_uffdio_copy;
1737 struct userfaultfd_wake_range range;
1738
1739 user_uffdio_copy = (struct uffdio_copy __user *) arg;
1740
df2cc96e 1741 ret = -EAGAIN;
a759a909 1742 if (atomic_read(&ctx->mmap_changing))
1743 goto out;
1744
1745 ret = -EFAULT;
1746 if (copy_from_user(&uffdio_copy, user_uffdio_copy,
1747 /* don't copy "copy" last field */
1748 sizeof(uffdio_copy)-sizeof(__s64)))
1749 goto out;
1750
e71e2ace 1751 ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
1752 if (ret)
1753 goto out;
1754 /*
1755 * double check for wraparound just in case. copy_from_user()
 1756	 * will later check that uffdio_copy.src + uffdio_copy.len fits
 1757	 * within the userland address range.
1758 */
1759 ret = -EINVAL;
1760 if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
1761 goto out;
72981e0e 1762 if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
ad465cae 1763 goto out;
1764 if (mmget_not_zero(ctx->mm)) {
1765 ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
1766 uffdio_copy.len, &ctx->mmap_changing,
1767 uffdio_copy.mode);
d2005e3f 1768 mmput(ctx->mm);
96333187 1769 } else {
e86b298b 1770 return -ESRCH;
d2005e3f 1771 }
1772 if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
1773 return -EFAULT;
1774 if (ret < 0)
1775 goto out;
1776 BUG_ON(!ret);
1777 /* len == 0 would wake all */
1778 range.len = ret;
1779 if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
1780 range.start = uffdio_copy.dst;
1781 wake_userfault(ctx, &range);
1782 }
1783 ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
1784out:
1785 return ret;
1786}
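
userfaultfd_copy() reports progress through the write-only "copy" field: a positive value is the number of bytes installed, and -EAGAIN together with a positive count means the copy stopped short and should be retried for the remainder (a plain -EAGAIN with no progress signals that the address space is changing under a non-cooperative event). A hedged userspace sketch of that retry loop (helper name illustrative):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <errno.h>

/* Resolve missing faults in [dst, dst + len) by copying from src. */
static int uffd_copy_range(int uffd, unsigned long dst, unsigned long src,
			   unsigned long len)
{
	struct uffdio_copy copy;

	while (len) {
		copy.dst  = dst;
		copy.src  = src;
		copy.len  = len;
		copy.mode = 0;	/* or UFFDIO_COPY_MODE_DONTWAKE / _WP */
		copy.copy = 0;	/* output: bytes copied or -errno */

		if (ioctl(uffd, UFFDIO_COPY, &copy) == 0)
			return 0;
		if (errno != EAGAIN || copy.copy <= 0)
			return -1;	/* hard error, or mm layout changing */
		/* short copy: skip what was already installed and retry */
		dst += copy.copy;
		src += copy.copy;
		len -= copy.copy;
	}
	return 0;
}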
1787
1788static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1789 unsigned long arg)
1790{
1791 __s64 ret;
1792 struct uffdio_zeropage uffdio_zeropage;
1793 struct uffdio_zeropage __user *user_uffdio_zeropage;
1794 struct userfaultfd_wake_range range;
1795
1796 user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
1797
df2cc96e 1798 ret = -EAGAIN;
a759a909 1799 if (atomic_read(&ctx->mmap_changing))
1800 goto out;
1801
1802 ret = -EFAULT;
1803 if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
1804 /* don't copy "zeropage" last field */
1805 sizeof(uffdio_zeropage)-sizeof(__s64)))
1806 goto out;
1807
e71e2ace 1808 ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
1809 uffdio_zeropage.range.len);
1810 if (ret)
1811 goto out;
1812 ret = -EINVAL;
1813 if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
1814 goto out;
1815
1816 if (mmget_not_zero(ctx->mm)) {
1817 ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
1818 uffdio_zeropage.range.len,
1819 &ctx->mmap_changing);
d2005e3f 1820 mmput(ctx->mm);
9d95aa4b 1821 } else {
e86b298b 1822 return -ESRCH;
d2005e3f 1823 }
1824 if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
1825 return -EFAULT;
1826 if (ret < 0)
1827 goto out;
1828 /* len == 0 would wake all */
1829 BUG_ON(!ret);
1830 range.len = ret;
1831 if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
1832 range.start = uffdio_zeropage.range.start;
1833 wake_userfault(ctx, &range);
1834 }
1835 ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
1836out:
1837 return ret;
1838}
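
The zeropage path mirrors UFFDIO_COPY but fills the range with zero pages instead of copying data; the write-only "zeropage" field reports how many bytes were mapped. A minimal userspace sketch (helper name illustrative):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* Resolve missing faults in [addr, addr + len) with zero pages. */
static int uffd_zero_range(int uffd, void *addr, unsigned long len)
{
	struct uffdio_zeropage zero = {
		.range = {
			.start = (unsigned long)addr,
			.len   = len,
		},
		.mode = 0,	/* or UFFDIO_ZEROPAGE_MODE_DONTWAKE */
	};

	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zero) == -1)
		return -1;
	/* zero.zeropage now holds the number of bytes actually mapped */
	return 0;
}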
1839
1840static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
1841 unsigned long arg)
1842{
1843 int ret;
1844 struct uffdio_writeprotect uffdio_wp;
1845 struct uffdio_writeprotect __user *user_uffdio_wp;
1846 struct userfaultfd_wake_range range;
23080e27 1847 bool mode_wp, mode_dontwake;
63b2d417 1848
a759a909 1849 if (atomic_read(&ctx->mmap_changing))
1850 return -EAGAIN;
1851
1852 user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;
1853
1854 if (copy_from_user(&uffdio_wp, user_uffdio_wp,
1855 sizeof(struct uffdio_writeprotect)))
1856 return -EFAULT;
1857
e71e2ace 1858 ret = validate_range(ctx->mm, uffdio_wp.range.start,
1859 uffdio_wp.range.len);
1860 if (ret)
1861 return ret;
1862
1863 if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
1864 UFFDIO_WRITEPROTECT_MODE_WP))
1865 return -EINVAL;
1866
1867 mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
1868 mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;
1869
1870 if (mode_wp && mode_dontwake)
1871 return -EINVAL;
1872
1873 if (mmget_not_zero(ctx->mm)) {
1874 ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
1875 uffdio_wp.range.len, mode_wp,
1876 &ctx->mmap_changing);
1877 mmput(ctx->mm);
1878 } else {
1879 return -ESRCH;
1880 }
1881
1882 if (ret)
1883 return ret;
1884
23080e27 1885 if (!mode_wp && !mode_dontwake) {
1886 range.start = uffdio_wp.range.start;
1887 range.len = uffdio_wp.range.len;
1888 wake_userfault(ctx, &range);
1889 }
1890 return ret;
1891}
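
Write protection is armed and disarmed through the same ioctl: setting UFFDIO_WRITEPROTECT_MODE_WP protects the range, clearing it resolves outstanding write-protect faults. DONTWAKE is only meaningful when removing the protection, which is why the combination of both mode bits is rejected above. A hedged sketch of both directions (helper name illustrative):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <stdbool.h>

/* protect == true arms uffd-wp on the range; false resolves/unprotects it. */
static int uffd_set_writeprotect(int uffd, void *addr, unsigned long len,
				 bool protect)
{
	struct uffdio_writeprotect wp = {
		.range = {
			.start = (unsigned long)addr,
			.len   = len,
		},
		.mode = protect ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
	};

	return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
}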
1892
1893static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
1894{
1895 __s64 ret;
1896 struct uffdio_continue uffdio_continue;
1897 struct uffdio_continue __user *user_uffdio_continue;
1898 struct userfaultfd_wake_range range;
1899
1900 user_uffdio_continue = (struct uffdio_continue __user *)arg;
1901
1902 ret = -EAGAIN;
a759a909 1903 if (atomic_read(&ctx->mmap_changing))
1904 goto out;
1905
1906 ret = -EFAULT;
1907 if (copy_from_user(&uffdio_continue, user_uffdio_continue,
1908 /* don't copy the output fields */
1909 sizeof(uffdio_continue) - (sizeof(__s64))))
1910 goto out;
1911
e71e2ace 1912 ret = validate_range(ctx->mm, uffdio_continue.range.start,
1913 uffdio_continue.range.len);
1914 if (ret)
1915 goto out;
1916
1917 ret = -EINVAL;
1918 /* double check for wraparound just in case. */
1919 if (uffdio_continue.range.start + uffdio_continue.range.len <=
1920 uffdio_continue.range.start) {
1921 goto out;
1922 }
1923 if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE)
1924 goto out;
1925
1926 if (mmget_not_zero(ctx->mm)) {
1927 ret = mcopy_continue(ctx->mm, uffdio_continue.range.start,
1928 uffdio_continue.range.len,
1929 &ctx->mmap_changing);
1930 mmput(ctx->mm);
1931 } else {
1932 return -ESRCH;
1933 }
1934
1935 if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
1936 return -EFAULT;
1937 if (ret < 0)
1938 goto out;
1939
1940 /* len == 0 would wake all */
1941 BUG_ON(!ret);
1942 range.len = ret;
1943 if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
1944 range.start = uffdio_continue.range.start;
1945 wake_userfault(ctx, &range);
1946 }
1947 ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
1948
1949out:
1950 return ret;
1951}
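
UFFDIO_CONTINUE resolves minor-mode faults: the page already sits in the page cache and only the page-table entry is missing, so mcopy_continue() just wires it up and the write-only "mapped" field reports how many bytes that covered. A minimal userspace sketch (helper name illustrative):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* Resolve a minor fault by mapping the existing page-cache pages. */
static int uffd_continue_range(int uffd, void *addr, unsigned long len)
{
	struct uffdio_continue cont = {
		.range = {
			.start = (unsigned long)addr,
			.len   = len,
		},
		.mode = 0,	/* or UFFDIO_CONTINUE_MODE_DONTWAKE */
	};

	if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1)
		return -1;
	/* cont.mapped now holds the number of bytes actually mapped */
	return 0;
}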
1952
1953static inline unsigned int uffd_ctx_features(__u64 user_features)
1954{
1955 /*
1956 * For the current set of features the bits just coincide. Set
1957 * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
9cd75c3c 1958 */
22e5fe2a 1959 return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
1960}
1961
1962/*
 1963 * userland asks for a certain API version and we return which bits
 1964 * and ioctl commands are implemented in this kernel for that API
 1965 * version, or -EINVAL if the version is unknown.
1966 */
1967static int userfaultfd_api(struct userfaultfd_ctx *ctx,
1968 unsigned long arg)
1969{
1970 struct uffdio_api uffdio_api;
1971 void __user *buf = (void __user *)arg;
22e5fe2a 1972 unsigned int ctx_features;
86039bd3 1973 int ret;
65603144 1974 __u64 features;
86039bd3 1975
86039bd3 1976 ret = -EFAULT;
a9b85f94 1977 if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
86039bd3 1978 goto out;
1979 /* Ignore unsupported features (userspace built against newer kernel) */
1980 features = uffdio_api.features & UFFD_API_FEATURES;
1981 ret = -EPERM;
1982 if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
1983 goto err_out;
1984 /* report all available features and ioctls to userland */
1985 uffdio_api.features = UFFD_API_FEATURES;
7677f7fd 1986#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
1987 uffdio_api.features &=
1988 ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
1989#endif
1990#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1991 uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
1992#endif
1993#ifndef CONFIG_PTE_MARKER_UFFD_WP
1994 uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
7677f7fd 1995#endif
1996 uffdio_api.ioctls = UFFD_API_IOCTLS;
1997 ret = -EFAULT;
1998 if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
1999 goto out;
22e5fe2a 2000
65603144 2001 /* only enable the requested features for this uffd context */
2002 ctx_features = uffd_ctx_features(features);
2003 ret = -EINVAL;
2004 if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
2005 goto err_out;
2006
2007 ret = 0;
2008out:
2009 return ret;
2010err_out:
2011 memset(&uffdio_api, 0, sizeof(uffdio_api));
2012 if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
2013 ret = -EFAULT;
2014 goto out;
2015}
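
Every freshly created userfaultfd has to complete this UFFDIO_API handshake before any other ioctl is accepted, and the cmpxchg on ctx->features makes the handshake one-shot. A hedged userspace sketch of the negotiation, requesting only the default feature set (helper name illustrative):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>

/* One-shot API handshake; must precede UFFDIO_REGISTER and friends. */
static int uffd_api_handshake(int uffd)
{
	struct uffdio_api api = {
		.api      = UFFD_API,
		.features = 0,	/* ask for no optional features */
	};

	if (ioctl(uffd, UFFDIO_API, &api) == -1)
		return -1;
	/* api.features and api.ioctls now report what this kernel supports */
	return 0;
}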
2016
2017static long userfaultfd_ioctl(struct file *file, unsigned cmd,
2018 unsigned long arg)
2019{
2020 int ret = -EINVAL;
2021 struct userfaultfd_ctx *ctx = file->private_data;
2022
22e5fe2a 2023 if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
2024 return -EINVAL;
2025
2026 switch(cmd) {
2027 case UFFDIO_API:
2028 ret = userfaultfd_api(ctx, arg);
2029 break;
2030 case UFFDIO_REGISTER:
2031 ret = userfaultfd_register(ctx, arg);
2032 break;
2033 case UFFDIO_UNREGISTER:
2034 ret = userfaultfd_unregister(ctx, arg);
2035 break;
2036 case UFFDIO_WAKE:
2037 ret = userfaultfd_wake(ctx, arg);
2038 break;
2039 case UFFDIO_COPY:
2040 ret = userfaultfd_copy(ctx, arg);
2041 break;
2042 case UFFDIO_ZEROPAGE:
2043 ret = userfaultfd_zeropage(ctx, arg);
2044 break;
2045 case UFFDIO_WRITEPROTECT:
2046 ret = userfaultfd_writeprotect(ctx, arg);
2047 break;
2048 case UFFDIO_CONTINUE:
2049 ret = userfaultfd_continue(ctx, arg);
2050 break;
2051 }
2052 return ret;
2053}
2054
2055#ifdef CONFIG_PROC_FS
2056static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
2057{
2058 struct userfaultfd_ctx *ctx = f->private_data;
ac6424b9 2059 wait_queue_entry_t *wq;
2060 unsigned long pending = 0, total = 0;
2061
cbcfa130 2062 spin_lock_irq(&ctx->fault_pending_wqh.lock);
2055da97 2063 list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
2064 pending++;
2065 total++;
2066 }
2055da97 2067 list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
2068 total++;
2069 }
cbcfa130 2070 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
2071
2072 /*
 2073	 * If more protocols are added, they will all be shown
 2074	 * separated by a space, like this:
2075 * protocols: aa:... bb:...
2076 */
2077 seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
045098e9 2078 pending, total, UFFD_API, ctx->features,
2079 UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
2080}
2081#endif
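
On kernels with CONFIG_PROC_FS the counters printed above can be read back from /proc/<pid>/fdinfo/<fd>. A small sketch that dumps them for the current process (path handling and buffer sizes are illustrative):

#include <stdio.h>

/* Print the pending:/total:/API: lines produced by userfaultfd_show_fdinfo(). */
static void dump_uffd_fdinfo(int uffd)
{
	char path[64], line[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", uffd);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}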
2082
2083static const struct file_operations userfaultfd_fops = {
2084#ifdef CONFIG_PROC_FS
2085 .show_fdinfo = userfaultfd_show_fdinfo,
2086#endif
2087 .release = userfaultfd_release,
2088 .poll = userfaultfd_poll,
2089 .read = userfaultfd_read,
2090 .unlocked_ioctl = userfaultfd_ioctl,
1832f2d8 2091 .compat_ioctl = compat_ptr_ioctl,
2092 .llseek = noop_llseek,
2093};
2094
2095static void init_once_userfaultfd_ctx(void *mem)
2096{
2097 struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
2098
2099 init_waitqueue_head(&ctx->fault_pending_wqh);
2100 init_waitqueue_head(&ctx->fault_wqh);
9cd75c3c 2101 init_waitqueue_head(&ctx->event_wqh);
3004ec9c 2102 init_waitqueue_head(&ctx->fd_wqh);
2ca97ac8 2103 seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
2104}
2105
2d5de004 2106static int new_userfaultfd(int flags)
86039bd3 2107{
86039bd3 2108 struct userfaultfd_ctx *ctx;
284cd241 2109 int fd;
2110
2111 BUG_ON(!current->mm);
2112
2113 /* Check the UFFD_* constants for consistency. */
37cd0575 2114 BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
2115 BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
2116 BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
2117
37cd0575 2118 if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
284cd241 2119 return -EINVAL;
86039bd3 2120
3004ec9c 2121 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
86039bd3 2122 if (!ctx)
284cd241 2123 return -ENOMEM;
86039bd3 2124
ca880420 2125 refcount_set(&ctx->refcount, 1);
86039bd3 2126 ctx->flags = flags;
9cd75c3c 2127 ctx->features = 0;
86039bd3 2128 ctx->released = false;
a759a909 2129 atomic_set(&ctx->mmap_changing, 0);
2130 ctx->mm = current->mm;
 2131	/* prevent the mm struct from being freed */
f1f10076 2132 mmgrab(ctx->mm);
86039bd3 2133
b537900f 2134 fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
abec3d01 2135 O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
284cd241 2136 if (fd < 0) {
d2005e3f 2137 mmdrop(ctx->mm);
3004ec9c 2138 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
c03e946f 2139 }
86039bd3 2140 return fd;
86039bd3 2141}
3004ec9c 2142
2143static inline bool userfaultfd_syscall_allowed(int flags)
2144{
2145 /* Userspace-only page faults are always allowed */
2146 if (flags & UFFD_USER_MODE_ONLY)
2147 return true;
2148
2149 /*
2150 * The user is requesting a userfaultfd which can handle kernel faults.
2151 * Privileged users are always allowed to do this.
2152 */
2153 if (capable(CAP_SYS_PTRACE))
2154 return true;
2155
2156 /* Otherwise, access to kernel fault handling is sysctl controlled. */
2157 return sysctl_unprivileged_userfaultfd;
2158}
2159
2160SYSCALL_DEFINE1(userfaultfd, int, flags)
2161{
2162 if (!userfaultfd_syscall_allowed(flags))
2163 return -EPERM;
2164
2165 return new_userfaultfd(flags);
2166}
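
There is no glibc wrapper for this syscall, so userspace invokes syscall(2) directly; whether an unprivileged caller succeeds depends on UFFD_USER_MODE_ONLY, CAP_SYS_PTRACE, or the vm.unprivileged_userfaultfd sysctl, exactly as checked in userfaultfd_syscall_allowed(). A minimal sketch:

#define _GNU_SOURCE
#include <linux/userfaultfd.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>

static int open_userfaultfd(void)
{
	/*
	 * UFFD_USER_MODE_ONLY restricts the fd to userspace page faults,
	 * which keeps the call available to unprivileged tasks.
	 */
	return syscall(__NR_userfaultfd,
		       O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
}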
2167
2168static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags)
2169{
2170 if (cmd != USERFAULTFD_IOC_NEW)
2171 return -EINVAL;
2172
2173 return new_userfaultfd(flags);
2174}
2175
2176static const struct file_operations userfaultfd_dev_fops = {
2177 .unlocked_ioctl = userfaultfd_dev_ioctl,
2178 .compat_ioctl = userfaultfd_dev_ioctl,
2179 .owner = THIS_MODULE,
2180 .llseek = noop_llseek,
2181};
2182
2183static struct miscdevice userfaultfd_misc = {
2184 .minor = MISC_DYNAMIC_MINOR,
2185 .name = "userfaultfd",
2186 .fops = &userfaultfd_dev_fops
2187};
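
The misc device is the second creation path: access is governed by the permissions on /dev/userfaultfd rather than by the syscall policy above, and USERFAULTFD_IOC_NEW takes the same flags the syscall does. A hedged sketch (open mode and error handling are illustrative):

#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int open_userfaultfd_dev(void)
{
	int dev_fd, uffd;

	dev_fd = open("/dev/userfaultfd", O_RDONLY | O_CLOEXEC);
	if (dev_fd < 0)
		return -1;
	/* Returns a brand new userfaultfd; the device fd can be closed. */
	uffd = ioctl(dev_fd, USERFAULTFD_IOC_NEW, O_CLOEXEC | O_NONBLOCK);
	close(dev_fd);
	return uffd;
}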
2188
2189static int __init userfaultfd_init(void)
2190{
2191 int ret;
2192
2193 ret = misc_register(&userfaultfd_misc);
2194 if (ret)
2195 return ret;
2196
2197 userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
2198 sizeof(struct userfaultfd_ctx),
2199 0,
2200 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2201 init_once_userfaultfd_ctx);
2202 return 0;
2203}
2204__initcall(userfaultfd_init);