// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/userfaultfd.c
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * Some part derived from fs/eventfd.c (anon inode setup) and
 * mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swapops.h>
#include <linux/miscdevice.h>

static int sysctl_unprivileged_userfaultfd __read_mostly;

#ifdef CONFIG_SYSCTL
static struct ctl_table vm_userfaultfd_table[] = {
	{
		.procname	= "unprivileged_userfaultfd",
		.data		= &sysctl_unprivileged_userfaultfd,
		.maxlen		= sizeof(sysctl_unprivileged_userfaultfd),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};
#endif
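
/*
 * The sysctl above controls whether unprivileged processes may use
 * userfaultfd. As a rough usage sketch (not part of this file), an
 * administrator would typically toggle it with:
 *
 *	sysctl -w vm.unprivileged_userfaultfd=1
 *
 * or by writing to /proc/sys/vm/unprivileged_userfaultfd.
 */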

static struct kmem_cache *userfaultfd_ctx_cachep __ro_after_init;

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 *
 * Locking order:
 *	fd_wqh.lock
 *		fault_pending_wqh.lock
 *			fault_wqh.lock
 *		event_wqh.lock
 *
 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
 * also taken in IRQ context.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	seqcount_spinlock_t refile_seq;
	/* pseudo fd refcounting */
	refcount_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* released */
	bool released;
	/* memory mappings are changing because of non-cooperative event */
	atomic_t mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

/* internal indication that UFFD_API ioctl was successfully executed */
#define UFFD_FEATURE_INITIALIZED (1u << 31)

static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
{
	return ctx->features & UFFD_FEATURE_INITIALIZED;
}

static bool userfaultfd_wp_async_ctx(struct userfaultfd_ctx *ctx)
{
	return ctx && (ctx->features & UFFD_FEATURE_WP_ASYNC);
}

/*
 * Whether WP_UNPOPULATED is enabled on the uffd context. It is only
 * meaningful when userfaultfd_wp()==true on the vma and when it's
 * anonymous.
 */
bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
{
	struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

	if (!ctx)
		return false;

	return ctx->features & UFFD_FEATURE_WP_UNPOPULATED;
}

static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
				     vm_flags_t flags)
{
	const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;

	vm_flags_reset(vma, flags);
	/*
	 * For shared mappings, we want to enable writenotify while
	 * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
	 * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
	 */
	if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
		vma_set_page_prot(vma);
}

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the other
		 * CPUs, the waitqueue may disappear from under us, see the
		 * !list_empty_careful() in handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern function
		 * "wake_up_state" (which in turn calls try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	refcount_inc(&ctx->refcount);
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the paddings or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned long real_address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;

	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;

	msg.arg.pagefault.address = (features & UFFD_FEATURE_EXACT_ADDRESS) ?
				    real_address : address;

	/*
	 * These flags indicate why the userfault occurred:
	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
	 * - Neither of these flags being set indicates a MISSING fault.
	 *
	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
	 * fault. Otherwise, it was a read fault.
	 */
	if (flags & FAULT_FLAG_WRITE)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (reason & VM_UFFD_MINOR)
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}

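/*
 * Illustrative sketch of the userspace side (not part of this file): a
 * monitor thread usually read()s one struct uffd_msg at a time from the
 * uffd and inspects the fields filled in by userfault_msg() above, e.g.:
 *
 *	struct uffd_msg msg;
 *	read(uffd, &msg, sizeof(msg));
 *	if (msg.event == UFFD_EVENT_PAGEFAULT &&
 *	    !(msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP))
 *		resolve_missing_fault(msg.arg.pagefault.address);
 *
 * resolve_missing_fault() is a hypothetical helper (e.g. one issuing
 * UFFDIO_COPY); error handling is omitted.
 */
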
#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_fault *vmf,
					      unsigned long reason)
{
	struct vm_area_struct *vma = vmf->vma;
	pte_t *ptep, pte;
	bool ret = true;

	assert_fault_locked(vmf);

	ptep = hugetlb_walk(vma, vmf->address, vma_mmu_pagesize(vma));
	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us. PTE markers should be handled the same as none
	 * ptes here.
	 */
	if (huge_pte_none_mostly(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_fault *vmf,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 struct vm_fault *vmf,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	unsigned long address = vmf->address;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pte_t ptent;
	bool ret = true;

	assert_fault_locked(vmf);

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
again:
	_pmd = pmdp_get_lockless(pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd) || pmd_devmap(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd)) {
		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
			ret = true;
		goto out;
	}

	pte = pte_offset_map(pmd, address);
	if (!pte) {
		ret = true;
		goto again;
	}
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us. PTE markers should be handled the same as none
	 * ptes here.
	 */
	ptent = ptep_get(pte);
	if (pte_none_mostly(ptent))
		ret = true;
	if (!pte_write(ptent) && (reason & VM_UFFD_WP))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

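/*
 * Map the fault flags to the task state used while the faulting task
 * sleeps waiting for the userfault to be resolved: interruptible if the
 * fault may be interrupted by any signal, killable if only fatal signals
 * may interrupt it, uninterruptible otherwise.
 */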
static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
{
	if (flags & FAULT_FLAG_INTERRUPTIBLE)
		return TASK_INTERRUPTIBLE;

	if (flags & FAULT_FLAG_KILLABLE)
		return TASK_KILLABLE;

	return TASK_UNINTERRUPTIBLE;
}

86039bd3
AA
402/*
403 * The locking rules involved in returning VM_FAULT_RETRY depending on
404 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
405 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
406 * recommendation in __lock_page_or_retry is not an understatement.
407 *
c1e8d7c6 408 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
86039bd3
AA
409 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
410 * not set.
411 *
412 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
413 * set, VM_FAULT_RETRY can still be returned if and only if there are
c1e8d7c6 414 * fatal_signal_pending()s, and the mmap_lock must be released before
86039bd3
AA
415 * returning it.
416 */
2b740303 417vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
86039bd3 418{
b8da2e46
PX
419 struct vm_area_struct *vma = vmf->vma;
420 struct mm_struct *mm = vma->vm_mm;
86039bd3
AA
421 struct userfaultfd_ctx *ctx;
422 struct userfaultfd_wait_queue uwq;
2b740303 423 vm_fault_t ret = VM_FAULT_SIGBUS;
3e69ad08 424 bool must_wait;
2f064a59 425 unsigned int blocking_state;
86039bd3 426
64c2b203
AA
427 /*
428 * We don't do userfault handling for the final child pid update.
429 *
430 * We also don't do userfault handling during
431 * coredumping. hugetlbfs has the special
48498071 432 * hugetlb_follow_page_mask() to skip missing pages in the
64c2b203
AA
433 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
434 * the no_page_table() helper in follow_page_mask(), but the
435 * shmem_vm_ops->fault method is invoked even during
004a9a38 436 * coredumping and it ends up here.
64c2b203
AA
437 */
438 if (current->flags & (PF_EXITING|PF_DUMPCORE))
439 goto out;
440
29a22b9e 441 assert_fault_locked(vmf);
64c2b203 442
b8da2e46 443 ctx = vma->vm_userfaultfd_ctx.ctx;
86039bd3 444 if (!ctx)
ba85c702 445 goto out;
86039bd3
AA
446
447 BUG_ON(ctx->mm != mm);
448
7677f7fd
AR
449 /* Any unrecognized flag is a bug. */
450 VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
451 /* 0 or > 1 flags set is a bug; we expect exactly 1. */
452 VM_BUG_ON(!reason || (reason & (reason - 1)));
86039bd3 453
2d6d6f5a
PS
454 if (ctx->features & UFFD_FEATURE_SIGBUS)
455 goto out;
2d5de004 456 if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
37cd0575 457 goto out;
2d6d6f5a 458
86039bd3
AA
	/*
	 * If it's already released don't get it. This avoids looping
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_lock.
	 */
6aa7de05 464 if (unlikely(READ_ONCE(ctx->released))) {
656710a6
AA
465 /*
466 * Don't return VM_FAULT_SIGBUS in this case, so a non
467 * cooperative manager can close the uffd after the
468 * last UFFDIO_COPY, without risking to trigger an
469 * involuntary SIGBUS if the process was starting the
470 * userfaultfd while the userfaultfd was still armed
471 * (but after the last UFFDIO_COPY). If the uffd
472 * wasn't already closed when the userfault reached
473 * this point, that would normally be solved by
474 * userfaultfd_must_wait returning 'false'.
475 *
476 * If we were to return VM_FAULT_SIGBUS here, the non
477 * cooperative manager would be instead forced to
478 * always call UFFDIO_UNREGISTER before it can safely
479 * close the uffd.
480 */
481 ret = VM_FAULT_NOPAGE;
ba85c702 482 goto out;
656710a6 483 }
86039bd3
AA
484
485 /*
486 * Check that we can return VM_FAULT_RETRY.
487 *
488 * NOTE: it should become possible to return VM_FAULT_RETRY
489 * even if FAULT_FLAG_TRIED is set without leading to gup()
490 * -EBUSY failures, if the userfaultfd is to be extended for
491 * VM_UFFD_WP tracking and we intend to arm the userfault
492 * without first stopping userland access to the memory. For
493 * VM_UFFD_MISSING userfaults this is enough for now.
494 */
82b0f8c3 495 if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
86039bd3
AA
496 /*
497 * Validate the invariant that nowait must allow retry
498 * to be sure not to return SIGBUS erroneously on
499 * nowait invocations.
500 */
82b0f8c3 501 BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
86039bd3
AA
502#ifdef CONFIG_DEBUG_VM
503 if (printk_ratelimit()) {
504 printk(KERN_WARNING
82b0f8c3
JK
505 "FAULT_FLAG_ALLOW_RETRY missing %x\n",
506 vmf->flags);
86039bd3
AA
507 dump_stack();
508 }
509#endif
ba85c702 510 goto out;
86039bd3
AA
511 }
512
513 /*
514 * Handle nowait, not much to do other than tell it to retry
515 * and wait.
516 */
ba85c702 517 ret = VM_FAULT_RETRY;
82b0f8c3 518 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
ba85c702 519 goto out;
86039bd3 520
c1e8d7c6 521 /* take the reference before dropping the mmap_lock */
86039bd3
AA
522 userfaultfd_ctx_get(ctx);
523
86039bd3
AA
524 init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
525 uwq.wq.private = current;
d172b1a3
NA
526 uwq.msg = userfault_msg(vmf->address, vmf->real_address, vmf->flags,
527 reason, ctx->features);
86039bd3 528 uwq.ctx = ctx;
15a77c6f 529 uwq.waken = false;
86039bd3 530
3e69ad08 531 blocking_state = userfaultfd_get_blocking_state(vmf->flags);
dfa37dc3 532
b8da2e46
PX
533 /*
534 * Take the vma lock now, in order to safely call
535 * userfaultfd_huge_must_wait() later. Since acquiring the
536 * (sleepable) vma lock can modify the current task state, that
537 * must be before explicitly calling set_current_state().
538 */
539 if (is_vm_hugetlb_page(vma))
540 hugetlb_vma_lock_read(vma);
541
cbcfa130 542 spin_lock_irq(&ctx->fault_pending_wqh.lock);
86039bd3
AA
543 /*
544 * After the __add_wait_queue the uwq is visible to userland
545 * through poll/read().
546 */
15b726ef
AA
547 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
548 /*
549 * The smp_mb() after __set_current_state prevents the reads
550 * following the spin_unlock to happen before the list_add in
551 * __add_wait_queue.
552 */
15a77c6f 553 set_current_state(blocking_state);
cbcfa130 554 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
86039bd3 555
b8da2e46 556 if (!is_vm_hugetlb_page(vma))
29a22b9e 557 must_wait = userfaultfd_must_wait(ctx, vmf, reason);
369cd212 558 else
29a22b9e 559 must_wait = userfaultfd_huge_must_wait(ctx, vmf, reason);
b8da2e46
PX
560 if (is_vm_hugetlb_page(vma))
561 hugetlb_vma_unlock_read(vma);
29a22b9e 562 release_fault_lock(vmf);
8d2afd96 563
f9bf3522 564 if (likely(must_wait && !READ_ONCE(ctx->released))) {
a9a08845 565 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
86039bd3 566 schedule();
ba85c702 567 }
86039bd3 568
ba85c702 569 __set_current_state(TASK_RUNNING);
15b726ef
AA
570
	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self. list_add also
	 * would never let either of the two pointers point to
	 * self. So list_empty_careful won't risk seeing both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function, and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
2055da97 584 if (!list_empty_careful(&uwq.wq.entry)) {
cbcfa130 585 spin_lock_irq(&ctx->fault_pending_wqh.lock);
15b726ef
AA
586 /*
587 * No need of list_del_init(), the uwq on the stack
588 * will be freed shortly anyway.
589 */
2055da97 590 list_del(&uwq.wq.entry);
cbcfa130 591 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
86039bd3 592 }
86039bd3
AA
593
594 /*
595 * ctx may go away after this if the userfault pseudo fd is
596 * already released.
597 */
598 userfaultfd_ctx_put(ctx);
599
ba85c702
AA
600out:
601 return ret;
86039bd3
AA
602}
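
/*
 * Rough life cycle of a userfault (a hedged sketch; details vary with the
 * registered mode and features): handle_userfault() above queues a
 * userfaultfd_wait_queue on fault_pending_wqh and puts the faulting task
 * to sleep; the monitor is woken through fd_wqh (poll/read), read()s the
 * uffd_msg, resolves the fault (for example with UFFDIO_COPY or
 * UFFDIO_CONTINUE), and the resulting wakeup lets the faulting task retry
 * the access.
 */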
603
8c9e7bb7
AA
604static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
605 struct userfaultfd_wait_queue *ewq)
9cd75c3c 606{
0cbb4b4f
AA
607 struct userfaultfd_ctx *release_new_ctx;
608
9a69a829
AA
609 if (WARN_ON_ONCE(current->flags & PF_EXITING))
610 goto out;
9cd75c3c
PE
611
612 ewq->ctx = ctx;
613 init_waitqueue_entry(&ewq->wq, current);
0cbb4b4f 614 release_new_ctx = NULL;
9cd75c3c 615
cbcfa130 616 spin_lock_irq(&ctx->event_wqh.lock);
9cd75c3c
PE
617 /*
618 * After the __add_wait_queue the uwq is visible to userland
619 * through poll/read().
620 */
621 __add_wait_queue(&ctx->event_wqh, &ewq->wq);
622 for (;;) {
623 set_current_state(TASK_KILLABLE);
624 if (ewq->msg.event == 0)
625 break;
6aa7de05 626 if (READ_ONCE(ctx->released) ||
9cd75c3c 627 fatal_signal_pending(current)) {
384632e6
AA
628 /*
629 * &ewq->wq may be queued in fork_event, but
630 * __remove_wait_queue ignores the head
631 * parameter. It would be a problem if it
632 * didn't.
633 */
9cd75c3c 634 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
7eb76d45
MR
635 if (ewq->msg.event == UFFD_EVENT_FORK) {
636 struct userfaultfd_ctx *new;
637
638 new = (struct userfaultfd_ctx *)
639 (unsigned long)
640 ewq->msg.arg.reserved.reserved1;
0cbb4b4f 641 release_new_ctx = new;
7eb76d45 642 }
9cd75c3c
PE
643 break;
644 }
645
cbcfa130 646 spin_unlock_irq(&ctx->event_wqh.lock);
9cd75c3c 647
a9a08845 648 wake_up_poll(&ctx->fd_wqh, EPOLLIN);
9cd75c3c
PE
649 schedule();
650
cbcfa130 651 spin_lock_irq(&ctx->event_wqh.lock);
9cd75c3c
PE
652 }
653 __set_current_state(TASK_RUNNING);
cbcfa130 654 spin_unlock_irq(&ctx->event_wqh.lock);
9cd75c3c 655
0cbb4b4f
AA
656 if (release_new_ctx) {
657 struct vm_area_struct *vma;
658 struct mm_struct *mm = release_new_ctx->mm;
69dbe6da 659 VMA_ITERATOR(vmi, mm, 0);
0cbb4b4f
AA
660
661 /* the various vma->vm_userfaultfd_ctx still points to it */
d8ed45c5 662 mmap_write_lock(mm);
69dbe6da 663 for_each_vma(vmi, vma) {
31e810aa 664 if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
60081bf1 665 vma_start_write(vma);
0cbb4b4f 666 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
51d3d5eb
DH
667 userfaultfd_set_vm_flags(vma,
668 vma->vm_flags & ~__VM_UFFD_FLAGS);
31e810aa 669 }
69dbe6da 670 }
d8ed45c5 671 mmap_write_unlock(mm);
0cbb4b4f
AA
672
673 userfaultfd_ctx_put(release_new_ctx);
674 }
675
9cd75c3c
PE
676 /*
677 * ctx may go away after this if the userfault pseudo fd is
678 * already released.
679 */
9a69a829 680out:
a759a909
NA
681 atomic_dec(&ctx->mmap_changing);
682 VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
9cd75c3c 683 userfaultfd_ctx_put(ctx);
9cd75c3c
PE
684}
685
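/* Mark an event as read and wake up the thread blocked in the function above. */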
686static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
687 struct userfaultfd_wait_queue *ewq)
688{
689 ewq->msg.event = 0;
690 wake_up_locked(&ctx->event_wqh);
691 __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
692}
693
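/*
 * Called on fork for each vma of the parent mm. If the parent's context
 * requested UFFD_FEATURE_EVENT_FORK, a new context is created (once per
 * forked mm) and attached to the child's vma; otherwise the child's vma is
 * simply detached from userfaultfd. The new contexts collected on the fcs
 * list are reported to the monitor later via dup_userfaultfd_complete().
 */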
893e26e6
PE
694int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
695{
696 struct userfaultfd_ctx *ctx = NULL, *octx;
697 struct userfaultfd_fork_ctx *fctx;
698
699 octx = vma->vm_userfaultfd_ctx.ctx;
700 if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
60081bf1 701 vma_start_write(vma);
893e26e6 702 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
51d3d5eb 703 userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
893e26e6
PE
704 return 0;
705 }
706
707 list_for_each_entry(fctx, fcs, list)
708 if (fctx->orig == octx) {
709 ctx = fctx->new;
710 break;
711 }
712
713 if (!ctx) {
714 fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
715 if (!fctx)
716 return -ENOMEM;
717
718 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
719 if (!ctx) {
720 kfree(fctx);
721 return -ENOMEM;
722 }
723
ca880420 724 refcount_set(&ctx->refcount, 1);
893e26e6 725 ctx->flags = octx->flags;
893e26e6
PE
726 ctx->features = octx->features;
727 ctx->released = false;
a759a909 728 atomic_set(&ctx->mmap_changing, 0);
893e26e6 729 ctx->mm = vma->vm_mm;
00bb31fa 730 mmgrab(ctx->mm);
893e26e6
PE
731
732 userfaultfd_ctx_get(octx);
a759a909 733 atomic_inc(&octx->mmap_changing);
893e26e6
PE
734 fctx->orig = octx;
735 fctx->new = ctx;
736 list_add_tail(&fctx->list, fcs);
737 }
738
739 vma->vm_userfaultfd_ctx.ctx = ctx;
740 return 0;
741}
742
8c9e7bb7 743static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
893e26e6
PE
744{
745 struct userfaultfd_ctx *ctx = fctx->orig;
746 struct userfaultfd_wait_queue ewq;
747
748 msg_init(&ewq.msg);
749
750 ewq.msg.event = UFFD_EVENT_FORK;
751 ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
752
8c9e7bb7 753 userfaultfd_event_wait_completion(ctx, &ewq);
893e26e6
PE
754}
755
756void dup_userfaultfd_complete(struct list_head *fcs)
757{
893e26e6
PE
758 struct userfaultfd_fork_ctx *fctx, *n;
759
760 list_for_each_entry_safe(fctx, n, fcs, list) {
8c9e7bb7 761 dup_fctx(fctx);
893e26e6
PE
762 list_del(&fctx->list);
763 kfree(fctx);
764 }
765}
766
72f87654
PE
767void mremap_userfaultfd_prep(struct vm_area_struct *vma,
768 struct vm_userfaultfd_ctx *vm_ctx)
769{
770 struct userfaultfd_ctx *ctx;
771
772 ctx = vma->vm_userfaultfd_ctx.ctx;
3cfd22be
PX
773
774 if (!ctx)
775 return;
776
777 if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
72f87654
PE
778 vm_ctx->ctx = ctx;
779 userfaultfd_ctx_get(ctx);
a759a909 780 atomic_inc(&ctx->mmap_changing);
3cfd22be
PX
781 } else {
782 /* Drop uffd context if remap feature not enabled */
60081bf1 783 vma_start_write(vma);
3cfd22be 784 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
51d3d5eb 785 userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
72f87654
PE
786 }
787}
788
90794bf1 789void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
72f87654
PE
790 unsigned long from, unsigned long to,
791 unsigned long len)
792{
90794bf1 793 struct userfaultfd_ctx *ctx = vm_ctx->ctx;
72f87654
PE
794 struct userfaultfd_wait_queue ewq;
795
796 if (!ctx)
797 return;
798
799 if (to & ~PAGE_MASK) {
800 userfaultfd_ctx_put(ctx);
801 return;
802 }
803
804 msg_init(&ewq.msg);
805
806 ewq.msg.event = UFFD_EVENT_REMAP;
807 ewq.msg.arg.remap.from = from;
808 ewq.msg.arg.remap.to = to;
809 ewq.msg.arg.remap.len = len;
810
811 userfaultfd_event_wait_completion(ctx, &ewq);
812}
813
70ccb92f 814bool userfaultfd_remove(struct vm_area_struct *vma,
d811914d 815 unsigned long start, unsigned long end)
05ce7724
PE
816{
817 struct mm_struct *mm = vma->vm_mm;
818 struct userfaultfd_ctx *ctx;
819 struct userfaultfd_wait_queue ewq;
820
821 ctx = vma->vm_userfaultfd_ctx.ctx;
d811914d 822 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
70ccb92f 823 return true;
05ce7724
PE
824
825 userfaultfd_ctx_get(ctx);
a759a909 826 atomic_inc(&ctx->mmap_changing);
d8ed45c5 827 mmap_read_unlock(mm);
05ce7724 828
05ce7724
PE
829 msg_init(&ewq.msg);
830
d811914d
MR
831 ewq.msg.event = UFFD_EVENT_REMOVE;
832 ewq.msg.arg.remove.start = start;
833 ewq.msg.arg.remove.end = end;
05ce7724
PE
834
835 userfaultfd_event_wait_completion(ctx, &ewq);
836
70ccb92f 837 return false;
05ce7724
PE
838}
839
897ab3e0
MR
840static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
841 unsigned long start, unsigned long end)
842{
843 struct userfaultfd_unmap_ctx *unmap_ctx;
844
845 list_for_each_entry(unmap_ctx, unmaps, list)
846 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
847 unmap_ctx->end == end)
848 return true;
849
850 return false;
851}
852
65ac1320 853int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start,
69dbe6da 854 unsigned long end, struct list_head *unmaps)
897ab3e0 855{
65ac1320
LH
856 struct userfaultfd_unmap_ctx *unmap_ctx;
857 struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
897ab3e0 858
65ac1320
LH
859 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
860 has_unmap_ctx(ctx, unmaps, start, end))
861 return 0;
897ab3e0 862
65ac1320
LH
863 unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
864 if (!unmap_ctx)
865 return -ENOMEM;
897ab3e0 866
65ac1320
LH
867 userfaultfd_ctx_get(ctx);
868 atomic_inc(&ctx->mmap_changing);
869 unmap_ctx->ctx = ctx;
870 unmap_ctx->start = start;
871 unmap_ctx->end = end;
872 list_add_tail(&unmap_ctx->list, unmaps);
897ab3e0
MR
873
874 return 0;
875}
876
877void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
878{
879 struct userfaultfd_unmap_ctx *ctx, *n;
880 struct userfaultfd_wait_queue ewq;
881
882 list_for_each_entry_safe(ctx, n, uf, list) {
883 msg_init(&ewq.msg);
884
885 ewq.msg.event = UFFD_EVENT_UNMAP;
886 ewq.msg.arg.remove.start = ctx->start;
887 ewq.msg.arg.remove.end = ctx->end;
888
889 userfaultfd_event_wait_completion(ctx->ctx, &ewq);
890
891 list_del(&ctx->list);
892 kfree(ctx);
893 }
894}
895
86039bd3
AA
896static int userfaultfd_release(struct inode *inode, struct file *file)
897{
898 struct userfaultfd_ctx *ctx = file->private_data;
899 struct mm_struct *mm = ctx->mm;
900 struct vm_area_struct *vma, *prev;
901 /* len == 0 means wake all */
902 struct userfaultfd_wake_range range = { .len = 0, };
903 unsigned long new_flags;
11a9b902 904 VMA_ITERATOR(vmi, mm, 0);
86039bd3 905
6aa7de05 906 WRITE_ONCE(ctx->released, true);
86039bd3 907
d2005e3f
ON
908 if (!mmget_not_zero(mm))
909 goto wakeup;
910
86039bd3
AA
911 /*
912 * Flush page faults out of all CPUs. NOTE: all page faults
913 * must be retried without returning VM_FAULT_SIGBUS if
914 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
c1e8d7c6 915 * changes while handle_userfault released the mmap_lock. So
86039bd3 916 * it's critical that released is set to true (above), before
c1e8d7c6 917 * taking the mmap_lock for writing.
86039bd3 918 */
d8ed45c5 919 mmap_write_lock(mm);
86039bd3 920 prev = NULL;
11a9b902 921 for_each_vma(vmi, vma) {
86039bd3
AA
922 cond_resched();
923 BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
7677f7fd 924 !!(vma->vm_flags & __VM_UFFD_FLAGS));
86039bd3
AA
925 if (vma->vm_userfaultfd_ctx.ctx != ctx) {
926 prev = vma;
927 continue;
928 }
7677f7fd 929 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
94d7d923
LS
930 vma = vma_modify_flags_uffd(&vmi, prev, vma, vma->vm_start,
931 vma->vm_end, new_flags,
932 NULL_VM_UFFD_CTX);
69dbe6da 933
60081bf1 934 vma_start_write(vma);
51d3d5eb 935 userfaultfd_set_vm_flags(vma, new_flags);
86039bd3 936 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
94d7d923
LS
937
938 prev = vma;
86039bd3 939 }
d8ed45c5 940 mmap_write_unlock(mm);
d2005e3f
ON
941 mmput(mm);
942wakeup:
86039bd3 943 /*
15b726ef 944 * After no new page faults can wait on this fault_*wqh, flush
86039bd3 945 * the last page faults that may have been already waiting on
15b726ef 946 * the fault_*wqh.
86039bd3 947 */
cbcfa130 948 spin_lock_irq(&ctx->fault_pending_wqh.lock);
ac5be6b4 949 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
c430d1e8 950 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
cbcfa130 951 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
86039bd3 952
5a18b64e
MR
953 /* Flush pending events that may still wait on event_wqh */
954 wake_up_all(&ctx->event_wqh);
955
a9a08845 956 wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
86039bd3
AA
957 userfaultfd_ctx_put(ctx);
958 return 0;
959}
960
/* fault_pending_wqh.lock must be held by the caller */
6dcc27fd
PE
962static inline struct userfaultfd_wait_queue *find_userfault_in(
963 wait_queue_head_t *wqh)
86039bd3 964{
ac6424b9 965 wait_queue_entry_t *wq;
15b726ef 966 struct userfaultfd_wait_queue *uwq;
86039bd3 967
456a7378 968 lockdep_assert_held(&wqh->lock);
86039bd3 969
15b726ef 970 uwq = NULL;
6dcc27fd 971 if (!waitqueue_active(wqh))
15b726ef
AA
972 goto out;
973 /* walk in reverse to provide FIFO behavior to read userfaults */
2055da97 974 wq = list_last_entry(&wqh->head, typeof(*wq), entry);
15b726ef
AA
975 uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
976out:
977 return uwq;
86039bd3 978}
6dcc27fd
PE
979
980static inline struct userfaultfd_wait_queue *find_userfault(
981 struct userfaultfd_ctx *ctx)
982{
983 return find_userfault_in(&ctx->fault_pending_wqh);
984}
86039bd3 985
9cd75c3c
PE
986static inline struct userfaultfd_wait_queue *find_userfault_evt(
987 struct userfaultfd_ctx *ctx)
988{
989 return find_userfault_in(&ctx->event_wqh);
990}
991
076ccb76 992static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
86039bd3
AA
993{
994 struct userfaultfd_ctx *ctx = file->private_data;
076ccb76 995 __poll_t ret;
86039bd3
AA
996
997 poll_wait(file, &ctx->fd_wqh, wait);
998
22e5fe2a 999 if (!userfaultfd_is_initialized(ctx))
a9a08845 1000 return EPOLLERR;
9cd75c3c 1001
22e5fe2a
NA
	/*
	 * poll() never guarantees that read won't block.
	 * userfaults can be woken before they're read().
	 */
1006 if (unlikely(!(file->f_flags & O_NONBLOCK)))
a9a08845 1007 return EPOLLERR;
22e5fe2a
NA
1008 /*
1009 * lockless access to see if there are pending faults
1010 * __pollwait last action is the add_wait_queue but
1011 * the spin_unlock would allow the waitqueue_active to
1012 * pass above the actual list_add inside
1013 * add_wait_queue critical section. So use a full
1014 * memory barrier to serialize the list_add write of
1015 * add_wait_queue() with the waitqueue_active read
1016 * below.
1017 */
1018 ret = 0;
1019 smp_mb();
1020 if (waitqueue_active(&ctx->fault_pending_wqh))
1021 ret = EPOLLIN;
1022 else if (waitqueue_active(&ctx->event_wqh))
1023 ret = EPOLLIN;
1024
1025 return ret;
86039bd3
AA
1026}
1027
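/*
 * Note on the poll handler above: the uffd must have been created with
 * O_NONBLOCK for poll() to be usable, otherwise EPOLLERR is returned.
 * A minimal userspace sketch (illustrative only):
 *
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// POLLIN: a userfault or event can be read()
 */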
893e26e6
PE
1028static const struct file_operations userfaultfd_fops;
1029
b537900f
DC
1030static int resolve_userfault_fork(struct userfaultfd_ctx *new,
1031 struct inode *inode,
893e26e6
PE
1032 struct uffd_msg *msg)
1033{
1034 int fd;
893e26e6 1035
b537900f 1036 fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
abec3d01 1037 O_RDONLY | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
893e26e6
PE
1038 if (fd < 0)
1039 return fd;
1040
893e26e6
PE
1041 msg->arg.reserved.reserved1 = 0;
1042 msg->arg.fork.ufd = fd;
893e26e6
PE
1043 return 0;
1044}
1045
86039bd3 1046static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
b537900f 1047 struct uffd_msg *msg, struct inode *inode)
86039bd3
AA
1048{
1049 ssize_t ret;
1050 DECLARE_WAITQUEUE(wait, current);
15b726ef 1051 struct userfaultfd_wait_queue *uwq;
893e26e6
PE
1052 /*
1053 * Handling fork event requires sleeping operations, so
1054 * we drop the event_wqh lock, then do these ops, then
1055 * lock it back and wake up the waiter. While the lock is
1056 * dropped the ewq may go away so we keep track of it
1057 * carefully.
1058 */
1059 LIST_HEAD(fork_event);
1060 struct userfaultfd_ctx *fork_nctx = NULL;
86039bd3 1061
15b726ef 1062 /* always take the fd_wqh lock before the fault_pending_wqh lock */
ae62c16e 1063 spin_lock_irq(&ctx->fd_wqh.lock);
86039bd3
AA
1064 __add_wait_queue(&ctx->fd_wqh, &wait);
1065 for (;;) {
1066 set_current_state(TASK_INTERRUPTIBLE);
15b726ef
AA
1067 spin_lock(&ctx->fault_pending_wqh.lock);
1068 uwq = find_userfault(ctx);
1069 if (uwq) {
2c5b7e1b
AA
1070 /*
1071 * Use a seqcount to repeat the lockless check
1072 * in wake_userfault() to avoid missing
1073 * wakeups because during the refile both
1074 * waitqueue could become empty if this is the
1075 * only userfault.
1076 */
1077 write_seqcount_begin(&ctx->refile_seq);
1078
86039bd3 1079 /*
15b726ef
AA
1080 * The fault_pending_wqh.lock prevents the uwq
1081 * to disappear from under us.
1082 *
1083 * Refile this userfault from
1084 * fault_pending_wqh to fault_wqh, it's not
1085 * pending anymore after we read it.
1086 *
1087 * Use list_del() by hand (as
1088 * userfaultfd_wake_function also uses
1089 * list_del_init() by hand) to be sure nobody
1090 * changes __remove_wait_queue() to use
1091 * list_del_init() in turn breaking the
1092 * !list_empty_careful() check in
2055da97 1093 * handle_userfault(). The uwq->wq.head list
15b726ef
AA
1094 * must never be empty at any time during the
1095 * refile, or the waitqueue could disappear
1096 * from under us. The "wait_queue_head_t"
1097 * parameter of __remove_wait_queue() is unused
1098 * anyway.
86039bd3 1099 */
2055da97 1100 list_del(&uwq->wq.entry);
c430d1e8 1101 add_wait_queue(&ctx->fault_wqh, &uwq->wq);
15b726ef 1102
2c5b7e1b
AA
1103 write_seqcount_end(&ctx->refile_seq);
1104
a9b85f94
AA
1105 /* careful to always initialize msg if ret == 0 */
1106 *msg = uwq->msg;
15b726ef 1107 spin_unlock(&ctx->fault_pending_wqh.lock);
86039bd3
AA
1108 ret = 0;
1109 break;
1110 }
15b726ef 1111 spin_unlock(&ctx->fault_pending_wqh.lock);
9cd75c3c
PE
1112
1113 spin_lock(&ctx->event_wqh.lock);
1114 uwq = find_userfault_evt(ctx);
1115 if (uwq) {
1116 *msg = uwq->msg;
1117
893e26e6
PE
1118 if (uwq->msg.event == UFFD_EVENT_FORK) {
1119 fork_nctx = (struct userfaultfd_ctx *)
1120 (unsigned long)
1121 uwq->msg.arg.reserved.reserved1;
2055da97 1122 list_move(&uwq->wq.entry, &fork_event);
384632e6
AA
1123 /*
1124 * fork_nctx can be freed as soon as
1125 * we drop the lock, unless we take a
1126 * reference on it.
1127 */
1128 userfaultfd_ctx_get(fork_nctx);
893e26e6
PE
1129 spin_unlock(&ctx->event_wqh.lock);
1130 ret = 0;
1131 break;
1132 }
1133
9cd75c3c
PE
1134 userfaultfd_event_complete(ctx, uwq);
1135 spin_unlock(&ctx->event_wqh.lock);
1136 ret = 0;
1137 break;
1138 }
1139 spin_unlock(&ctx->event_wqh.lock);
1140
86039bd3
AA
1141 if (signal_pending(current)) {
1142 ret = -ERESTARTSYS;
1143 break;
1144 }
1145 if (no_wait) {
1146 ret = -EAGAIN;
1147 break;
1148 }
ae62c16e 1149 spin_unlock_irq(&ctx->fd_wqh.lock);
86039bd3 1150 schedule();
ae62c16e 1151 spin_lock_irq(&ctx->fd_wqh.lock);
86039bd3
AA
1152 }
1153 __remove_wait_queue(&ctx->fd_wqh, &wait);
1154 __set_current_state(TASK_RUNNING);
ae62c16e 1155 spin_unlock_irq(&ctx->fd_wqh.lock);
86039bd3 1156
893e26e6 1157 if (!ret && msg->event == UFFD_EVENT_FORK) {
b537900f 1158 ret = resolve_userfault_fork(fork_nctx, inode, msg);
cbcfa130 1159 spin_lock_irq(&ctx->event_wqh.lock);
384632e6
AA
1160 if (!list_empty(&fork_event)) {
1161 /*
1162 * The fork thread didn't abort, so we can
1163 * drop the temporary refcount.
1164 */
1165 userfaultfd_ctx_put(fork_nctx);
1166
1167 uwq = list_first_entry(&fork_event,
1168 typeof(*uwq),
1169 wq.entry);
1170 /*
1171 * If fork_event list wasn't empty and in turn
1172 * the event wasn't already released by fork
1173 * (the event is allocated on fork kernel
1174 * stack), put the event back to its place in
1175 * the event_wq. fork_event head will be freed
1176 * as soon as we return so the event cannot
1177 * stay queued there no matter the current
1178 * "ret" value.
1179 */
1180 list_del(&uwq->wq.entry);
1181 __add_wait_queue(&ctx->event_wqh, &uwq->wq);
893e26e6 1182
384632e6
AA
1183 /*
1184 * Leave the event in the waitqueue and report
1185 * error to userland if we failed to resolve
1186 * the userfault fork.
1187 */
1188 if (likely(!ret))
893e26e6 1189 userfaultfd_event_complete(ctx, uwq);
384632e6
AA
1190 } else {
1191 /*
1192 * Here the fork thread aborted and the
1193 * refcount from the fork thread on fork_nctx
1194 * has already been released. We still hold
1195 * the reference we took before releasing the
1196 * lock above. If resolve_userfault_fork
1197 * failed we've to drop it because the
1198 * fork_nctx has to be freed in such case. If
1199 * it succeeded we'll hold it because the new
1200 * uffd references it.
1201 */
1202 if (ret)
1203 userfaultfd_ctx_put(fork_nctx);
893e26e6 1204 }
cbcfa130 1205 spin_unlock_irq(&ctx->event_wqh.lock);
893e26e6
PE
1206 }
1207
86039bd3
AA
1208 return ret;
1209}
1210
1211static ssize_t userfaultfd_read(struct file *file, char __user *buf,
1212 size_t count, loff_t *ppos)
1213{
1214 struct userfaultfd_ctx *ctx = file->private_data;
1215 ssize_t _ret, ret = 0;
a9b85f94 1216 struct uffd_msg msg;
86039bd3 1217 int no_wait = file->f_flags & O_NONBLOCK;
b537900f 1218 struct inode *inode = file_inode(file);
86039bd3 1219
22e5fe2a 1220 if (!userfaultfd_is_initialized(ctx))
86039bd3 1221 return -EINVAL;
86039bd3
AA
1222
1223 for (;;) {
a9b85f94 1224 if (count < sizeof(msg))
86039bd3 1225 return ret ? ret : -EINVAL;
b537900f 1226 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
86039bd3
AA
1227 if (_ret < 0)
1228 return ret ? ret : _ret;
a9b85f94 1229 if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
86039bd3 1230 return ret ? ret : -EFAULT;
a9b85f94
AA
1231 ret += sizeof(msg);
1232 buf += sizeof(msg);
1233 count -= sizeof(msg);
86039bd3
AA
		/*
		 * Allow reading more than one fault at a time, but only
		 * block if waiting for the very first one.
		 */
1238 no_wait = O_NONBLOCK;
1239 }
1240}
1241
1242static void __wake_userfault(struct userfaultfd_ctx *ctx,
1243 struct userfaultfd_wake_range *range)
1244{
cbcfa130 1245 spin_lock_irq(&ctx->fault_pending_wqh.lock);
86039bd3 1246 /* wake all in the range and autoremove */
15b726ef 1247 if (waitqueue_active(&ctx->fault_pending_wqh))
ac5be6b4 1248 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
15b726ef
AA
1249 range);
1250 if (waitqueue_active(&ctx->fault_wqh))
c430d1e8 1251 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
cbcfa130 1252 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
86039bd3
AA
1253}
1254
1255static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
1256 struct userfaultfd_wake_range *range)
1257{
2c5b7e1b
AA
1258 unsigned seq;
1259 bool need_wakeup;
1260
86039bd3
AA
1261 /*
1262 * To be sure waitqueue_active() is not reordered by the CPU
1263 * before the pagetable update, use an explicit SMP memory
3e4e28c5 1264 * barrier here. PT lock release or mmap_read_unlock(mm) still
86039bd3
AA
1265 * have release semantics that can allow the
1266 * waitqueue_active() to be reordered before the pte update.
1267 */
1268 smp_mb();
1269
	/*
	 * Use waitqueue_active because it's very frequent to
	 * change the address space atomically even if there are no
	 * userfaults yet. So we take the spinlock only when we're
	 * sure we have userfaults to wake.
	 */
2c5b7e1b
AA
1276 do {
1277 seq = read_seqcount_begin(&ctx->refile_seq);
1278 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
1279 waitqueue_active(&ctx->fault_wqh);
1280 cond_resched();
1281 } while (read_seqcount_retry(&ctx->refile_seq, seq));
1282 if (need_wakeup)
86039bd3
AA
1283 __wake_userfault(ctx, range);
1284}
1285
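/*
 * Validate a userspace-supplied range: the length must be non-zero and
 * page-aligned, and [start, start + len) must lie above mmap_min_addr and
 * within the task size. validate_range() additionally requires the start
 * address itself to be page-aligned.
 */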
2ef5d724
AR
1286static __always_inline int validate_unaligned_range(
1287 struct mm_struct *mm, __u64 start, __u64 len)
86039bd3
AA
1288{
1289 __u64 task_size = mm->task_size;
1290
86039bd3
AA
1291 if (len & ~PAGE_MASK)
1292 return -EINVAL;
1293 if (!len)
1294 return -EINVAL;
e71e2ace 1295 if (start < mmap_min_addr)
86039bd3 1296 return -EINVAL;
e71e2ace 1297 if (start >= task_size)
86039bd3 1298 return -EINVAL;
e71e2ace 1299 if (len > task_size - start)
86039bd3 1300 return -EINVAL;
2ef5d724
AR
1301 if (start + len <= start)
1302 return -EINVAL;
86039bd3
AA
1303 return 0;
1304}
1305
2ef5d724
AR
1306static __always_inline int validate_range(struct mm_struct *mm,
1307 __u64 start, __u64 len)
1308{
1309 if (start & ~PAGE_MASK)
1310 return -EINVAL;
1311
1312 return validate_unaligned_range(mm, start, len);
1313}
1314
86039bd3
AA
1315static int userfaultfd_register(struct userfaultfd_ctx *ctx,
1316 unsigned long arg)
1317{
1318 struct mm_struct *mm = ctx->mm;
1319 struct vm_area_struct *vma, *prev, *cur;
1320 int ret;
1321 struct uffdio_register uffdio_register;
1322 struct uffdio_register __user *user_uffdio_register;
1323 unsigned long vm_flags, new_flags;
1324 bool found;
ce53e8e6 1325 bool basic_ioctls;
86039bd3 1326 unsigned long start, end, vma_end;
11a9b902 1327 struct vma_iterator vmi;
d61ea1cb 1328 bool wp_async = userfaultfd_wp_async_ctx(ctx);
86039bd3
AA
1329
1330 user_uffdio_register = (struct uffdio_register __user *) arg;
1331
1332 ret = -EFAULT;
1333 if (copy_from_user(&uffdio_register, user_uffdio_register,
1334 sizeof(uffdio_register)-sizeof(__u64)))
1335 goto out;
1336
1337 ret = -EINVAL;
1338 if (!uffdio_register.mode)
1339 goto out;
7677f7fd 1340 if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
86039bd3
AA
1341 goto out;
1342 vm_flags = 0;
1343 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
1344 vm_flags |= VM_UFFD_MISSING;
00b151f2
PX
1345 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
1346#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1347 goto out;
1348#endif
86039bd3 1349 vm_flags |= VM_UFFD_WP;
00b151f2 1350 }
7677f7fd
AR
1351 if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
1352#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
1353 goto out;
1354#endif
1355 vm_flags |= VM_UFFD_MINOR;
1356 }
86039bd3 1357
e71e2ace 1358 ret = validate_range(mm, uffdio_register.range.start,
86039bd3
AA
1359 uffdio_register.range.len);
1360 if (ret)
1361 goto out;
1362
1363 start = uffdio_register.range.start;
1364 end = start + uffdio_register.range.len;
1365
d2005e3f
ON
1366 ret = -ENOMEM;
1367 if (!mmget_not_zero(mm))
1368 goto out;
1369
11a9b902 1370 ret = -EINVAL;
d8ed45c5 1371 mmap_write_lock(mm);
11a9b902
LH
1372 vma_iter_init(&vmi, mm, start);
1373 vma = vma_find(&vmi, end);
86039bd3
AA
1374 if (!vma)
1375 goto out_unlock;
1376
cab350af
MK
1377 /*
1378 * If the first vma contains huge pages, make sure start address
1379 * is aligned to huge page size.
1380 */
1381 if (is_vm_hugetlb_page(vma)) {
1382 unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1383
1384 if (start & (vma_hpagesize - 1))
1385 goto out_unlock;
1386 }
1387
86039bd3
AA
	/*
	 * Search for incompatible vmas.
	 */
1391 found = false;
ce53e8e6 1392 basic_ioctls = false;
11a9b902
LH
1393 cur = vma;
1394 do {
86039bd3
AA
1395 cond_resched();
1396
1397 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
7677f7fd 1398 !!(cur->vm_flags & __VM_UFFD_FLAGS));
86039bd3
AA
1399
1400 /* check not compatible vmas */
1401 ret = -EINVAL;
d61ea1cb 1402 if (!vma_can_userfault(cur, vm_flags, wp_async))
86039bd3 1403 goto out_unlock;
29ec9066
AA
1404
1405 /*
1406 * UFFDIO_COPY will fill file holes even without
1407 * PROT_WRITE. This check enforces that if this is a
1408 * MAP_SHARED, the process has write permission to the backing
1409 * file. If VM_MAYWRITE is set it also enforces that on a
1410 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
1411 * F_WRITE_SEAL can be taken until the vma is destroyed.
1412 */
1413 ret = -EPERM;
1414 if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
1415 goto out_unlock;
1416
cab350af
MK
1417 /*
1418 * If this vma contains ending address, and huge pages
1419 * check alignment.
1420 */
1421 if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
1422 end > cur->vm_start) {
1423 unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
1424
1425 ret = -EINVAL;
1426
1427 if (end & (vma_hpagesize - 1))
1428 goto out_unlock;
1429 }
63b2d417
AA
1430 if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
1431 goto out_unlock;
86039bd3
AA
1432
1433 /*
1434 * Check that this vma isn't already owned by a
1435 * different userfaultfd. We can't allow more than one
1436 * userfaultfd to own a single vma simultaneously or we
1437 * wouldn't know which one to deliver the userfaults to.
1438 */
1439 ret = -EBUSY;
1440 if (cur->vm_userfaultfd_ctx.ctx &&
1441 cur->vm_userfaultfd_ctx.ctx != ctx)
1442 goto out_unlock;
1443
cab350af
MK
1444 /*
1445 * Note vmas containing huge pages
1446 */
ce53e8e6
MR
1447 if (is_vm_hugetlb_page(cur))
1448 basic_ioctls = true;
cab350af 1449
86039bd3 1450 found = true;
11a9b902 1451 } for_each_vma_range(vmi, cur, end);
86039bd3
AA
1452 BUG_ON(!found);
1453
11a9b902
LH
1454 vma_iter_set(&vmi, start);
1455 prev = vma_prev(&vmi);
270aa010
PX
1456 if (vma->vm_start < start)
1457 prev = vma;
86039bd3
AA
1458
1459 ret = 0;
11a9b902 1460 for_each_vma_range(vmi, vma, end) {
86039bd3
AA
1461 cond_resched();
1462
d61ea1cb 1463 BUG_ON(!vma_can_userfault(vma, vm_flags, wp_async));
86039bd3
AA
1464 BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
1465 vma->vm_userfaultfd_ctx.ctx != ctx);
29ec9066 1466 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
86039bd3
AA
1467
1468 /*
1469 * Nothing to do: this vma is already registered into this
1470 * userfaultfd and with the right tracking mode too.
1471 */
1472 if (vma->vm_userfaultfd_ctx.ctx == ctx &&
1473 (vma->vm_flags & vm_flags) == vm_flags)
1474 goto skip;
1475
1476 if (vma->vm_start > start)
1477 start = vma->vm_start;
1478 vma_end = min(end, vma->vm_end);
1479
7677f7fd 1480 new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
94d7d923
LS
1481 vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
1482 new_flags,
1483 (struct vm_userfaultfd_ctx){ctx});
1484 if (IS_ERR(vma)) {
1485 ret = PTR_ERR(vma);
1486 break;
86039bd3 1487 }
94d7d923 1488
86039bd3
AA
1489 /*
1490 * In the vma_merge() successful mprotect-like case 8:
1491 * the next vma was merged into the current one and
1492 * the current one has not been updated yet.
1493 */
60081bf1 1494 vma_start_write(vma);
51d3d5eb 1495 userfaultfd_set_vm_flags(vma, new_flags);
86039bd3
AA
1496 vma->vm_userfaultfd_ctx.ctx = ctx;
1497
6dfeaff9
PX
1498 if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
1499 hugetlb_unshare_all_pmds(vma);
1500
86039bd3
AA
1501 skip:
1502 prev = vma;
1503 start = vma->vm_end;
11a9b902
LH
1504 }
1505
86039bd3 1506out_unlock:
d8ed45c5 1507 mmap_write_unlock(mm);
d2005e3f 1508 mmput(mm);
86039bd3 1509 if (!ret) {
14819305
PX
1510 __u64 ioctls_out;
1511
1512 ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
1513 UFFD_API_RANGE_IOCTLS;
1514
1515 /*
1516 * Declare the WP ioctl only if the WP mode is
1517 * specified and all checks passed with the range
1518 */
1519 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
1520 ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);
1521
f6191471
AR
1522 /* CONTINUE ioctl is only supported for MINOR ranges. */
1523 if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
1524 ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);
1525
86039bd3
AA
1526 /*
1527 * Now that we scanned all vmas we can already tell
1528 * userland which ioctls methods are guaranteed to
1529 * succeed on this range.
1530 */
14819305 1531 if (put_user(ioctls_out, &user_uffdio_register->ioctls))
86039bd3
AA
1532 ret = -EFAULT;
1533 }
1534out:
1535 return ret;
1536}
1537
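/*
 * Userspace counterpart of the above, as a hedged illustration (not part
 * of this file): a monitor typically registers a page-aligned mapping for
 * missing-page tracking with something like:
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (__u64)addr, .len = length },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	ioctl(uffd, UFFDIO_REGISTER, &reg);
 *
 * On success reg.ioctls reports which range ioctls are guaranteed to work;
 * addr and length are assumed to satisfy validate_range() above.
 */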
1538static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
1539 unsigned long arg)
1540{
1541 struct mm_struct *mm = ctx->mm;
1542 struct vm_area_struct *vma, *prev, *cur;
1543 int ret;
1544 struct uffdio_range uffdio_unregister;
1545 unsigned long new_flags;
1546 bool found;
1547 unsigned long start, end, vma_end;
1548 const void __user *buf = (void __user *)arg;
11a9b902 1549 struct vma_iterator vmi;
d61ea1cb 1550 bool wp_async = userfaultfd_wp_async_ctx(ctx);
86039bd3
AA
1551
1552 ret = -EFAULT;
1553 if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
1554 goto out;
1555
e71e2ace 1556 ret = validate_range(mm, uffdio_unregister.start,
86039bd3
AA
1557 uffdio_unregister.len);
1558 if (ret)
1559 goto out;
1560
1561 start = uffdio_unregister.start;
1562 end = start + uffdio_unregister.len;
1563
d2005e3f
ON
1564 ret = -ENOMEM;
1565 if (!mmget_not_zero(mm))
1566 goto out;
1567
d8ed45c5 1568 mmap_write_lock(mm);
86039bd3 1569 ret = -EINVAL;
11a9b902
LH
1570 vma_iter_init(&vmi, mm, start);
1571 vma = vma_find(&vmi, end);
1572 if (!vma)
86039bd3
AA
1573 goto out_unlock;
1574
cab350af
MK
1575 /*
1576 * If the first vma contains huge pages, make sure start address
1577 * is aligned to huge page size.
1578 */
1579 if (is_vm_hugetlb_page(vma)) {
1580 unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
1581
1582 if (start & (vma_hpagesize - 1))
1583 goto out_unlock;
1584 }
1585
86039bd3
AA
	/*
	 * Search for incompatible vmas.
	 */
1589 found = false;
11a9b902
LH
1590 cur = vma;
1591 do {
86039bd3
AA
1592 cond_resched();
1593
1594 BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
7677f7fd 1595 !!(cur->vm_flags & __VM_UFFD_FLAGS));
86039bd3
AA
1596
		/*
		 * Check for incompatible vmas. Not strictly required
		 * here, as incompatible vmas cannot have a
		 * userfaultfd_ctx registered on them, but this
		 * provides for more strict behavior to notice
		 * unregistration errors.
		 */
d61ea1cb 1604 if (!vma_can_userfault(cur, cur->vm_flags, wp_async))
86039bd3
AA
1605 goto out_unlock;
1606
1607 found = true;
11a9b902 1608 } for_each_vma_range(vmi, cur, end);
86039bd3
AA
1609 BUG_ON(!found);
1610
11a9b902
LH
1611 vma_iter_set(&vmi, start);
1612 prev = vma_prev(&vmi);
270aa010
PX
1613 if (vma->vm_start < start)
1614 prev = vma;
1615
86039bd3 1616 ret = 0;
11a9b902 1617 for_each_vma_range(vmi, vma, end) {
86039bd3
AA
1618 cond_resched();
1619
d61ea1cb 1620 BUG_ON(!vma_can_userfault(vma, vma->vm_flags, wp_async));
86039bd3
AA
1621
1622 /*
1623 * Nothing to do: this vma is already registered into this
1624 * userfaultfd and with the right tracking mode too.
1625 */
1626 if (!vma->vm_userfaultfd_ctx.ctx)
1627 goto skip;
1628
01e881f5
AA
1629 WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
1630
86039bd3
AA
1631 if (vma->vm_start > start)
1632 start = vma->vm_start;
1633 vma_end = min(end, vma->vm_end);
1634
09fa5296
AA
1635 if (userfaultfd_missing(vma)) {
			/*
			 * Wake any concurrent pending userfault while
			 * we unregister, so they will not hang
			 * permanently and it avoids userland having to
			 * call UFFDIO_WAKE explicitly.
			 */
1642 struct userfaultfd_wake_range range;
1643 range.start = start;
1644 range.len = vma_end - start;
1645 wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
1646 }
1647
f369b07c
PX
1648 /* Reset ptes for the whole vma range if wr-protected */
1649 if (userfaultfd_wp(vma))
61c50040 1650 uffd_wp_range(vma, start, vma_end - start, false);
f369b07c 1651
7677f7fd 1652 new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
94d7d923
LS
1653 vma = vma_modify_flags_uffd(&vmi, prev, vma, start, vma_end,
1654 new_flags, NULL_VM_UFFD_CTX);
1655 if (IS_ERR(vma)) {
1656 ret = PTR_ERR(vma);
1657 break;
86039bd3 1658 }
94d7d923 1659
86039bd3
AA
1660 /*
1661 * In the vma_merge() successful mprotect-like case 8:
1662 * the next vma was merged into the current one and
1663 * the current one has not been updated yet.
1664 */
60081bf1 1665 vma_start_write(vma);
51d3d5eb 1666 userfaultfd_set_vm_flags(vma, new_flags);
86039bd3
AA
1667 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1668
1669 skip:
1670 prev = vma;
1671 start = vma->vm_end;
11a9b902
LH
1672 }
1673
86039bd3 1674out_unlock:
d8ed45c5 1675 mmap_write_unlock(mm);
d2005e3f 1676 mmput(mm);
86039bd3
AA
1677out:
1678 return ret;
1679}
1680
1681/*
ba85c702
AA
1682 * userfaultfd_wake may be used in combination with the
1683 * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
86039bd3
AA
1684 */
1685static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
1686 unsigned long arg)
1687{
1688 int ret;
1689 struct uffdio_range uffdio_wake;
1690 struct userfaultfd_wake_range range;
1691 const void __user *buf = (void __user *)arg;
1692
1693 ret = -EFAULT;
1694 if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
1695 goto out;
1696
e71e2ace 1697 ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
86039bd3
AA
1698 if (ret)
1699 goto out;
1700
1701 range.start = uffdio_wake.start;
1702 range.len = uffdio_wake.len;
1703
1704 /*
1705 * len == 0 means wake all and we don't want to wake all here,
1706 * so check it again to be sure.
1707 */
1708 VM_BUG_ON(!range.len);
1709
1710 wake_userfault(ctx, &range);
1711 ret = 0;
1712
1713out:
1714 return ret;
1715}
1716
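/*
 * Illustrative only: after resolving faults with the UFFDIO_*_MODE_DONTWAKE
 * modes, userspace typically wakes the whole batch in one go:
 *
 *	struct uffdio_range range = { .start = (__u64)addr, .len = length };
 *	ioctl(uffd, UFFDIO_WAKE, &range);
 */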
ad465cae
AA
1717static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
1718 unsigned long arg)
1719{
1720 __s64 ret;
1721 struct uffdio_copy uffdio_copy;
1722 struct uffdio_copy __user *user_uffdio_copy;
1723 struct userfaultfd_wake_range range;
d9712937 1724 uffd_flags_t flags = 0;
ad465cae
AA
1725
1726 user_uffdio_copy = (struct uffdio_copy __user *) arg;
1727
df2cc96e 1728 ret = -EAGAIN;
a759a909 1729 if (atomic_read(&ctx->mmap_changing))
1730 goto out;
1731
1732 ret = -EFAULT;
1733 if (copy_from_user(&uffdio_copy, user_uffdio_copy,
1734			   /* don't copy the last field, "copy" */
1735 sizeof(uffdio_copy)-sizeof(__s64)))
1736 goto out;
1737
1738 ret = validate_unaligned_range(ctx->mm, uffdio_copy.src,
1739 uffdio_copy.len);
1740 if (ret)
1741 goto out;
e71e2ace 1742 ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
1743 if (ret)
1744 goto out;
2ef5d724 1745
ad465cae 1746 ret = -EINVAL;
72981e0e 1747 if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
ad465cae 1748 goto out;
1749 if (uffdio_copy.mode & UFFDIO_COPY_MODE_WP)
1750 flags |= MFILL_ATOMIC_WP;
d2005e3f 1751 if (mmget_not_zero(ctx->mm)) {
1752 ret = mfill_atomic_copy(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
1753 uffdio_copy.len, &ctx->mmap_changing,
d9712937 1754 flags);
d2005e3f 1755 mmput(ctx->mm);
96333187 1756 } else {
e86b298b 1757 return -ESRCH;
d2005e3f 1758 }
1759 if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
1760 return -EFAULT;
1761 if (ret < 0)
1762 goto out;
1763 BUG_ON(!ret);
1764 /* len == 0 would wake all */
1765 range.len = ret;
1766 if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
1767 range.start = uffdio_copy.dst;
1768 wake_userfault(ctx, &range);
1769 }
1770 ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
1771out:
1772 return ret;
1773}
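/*
 * Illustrative userspace sketch (not part of this file): handling the
 * partial-copy case implemented above, where the ioctl fails with EAGAIN but
 * the "copy" output field reports how many bytes were resolved.  A real
 * monitor would treat a zero "copy" with EAGAIN (address space changing) by
 * re-reading events instead of giving up.  uffd, dst, src and len are
 * hypothetical.
 *
 *	struct uffdio_copy copy = {
 *		.dst = dst, .src = src, .len = len, .mode = 0, .copy = 0,
 *	};
 *	while (ioctl(uffd, UFFDIO_COPY, &copy) == -1) {
 *		if (errno != EAGAIN || copy.copy <= 0)
 *			err(1, "UFFDIO_COPY");
 *		copy.dst += copy.copy;		// retry the remainder
 *		copy.src += copy.copy;
 *		copy.len -= copy.copy;
 *		copy.copy = 0;
 *	}
 */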
1774
1775static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
1776 unsigned long arg)
1777{
1778 __s64 ret;
1779 struct uffdio_zeropage uffdio_zeropage;
1780 struct uffdio_zeropage __user *user_uffdio_zeropage;
1781 struct userfaultfd_wake_range range;
1782
1783 user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
1784
df2cc96e 1785 ret = -EAGAIN;
a759a909 1786 if (atomic_read(&ctx->mmap_changing))
1787 goto out;
1788
1789 ret = -EFAULT;
1790 if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
1791			   /* don't copy the last field, "zeropage" */
1792 sizeof(uffdio_zeropage)-sizeof(__s64)))
1793 goto out;
1794
e71e2ace 1795 ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
1796 uffdio_zeropage.range.len);
1797 if (ret)
1798 goto out;
1799 ret = -EINVAL;
1800 if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
1801 goto out;
1802
d2005e3f 1803 if (mmget_not_zero(ctx->mm)) {
1804 ret = mfill_atomic_zeropage(ctx->mm, uffdio_zeropage.range.start,
1805 uffdio_zeropage.range.len,
1806 &ctx->mmap_changing);
d2005e3f 1807 mmput(ctx->mm);
9d95aa4b 1808 } else {
e86b298b 1809 return -ESRCH;
d2005e3f 1810 }
1811 if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
1812 return -EFAULT;
1813 if (ret < 0)
1814 goto out;
1815 /* len == 0 would wake all */
1816 BUG_ON(!ret);
1817 range.len = ret;
1818 if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
1819 range.start = uffdio_zeropage.range.start;
1820 wake_userfault(ctx, &range);
1821 }
1822 ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
1823out:
1824 return ret;
1825}
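/*
 * Illustrative userspace sketch (not part of this file): resolving a missing
 * fault with zero-filled memory instead of copied data.  uffd, fault_addr and
 * page_size are hypothetical.
 *
 *	struct uffdio_zeropage zp = {
 *		.range = {
 *			.start = fault_addr & ~(page_size - 1),
 *			.len = page_size,
 *		},
 *		.mode = 0,		// also wake the faulting thread
 *	};
 *	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
 *		err(1, "UFFDIO_ZEROPAGE");
 */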
1826
1827static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
1828 unsigned long arg)
1829{
1830 int ret;
1831 struct uffdio_writeprotect uffdio_wp;
1832 struct uffdio_writeprotect __user *user_uffdio_wp;
1833 struct userfaultfd_wake_range range;
23080e27 1834 bool mode_wp, mode_dontwake;
63b2d417 1835
a759a909 1836 if (atomic_read(&ctx->mmap_changing))
1837 return -EAGAIN;
1838
1839 user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;
1840
1841 if (copy_from_user(&uffdio_wp, user_uffdio_wp,
1842 sizeof(struct uffdio_writeprotect)))
1843 return -EFAULT;
1844
e71e2ace 1845 ret = validate_range(ctx->mm, uffdio_wp.range.start,
1846 uffdio_wp.range.len);
1847 if (ret)
1848 return ret;
1849
1850 if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
1851 UFFDIO_WRITEPROTECT_MODE_WP))
1852 return -EINVAL;
1853
1854 mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
1855 mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;
1856
1857 if (mode_wp && mode_dontwake)
1858 return -EINVAL;
1859
1860 if (mmget_not_zero(ctx->mm)) {
1861 ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
1862 uffdio_wp.range.len, mode_wp,
1863 &ctx->mmap_changing);
1864 mmput(ctx->mm);
1865 } else {
1866 return -ESRCH;
1867 }
1868
1869 if (ret)
1870 return ret;
1871
23080e27 1872 if (!mode_wp && !mode_dontwake) {
1873 range.start = uffdio_wp.range.start;
1874 range.len = uffdio_wp.range.len;
1875 wake_userfault(ctx, &range);
1876 }
1877 return ret;
1878}
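/*
 * Illustrative userspace sketch (not part of this file): arming write
 * protection on a registered range, then clearing it to resolve a
 * write-protect fault; clearing without MODE_DONTWAKE also wakes the faulting
 * thread, matching the wake logic above.  uffd, addr and len are
 * hypothetical.
 *
 *	struct uffdio_writeprotect wp = {
 *		.range = { .start = addr, .len = len },
 *		.mode = UFFDIO_WRITEPROTECT_MODE_WP,
 *	};
 *	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) == -1)
 *		err(1, "UFFDIO_WRITEPROTECT (arm)");
 *
 *	// ...later, after reading a UFFD_PAGEFAULT_FLAG_WP event...
 *	wp.mode = 0;
 *	if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp) == -1)
 *		err(1, "UFFDIO_WRITEPROTECT (clear)");
 */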
1879
1880static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
1881{
1882 __s64 ret;
1883 struct uffdio_continue uffdio_continue;
1884 struct uffdio_continue __user *user_uffdio_continue;
1885 struct userfaultfd_wake_range range;
02891844 1886 uffd_flags_t flags = 0;
1887
1888 user_uffdio_continue = (struct uffdio_continue __user *)arg;
1889
1890 ret = -EAGAIN;
a759a909 1891 if (atomic_read(&ctx->mmap_changing))
1892 goto out;
1893
1894 ret = -EFAULT;
1895 if (copy_from_user(&uffdio_continue, user_uffdio_continue,
1896 /* don't copy the output fields */
1897 sizeof(uffdio_continue) - (sizeof(__s64))))
1898 goto out;
1899
e71e2ace 1900 ret = validate_range(ctx->mm, uffdio_continue.range.start,
1901 uffdio_continue.range.len);
1902 if (ret)
1903 goto out;
1904
1905 ret = -EINVAL;
1906 if (uffdio_continue.mode & ~(UFFDIO_CONTINUE_MODE_DONTWAKE |
1907 UFFDIO_CONTINUE_MODE_WP))
f6191471 1908 goto out;
1909 if (uffdio_continue.mode & UFFDIO_CONTINUE_MODE_WP)
1910 flags |= MFILL_ATOMIC_WP;
1911
1912 if (mmget_not_zero(ctx->mm)) {
1913 ret = mfill_atomic_continue(ctx->mm, uffdio_continue.range.start,
1914 uffdio_continue.range.len,
02891844 1915 &ctx->mmap_changing, flags);
1916 mmput(ctx->mm);
1917 } else {
1918 return -ESRCH;
1919 }
1920
1921 if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
1922 return -EFAULT;
1923 if (ret < 0)
1924 goto out;
1925
1926 /* len == 0 would wake all */
1927 BUG_ON(!ret);
1928 range.len = ret;
1929 if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
1930 range.start = uffdio_continue.range.start;
1931 wake_userfault(ctx, &range);
1932 }
1933 ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
1934
1935out:
1936 return ret;
1937}
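/*
 * Illustrative userspace sketch (not part of this file): resolving a minor
 * fault with UFFDIO_CONTINUE once the page-cache contents backing the range
 * are known to be up to date.  uffd, fault_addr and page_size are
 * hypothetical.
 *
 *	struct uffdio_continue cont = {
 *		.range = {
 *			.start = fault_addr & ~(page_size - 1),
 *			.len = page_size,
 *		},
 *		.mode = 0,		// also wake the faulting thread
 *	};
 *	if (ioctl(uffd, UFFDIO_CONTINUE, &cont) == -1)
 *		err(1, "UFFDIO_CONTINUE");
 */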
1938
1939static inline int userfaultfd_poison(struct userfaultfd_ctx *ctx, unsigned long arg)
1940{
1941 __s64 ret;
1942 struct uffdio_poison uffdio_poison;
1943 struct uffdio_poison __user *user_uffdio_poison;
1944 struct userfaultfd_wake_range range;
1945
1946 user_uffdio_poison = (struct uffdio_poison __user *)arg;
1947
1948 ret = -EAGAIN;
1949 if (atomic_read(&ctx->mmap_changing))
1950 goto out;
1951
1952 ret = -EFAULT;
1953 if (copy_from_user(&uffdio_poison, user_uffdio_poison,
1954 /* don't copy the output fields */
1955 sizeof(uffdio_poison) - (sizeof(__s64))))
1956 goto out;
1957
1958 ret = validate_range(ctx->mm, uffdio_poison.range.start,
1959 uffdio_poison.range.len);
1960 if (ret)
1961 goto out;
1962
1963 ret = -EINVAL;
1964 if (uffdio_poison.mode & ~UFFDIO_POISON_MODE_DONTWAKE)
1965 goto out;
1966
1967 if (mmget_not_zero(ctx->mm)) {
1968 ret = mfill_atomic_poison(ctx->mm, uffdio_poison.range.start,
1969 uffdio_poison.range.len,
1970 &ctx->mmap_changing, 0);
1971 mmput(ctx->mm);
1972 } else {
1973 return -ESRCH;
1974 }
1975
1976 if (unlikely(put_user(ret, &user_uffdio_poison->updated)))
1977 return -EFAULT;
1978 if (ret < 0)
1979 goto out;
1980
1981 /* len == 0 would wake all */
1982 BUG_ON(!ret);
1983 range.len = ret;
1984 if (!(uffdio_poison.mode & UFFDIO_POISON_MODE_DONTWAKE)) {
1985 range.start = uffdio_poison.range.start;
1986 wake_userfault(ctx, &range);
1987 }
1988 ret = range.len == uffdio_poison.range.len ? 0 : -EAGAIN;
1989
1990out:
1991 return ret;
1992}
1993
1994bool userfaultfd_wp_async(struct vm_area_struct *vma)
1995{
1996 return userfaultfd_wp_async_ctx(vma->vm_userfaultfd_ctx.ctx);
1997}
1998
1999static inline unsigned int uffd_ctx_features(__u64 user_features)
2000{
2001 /*
2002 * For the current set of features the bits just coincide. Set
2003 * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
9cd75c3c 2004 */
22e5fe2a 2005 return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
2006}
2007
2008/*
2009 * userland asks for a certain API version and we return which bits
2010 * and ioctl commands are implemented in this kernel for that API
2011 * version, or -EINVAL if the version is unknown.
2012 */
2013static int userfaultfd_api(struct userfaultfd_ctx *ctx,
2014 unsigned long arg)
2015{
2016 struct uffdio_api uffdio_api;
2017 void __user *buf = (void __user *)arg;
22e5fe2a 2018 unsigned int ctx_features;
86039bd3 2019 int ret;
65603144 2020 __u64 features;
86039bd3 2021
86039bd3 2022 ret = -EFAULT;
a9b85f94 2023 if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
86039bd3 2024 goto out;
2025 features = uffdio_api.features;
2026 ret = -EINVAL;
2027 if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
2028 goto err_out;
2029 ret = -EPERM;
2030 if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
2031 goto err_out;
2032
2033 /* WP_ASYNC relies on WP_UNPOPULATED, choose it unconditionally */
2034 if (features & UFFD_FEATURE_WP_ASYNC)
2035 features |= UFFD_FEATURE_WP_UNPOPULATED;
2036
2037 /* report all available features and ioctls to userland */
2038 uffdio_api.features = UFFD_API_FEATURES;
7677f7fd 2039#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
2040 uffdio_api.features &=
2041 ~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
2042#endif
2043#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
2044 uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
2045#endif
2046#ifndef CONFIG_PTE_MARKER_UFFD_WP
2047 uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
2bad466c 2048 uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED;
d61ea1cb 2049 uffdio_api.features &= ~UFFD_FEATURE_WP_ASYNC;
7677f7fd 2050#endif
2051 uffdio_api.ioctls = UFFD_API_IOCTLS;
2052 ret = -EFAULT;
2053 if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
2054 goto out;
22e5fe2a 2055
65603144 2056 /* only enable the requested features for this uffd context */
2057 ctx_features = uffd_ctx_features(features);
2058 ret = -EINVAL;
2059 if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
2060 goto err_out;
2061
2062 ret = 0;
2063out:
2064 return ret;
2065err_out:
2066 memset(&uffdio_api, 0, sizeof(uffdio_api));
2067 if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
2068 ret = -EFAULT;
2069 goto out;
2070}
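/*
 * Illustrative userspace sketch (not part of this file): the UFFDIO_API
 * handshake that must complete before any other ioctl is accepted on the
 * descriptor (see the userfaultfd_is_initialized() check below).  uffd is
 * hypothetical.
 *
 *	struct uffdio_api api = {
 *		.api = UFFD_API,
 *		.features = 0,		// or a subset of the advertised features
 *	};
 *	if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *		err(1, "UFFDIO_API");
 *	// api.features and api.ioctls now describe what this kernel supports
 */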
2071
2072static long userfaultfd_ioctl(struct file *file, unsigned cmd,
2073 unsigned long arg)
2074{
2075 int ret = -EINVAL;
2076 struct userfaultfd_ctx *ctx = file->private_data;
2077
22e5fe2a 2078 if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
2079 return -EINVAL;
2080
2081 switch(cmd) {
2082 case UFFDIO_API:
2083 ret = userfaultfd_api(ctx, arg);
2084 break;
2085 case UFFDIO_REGISTER:
2086 ret = userfaultfd_register(ctx, arg);
2087 break;
2088 case UFFDIO_UNREGISTER:
2089 ret = userfaultfd_unregister(ctx, arg);
2090 break;
2091 case UFFDIO_WAKE:
2092 ret = userfaultfd_wake(ctx, arg);
2093 break;
2094 case UFFDIO_COPY:
2095 ret = userfaultfd_copy(ctx, arg);
2096 break;
2097 case UFFDIO_ZEROPAGE:
2098 ret = userfaultfd_zeropage(ctx, arg);
2099 break;
2100 case UFFDIO_WRITEPROTECT:
2101 ret = userfaultfd_writeprotect(ctx, arg);
2102 break;
2103 case UFFDIO_CONTINUE:
2104 ret = userfaultfd_continue(ctx, arg);
2105 break;
2106 case UFFDIO_POISON:
2107 ret = userfaultfd_poison(ctx, arg);
2108 break;
2109 }
2110 return ret;
2111}
2112
2113#ifdef CONFIG_PROC_FS
2114static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
2115{
2116 struct userfaultfd_ctx *ctx = f->private_data;
ac6424b9 2117 wait_queue_entry_t *wq;
2118 unsigned long pending = 0, total = 0;
2119
cbcfa130 2120 spin_lock_irq(&ctx->fault_pending_wqh.lock);
2055da97 2121 list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
2122 pending++;
2123 total++;
2124 }
2055da97 2125 list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
2126 total++;
2127 }
cbcfa130 2128 spin_unlock_irq(&ctx->fault_pending_wqh.lock);
2129
2130 /*
2131	 * If more protocols are added, they will all be shown
2132 * separated by a space. Like this:
2133 * protocols: aa:... bb:...
2134 */
2135 seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
045098e9 2136 pending, total, UFFD_API, ctx->features,
2137 UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
2138}
2139#endif
2140
2141static const struct file_operations userfaultfd_fops = {
2142#ifdef CONFIG_PROC_FS
2143 .show_fdinfo = userfaultfd_show_fdinfo,
2144#endif
2145 .release = userfaultfd_release,
2146 .poll = userfaultfd_poll,
2147 .read = userfaultfd_read,
2148 .unlocked_ioctl = userfaultfd_ioctl,
1832f2d8 2149 .compat_ioctl = compat_ptr_ioctl,
2150 .llseek = noop_llseek,
2151};
2152
2153static void init_once_userfaultfd_ctx(void *mem)
2154{
2155 struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
2156
2157 init_waitqueue_head(&ctx->fault_pending_wqh);
2158 init_waitqueue_head(&ctx->fault_wqh);
9cd75c3c 2159 init_waitqueue_head(&ctx->event_wqh);
3004ec9c 2160 init_waitqueue_head(&ctx->fd_wqh);
2ca97ac8 2161 seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
2162}
2163
2d5de004 2164static int new_userfaultfd(int flags)
86039bd3 2165{
86039bd3 2166 struct userfaultfd_ctx *ctx;
284cd241 2167 int fd;
2168
2169 BUG_ON(!current->mm);
2170
2171 /* Check the UFFD_* constants for consistency. */
37cd0575 2172 BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
2173 BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
2174 BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
2175
37cd0575 2176 if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
284cd241 2177 return -EINVAL;
86039bd3 2178
3004ec9c 2179 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
86039bd3 2180 if (!ctx)
284cd241 2181 return -ENOMEM;
86039bd3 2182
ca880420 2183 refcount_set(&ctx->refcount, 1);
86039bd3 2184 ctx->flags = flags;
9cd75c3c 2185 ctx->features = 0;
86039bd3 2186 ctx->released = false;
a759a909 2187 atomic_set(&ctx->mmap_changing, 0);
2188 ctx->mm = current->mm;
2189	/* prevent the mm struct from being freed */
f1f10076 2190 mmgrab(ctx->mm);
86039bd3 2191
b537900f 2192 fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
abec3d01 2193 O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
284cd241 2194 if (fd < 0) {
d2005e3f 2195 mmdrop(ctx->mm);
3004ec9c 2196 kmem_cache_free(userfaultfd_ctx_cachep, ctx);
c03e946f 2197 }
86039bd3 2198 return fd;
86039bd3 2199}
3004ec9c 2200
2201static inline bool userfaultfd_syscall_allowed(int flags)
2202{
2203 /* Userspace-only page faults are always allowed */
2204 if (flags & UFFD_USER_MODE_ONLY)
2205 return true;
2206
2207 /*
2208 * The user is requesting a userfaultfd which can handle kernel faults.
2209 * Privileged users are always allowed to do this.
2210 */
2211 if (capable(CAP_SYS_PTRACE))
2212 return true;
2213
2214 /* Otherwise, access to kernel fault handling is sysctl controlled. */
2215 return sysctl_unprivileged_userfaultfd;
2216}
2217
2218SYSCALL_DEFINE1(userfaultfd, int, flags)
2219{
2220 if (!userfaultfd_syscall_allowed(flags))
2221 return -EPERM;
2222
2223 return new_userfaultfd(flags);
2224}
2225
2226static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags)
2227{
2228 if (cmd != USERFAULTFD_IOC_NEW)
2229 return -EINVAL;
2230
2231 return new_userfaultfd(flags);
2232}
2233
2234static const struct file_operations userfaultfd_dev_fops = {
2235 .unlocked_ioctl = userfaultfd_dev_ioctl,
2236 .compat_ioctl = userfaultfd_dev_ioctl,
2237 .owner = THIS_MODULE,
2238 .llseek = noop_llseek,
2239};
2240
2241static struct miscdevice userfaultfd_misc = {
2242 .minor = MISC_DYNAMIC_MINOR,
2243 .name = "userfaultfd",
2244 .fops = &userfaultfd_dev_fops
2245};
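/*
 * Illustrative userspace sketch (not part of this file): the two ways to
 * obtain a userfaultfd.  The syscall path is subject to
 * userfaultfd_syscall_allowed() above; the /dev/userfaultfd path is gated
 * only by the device node's permissions.  The fallback logic here is just an
 * example.
 *
 *	int uffd = syscall(__NR_userfaultfd,
 *			   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
 *	if (uffd == -1) {
 *		int devfd = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
 *		if (devfd == -1)
 *			err(1, "open /dev/userfaultfd");
 *		uffd = ioctl(devfd, USERFAULTFD_IOC_NEW,
 *			     O_CLOEXEC | O_NONBLOCK);
 *		if (uffd == -1)
 *			err(1, "USERFAULTFD_IOC_NEW");
 *		close(devfd);
 *	}
 */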
2246
2247static int __init userfaultfd_init(void)
2248{
2249 int ret;
2250
2251 ret = misc_register(&userfaultfd_misc);
2252 if (ret)
2253 return ret;
2254
2255 userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
2256 sizeof(struct userfaultfd_ctx),
2257 0,
2258 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2259 init_once_userfaultfd_ctx);
2260#ifdef CONFIG_SYSCTL
2261 register_sysctl_init("vm", vm_userfaultfd_table);
2262#endif
2263 return 0;
2264}
2265__initcall(userfaultfd_init);