// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
	int id;
};

__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_allowed() before calling this function. If
	 * it returns false, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(current->in_eventfd))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	current->in_eventfd = 1;
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
	current->in_eventfd = 0;
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning an EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented.  This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	return eventfd_signal_mask(ctx, n, 0);
}
EXPORT_SYMBOL_GPL(eventfd_signal);
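
/*
 * Illustrative sketch of typical use: eventfd_signal() is meant to be
 * callable from atomic context, e.g. an interrupt handler that wants to
 * wake a userspace poller.  The names my_dev and my_dev_irq_handler below
 * are hypothetical; eventfd_signal() and eventfd_signal_allowed() are the
 * real APIs referenced above.
 *
 *	static irqreturn_t my_dev_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		// Add 1 to the counter and wake any poll(2)/read(2) waiters.
 *		eventfd_signal(dev->trigger, 1);
 *		return IRQ_HANDLED;
 *	}
 *
 * A caller that may itself run from another waitqueue's wakeup handler
 * should check eventfd_signal_allowed() first and, if it returns false,
 * defer the eventfd_signal() call to a safe context such as a workqueue.
 */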

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_simple_remove(&eventfd_ida, ctx->id);
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock.  This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it!  add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes.  The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}

void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	lockdep_assert_held(&ctx->wqh.lock);

	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_do_read);

/**
 * eventfd_ctx_remove_wait_queue - Read the current counter and remove the wait queue entry.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue entry to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
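
/*
 * Illustrative sketch of the intended caller pattern: a consumer that has
 * attached its own wait queue entry to the eventfd's wqh (as KVM's irqfd
 * does through the poll table callback) can use
 * eventfd_ctx_remove_wait_queue() on teardown to detach atomically and
 * learn whether an event was still pending.  struct my_consumer and
 * my_consumer_flush() are hypothetical names:
 *
 *	struct my_consumer {
 *		struct eventfd_ctx *ctx;
 *		wait_queue_entry_t wait;
 *	};
 *
 *	static void my_consumer_shutdown(struct my_consumer *c)
 *	{
 *		__u64 cnt;
 *
 *		// 0 means the counter was non-zero; -EAGAIN means nothing pending.
 *		if (!eventfd_ctx_remove_wait_queue(c->ctx, &c->wait, &cnt))
 *			my_consumer_flush(c, cnt);
 *		eventfd_ctx_put(c->ctx);
 *	}
 */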

static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct eventfd_ctx *ctx = file->private_data;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (iov_iter_count(to) < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count) {
		if ((file->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -EAGAIN;
		}
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count)
				break;
			if (signal_pending(current)) {
				__remove_wait_queue(&ctx->wqh, &wait);
				__set_current_state(TASK_RUNNING);
				spin_unlock_irq(&ctx->wqh.lock);
				return -ERESTARTSYS;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	eventfd_ctx_do_read(ctx, &ucnt);
	current->in_eventfd = 1;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	current->in_eventfd = 0;
	spin_unlock_irq(&ctx->wqh.lock);
	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
		return -EFAULT;

	return sizeof(ucnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		current->in_eventfd = 1;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
		current->in_eventfd = 0;
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read_iter	= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise one of the
 * following error pointers:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
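
/*
 * Illustrative sketch of the reference pairing: both eventfd_ctx_fdget()
 * and eventfd_ctx_fileget() take a kref on the context, which must later
 * be dropped with eventfd_ctx_put().  struct my_obj and
 * my_obj_set_eventfd() are hypothetical names:
 *
 *	static int my_obj_set_eventfd(struct my_obj *obj, int fd)
 *	{
 *		struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
 *
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *
 *		// Release any previously installed context before replacing it.
 *		if (obj->efd_ctx)
 *			eventfd_ctx_put(obj->efd_ctx);
 *		obj->efd_ctx = ctx;
 *		return 0;
 *	}
 */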

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	struct file *file;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

	flags &= EFD_SHARED_FCNTL_FLAGS;
	flags |= O_RDWR;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		goto err;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto err;
	}

	file->f_mode |= FMODE_NOWAIT;
	fd_install(fd, file);
	return fd;
err:
	eventfd_free_ctx(ctx);
	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
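
/*
 * Illustrative userspace sketch of the ABI implemented above: only 8-byte
 * reads and writes of a __u64 in host byte order are accepted.  write(2)
 * adds the value to the counter, blocking (unless O_NONBLOCK is set) if
 * the addition would push the counter to ULLONG_MAX or beyond; read(2)
 * returns the whole counter and resets it to zero, or returns 1 and
 * decrements the counter when the eventfd was created with EFD_SEMAPHORE.
 * The example() function name is arbitrary:
 *
 *	#include <sys/eventfd.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int example(void)
 *	{
 *		uint64_t val = 3;
 *		int efd = eventfd(0, EFD_CLOEXEC);
 *
 *		if (efd < 0)
 *			return -1;
 *		write(efd, &val, sizeof(val));	// counter becomes 3
 *		read(efd, &val, sizeof(val));	// val == 3, counter is now 0
 *		close(efd);
 *		return 0;
 *	}
 */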