/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
};

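/*
 * Illustrative sketch, not part of the original file: the userspace view
 * of the counter semantics described above, with error handling omitted.
 *
 *	int efd = eventfd(0, 0);
 *	uint64_t val = 3;
 *
 *	write(efd, &val, sizeof(val));	// count += 3, EPOLLIN waiters woken
 *	read(efd, &val, sizeof(val));	// val == 3, count reset to 0
 */
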
/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning an EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);

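/*
 * Sketch, not part of the original file: since eventfd_signal() is safe in
 * atomic context, a driver can use it to kick userspace from an interrupt
 * handler. "struct foo" and its "trigger" context field are hypothetical.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo *foo = data;
 *
 *		eventfd_signal(foo->trigger, 1);
 *		return IRQ_HANDLED;
 *	}
 */
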
static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static struct wait_queue_head *
eventfd_get_poll_head(struct file *file, __poll_t events)
{
	struct eventfd_ctx *ctx = file->private_data;

	return &ctx->wqh;
}

static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock. This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it! add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read is ordered properly
	 * against the writes. The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= (EPOLLIN & eventmask);
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= (EPOLLOUT & eventmask);

	return events;
}

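/*
 * Sketch, not part of the original file: what the mask computed above means
 * for a userspace poll(2) on an eventfd.
 *
 *	count == 0                 -> EPOLLOUT only
 *	0 < count < ULLONG_MAX-1   -> EPOLLIN | EPOLLOUT
 *	count == ULLONG_MAX-1      -> EPOLLIN only (no room left to write)
 *	count == ULLONG_MAX        -> EPOLLIN | EPOLLERR (overflow)
 */
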
static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}

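/*
 * Sketch, not part of the original file: the EFD_SEMAPHORE branch above
 * makes each read(2) consume a single count, like a sem_wait().
 *
 *	int efd = eventfd(3, EFD_SEMAPHORE);
 *	uint64_t val;
 *
 *	read(efd, &val, sizeof(val));	// val == 1, counter now 2
 *	read(efd, &val, sizeof(val));	// val == 1, counter now 1
 */
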
/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);

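/*
 * Sketch, not part of the original file: the intended call pattern for a
 * consumer that registered its own wait queue entry on ctx->wqh and is now
 * tearing it down; the "my_wait" entry is hypothetical.
 *
 *	__u64 cnt;
 *
 *	if (eventfd_ctx_remove_wait_queue(ctx, &my_wait, &cnt) == 0)
 *		;	// cnt holds the counter value that was pending
 *	// either way, my_wait is now detached from ctx->wqh
 */
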
static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;

	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ctx->count > 0)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		eventfd_ctx_do_read(ctx, &ucnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
		return -EFAULT;

	return res;
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

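/*
 * Sketch, not part of the original file: the userspace consequences of the
 * checks above.
 *
 *	uint64_t val = ULLONG_MAX;
 *	write(efd, &val, sizeof(val));	// -1, errno == EINVAL
 *
 *	val = 1;
 *	// blocks (or fails with EAGAIN under O_NONBLOCK) while the counter
 *	// has no room, i.e. count + val would exceed ULLONG_MAX - 1
 *	write(efd, &val, sizeof(val));
 */
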
#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
}
#endif

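/*
 * Sketch, not part of the original file: with the handler above, an eventfd
 * whose counter is 12 would show a line like the following in
 * /proc/<pid>/fdinfo/<fd> (the hex value right-aligned in a 16-char field):
 *
 *	eventfd-count:                c
 */
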
static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.get_poll_head	= eventfd_get_poll_head,
	.poll_mask	= eventfd_poll_mask,
	.read		= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquires a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by eventfd_ctx_fileget(), or -EBADF if @fd is not a
 * valid file descriptor.
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);

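/*
 * Sketch, not part of the original file: the typical kernel-side lifecycle
 * of a context reference taken from a userspace-supplied fd; "efd" arriving
 * via an ioctl argument is hypothetical.
 *
 *	struct eventfd_ctx *ctx = eventfd_ctx_fdget(efd);
 *
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	eventfd_signal(ctx, 1);		// use the context...
 *	eventfd_ctx_put(ctx);		// ...and drop the kref when done
 */
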
/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	int fd;

	/* Check the EFD_* constants for consistency. */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;

	fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
			      O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
	if (fd < 0)
		eventfd_free_ctx(ctx);

	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
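
/*
 * Sketch, not part of the original file: both system calls above reach
 * do_eventfd(); the libc eventfd(2) wrapper typically uses eventfd2 so
 * that flags can be passed through.
 *
 *	int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
 *
 *	if (efd < 0)
 *		perror("eventfd");
 */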