// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/eventpoll.c (Efficient event retrieval implementation)
 * Copyright (C) 2001,...,2009 Davide Libenzi
 *
 * Davide Libenzi <davidel@xmailserver.org>
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
#include <linux/capability.h>
#include <net/busy_poll.h>

/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epnested_mutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (rwlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a rwlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * The epnested_mutex is acquired when inserting an epoll fd onto another
 * epoll fd. We do this so that we walk the epoll tree and ensure that this
 * insertion does not create a cycle of epoll file descriptors, which
 * could lead to deadlock. We need a global mutex to prevent two
 * simultaneous inserts (A into B and B into A) from racing and
 * constructing a cycle without either insert observing that it is
 * about to do so.
 * It is necessary to acquire multiple "ep->mtx"es at once in the
 * case when one epoll fd is added to another. In this case, we
 * always acquire the locks in the order of nesting (i.e. after
 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
 * before e2->mtx). Since we disallow cycles of epoll file
 * descriptors, this ensures that the mutexes are well-ordered. In
 * order to communicate this nesting to lockdep, when walking a tree
 * of epoll file descriptors, we use the current recursion depth as
 * the lockdep subkey.
 * It is possible to drop the "ep->mtx" and to use the global
 * mutex "epnested_mutex" (together with "ep->lock") to make it work,
 * but having "ep->mtx" makes the interface more scalable.
 * Events that require holding "epnested_mutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" will guarantee
 * better scalability.
 */
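
/*
 * Illustrative sketch of the nesting rule above in userspace terms (a
 * hypothetical example, not part of this file's implementation):
 *
 *	int e1 = epoll_create1(0);
 *	int e2 = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN };
 *
 *	epoll_ctl(e1, EPOLL_CTL_ADD, e2, &ev);
 *
 * From this point on, walking e1 can poll e2, so e1->mtx is always taken
 * before e2->mtx, and the recursion depth of the walk is what gets passed
 * to mutex_lock_nested() as the lockdep subclass.
 */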

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)

#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)

#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
				EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)

/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))

struct epoll_filefd {
	struct file *file;
	int fd;
} __packed;

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct eppoll_entry *next;

	/* The "base" pointer is set to the container "struct epitem" */
	struct epitem *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_entry_t wait;

	/* The wait queue head to which the "wait" wait queue item is linked */
	wait_queue_head_t *whead;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 * Avoid increasing the size of this struct, there can be many thousands
 * of these on a server and we do not want this to take another cache line.
 */
struct epitem {
	union {
		/* RB tree node links this structure to the eventpoll RB tree */
		struct rb_node rbn;
		/* Used to free the struct epitem */
		struct rcu_head rcu;
	};

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * singly linked chain of items.
	 */
	struct epitem *next;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/*
	 * Protected by file->f_lock, true for to-be-released epitem already
	 * removed from the "struct file" items list; together with
	 * eventpoll->refcount orchestrates "struct eventpoll" disposal
	 */
	bool dying;

	/* List containing poll wait queues */
	struct eppoll_entry *pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* List header used to link this item to the "struct file" items list */
	struct hlist_node fllink;

	/* wakeup_source used when EPOLLWAKEUP is set */
	struct wakeup_source __rcu *ws;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* Lock which protects rdllist and ovflist */
	rwlock_t lock;

	/* RB tree root used to store monitored fd structs */
	struct rb_root_cached rbr;

	/*
	 * This is a singly linked list that chains all the "struct epitem" that
	 * had events occur while ready events were being transferred to user
	 * space without holding ->lock.
	 */
	struct epitem *ovflist;

	/* wakeup_source used when ep_send_events or __ep_eventpoll_poll is running */
	struct wakeup_source *ws;

	/* The user that created the eventpoll descriptor */
	struct user_struct *user;

	struct file *file;

	/* used to optimize loop detection check */
	u64 gen;
	struct hlist_head refs;

	/*
	 * usage count, used together with epitem->dying to
	 * orchestrate the disposal of this struct
	 */
	refcount_t refcount;

#ifdef CONFIG_NET_RX_BUSY_POLL
	/* used to track busy poll napi_id */
	unsigned int napi_id;
	/* busy poll timeout */
	u32 busy_poll_usecs;
	/* busy poll packet budget */
	u16 busy_poll_budget;
	bool prefer_busy_poll;
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* tracks wakeup nests for lockdep validation */
	u8 nests;
#endif
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;

/* Used for cycle detection */
static DEFINE_MUTEX(epnested_mutex);

static u64 loop_check_gen = 0;

/* Used to check for epoll file descriptor inclusion loops */
static struct eventpoll *inserting_into;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __ro_after_init;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __ro_after_init;

/*
 * List of files with newly added links, where we may need to limit the number
 * of emanating paths. Protected by the epnested_mutex.
 */
struct epitems_head {
	struct hlist_head epitems;
	struct epitems_head *next;
};
static struct epitems_head *tfile_check_list = EP_UNACTIVE_PTR;

static struct kmem_cache *ephead_cache __ro_after_init;

static inline void free_ephead(struct epitems_head *head)
{
	if (head)
		kmem_cache_free(ephead_cache, head);
}

static void list_file(struct file *file)
{
	struct epitems_head *head;

	head = container_of(file->f_ep, struct epitems_head, epitems);
	if (!head->next) {
		head->next = tfile_check_list;
		tfile_check_list = head;
	}
}

static void unlist_file(struct epitems_head *head)
{
	struct epitems_head *to_free = head;
	struct hlist_node *p = rcu_dereference(hlist_first_rcu(&head->epitems));
	if (p) {
		struct epitem *epi = container_of(p, struct epitem, fllink);
		spin_lock(&epi->ffd.file->f_lock);
		if (!hlist_empty(&head->epitems))
			to_free = NULL;
		head->next = NULL;
		spin_unlock(&epi->ffd.file->f_lock);
	}
	free_ephead(to_free);
}

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long long_zero;
static long long_max = LONG_MAX;

static const struct ctl_table epoll_table[] = {
	{
		.procname	= "max_user_watches",
		.data		= &max_user_watches,
		.maxlen		= sizeof(max_user_watches),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &long_zero,
		.extra2		= &long_max,
	},
};

static void __init epoll_sysctls_init(void)
{
	register_sysctl("fs/epoll", epoll_table);
}
#else
#define epoll_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */

static const struct file_operations eventpoll_fops;

static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
		(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct epitem *epi)
{
	return !list_empty(&epi->rdllink);
}

static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
{
	return container_of(p, struct eppoll_entry, wait);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/**
 * ep_events_available - Checks if ready events might be available.
 *
 * @ep: Pointer to the eventpoll context.
 *
 * Return: a value different from zero if ready events are available,
 * or zero otherwise.
 */
static inline int ep_events_available(struct eventpoll *ep)
{
	return !list_empty_careful(&ep->rdllist) ||
		READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/**
 * busy_loop_ep_timeout - check if busy poll has timed out. The timeout value
 * from the epoll instance ep is preferred, but if it is not set fall back to
 * the system-wide global via busy_loop_timeout.
 *
 * @start_time: The start time used to compute the remaining time until timeout.
 * @ep: Pointer to the eventpoll context.
 *
 * Return: true if the timeout has expired, false otherwise.
 */
static bool busy_loop_ep_timeout(unsigned long start_time,
				 struct eventpoll *ep)
{
	unsigned long bp_usec = READ_ONCE(ep->busy_poll_usecs);

	if (bp_usec) {
		unsigned long end_time = start_time + bp_usec;
		unsigned long now = busy_loop_current_time();

		return time_after(now, end_time);
	} else {
		return busy_loop_timeout(start_time);
	}
}

static bool ep_busy_loop_on(struct eventpoll *ep)
{
	return !!READ_ONCE(ep->busy_poll_usecs) ||
	       READ_ONCE(ep->prefer_busy_poll) ||
	       net_busy_loop_on();
}

static bool ep_busy_loop_end(void *p, unsigned long start_time)
{
	struct eventpoll *ep = p;

	return ep_events_available(ep) || busy_loop_ep_timeout(start_time, ep);
}

/*
 * Busy poll if it is globally on, a supporting socket has been found and
 * there are no events; the busy loop returns when need_resched() is set or
 * when ep_events_available() becomes true.
 *
 * We must do our busy polling with irqs enabled.
 */
static bool ep_busy_loop(struct eventpoll *ep)
{
	unsigned int napi_id = READ_ONCE(ep->napi_id);
	u16 budget = READ_ONCE(ep->busy_poll_budget);
	bool prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);

	if (!budget)
		budget = BUSY_POLL_BUDGET;

	if (napi_id_valid(napi_id) && ep_busy_loop_on(ep)) {
		napi_busy_loop(napi_id, ep_busy_loop_end,
			       ep, prefer_busy_poll, budget);
		if (ep_events_available(ep))
			return true;
		/*
		 * Busy poll timed out. Drop NAPI ID for now, we can add
		 * it back in when we have moved a socket with a valid NAPI
		 * ID onto the ready list.
		 */
		if (prefer_busy_poll)
			napi_resume_irqs(napi_id);
		ep->napi_id = 0;
		return false;
	}
	return false;
}

/*
 * Set epoll busy poll NAPI ID from sk.
 */
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
	struct eventpoll *ep = epi->ep;
	unsigned int napi_id;
	struct socket *sock;
	struct sock *sk;

	if (!ep_busy_loop_on(ep))
		return;

	sock = sock_from_file(epi->ffd.file);
	if (!sock)
		return;

	sk = sock->sk;
	if (!sk)
		return;

	napi_id = READ_ONCE(sk->sk_napi_id);

	/*
	 * Nothing to do if the ID is not a valid NAPI ID, or if we
	 * already track this ID.
	 */
	if (!napi_id_valid(napi_id) || napi_id == ep->napi_id)
		return;

	/* record NAPI ID for use in next busy poll */
	ep->napi_id = napi_id;
}

static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct eventpoll *ep = file->private_data;
	void __user *uarg = (void __user *)arg;
	struct epoll_params epoll_params;

	switch (cmd) {
	case EPIOCSPARAMS:
		if (copy_from_user(&epoll_params, uarg, sizeof(epoll_params)))
			return -EFAULT;

		/* pad byte must be zero */
		if (epoll_params.__pad)
			return -EINVAL;

		if (epoll_params.busy_poll_usecs > S32_MAX)
			return -EINVAL;

		if (epoll_params.prefer_busy_poll > 1)
			return -EINVAL;

		if (epoll_params.busy_poll_budget > NAPI_POLL_WEIGHT &&
		    !capable(CAP_NET_ADMIN))
			return -EPERM;

		WRITE_ONCE(ep->busy_poll_usecs, epoll_params.busy_poll_usecs);
		WRITE_ONCE(ep->busy_poll_budget, epoll_params.busy_poll_budget);
		WRITE_ONCE(ep->prefer_busy_poll, epoll_params.prefer_busy_poll);
		return 0;
	case EPIOCGPARAMS:
		memset(&epoll_params, 0, sizeof(epoll_params));
		epoll_params.busy_poll_usecs = READ_ONCE(ep->busy_poll_usecs);
		epoll_params.busy_poll_budget = READ_ONCE(ep->busy_poll_budget);
		epoll_params.prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);
		if (copy_to_user(uarg, &epoll_params, sizeof(epoll_params)))
			return -EFAULT;
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
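
/*
 * Illustrative userspace sketch of the ioctls above (a hypothetical example,
 * not part of this file): struct epoll_params and EPIOCSPARAMS/EPIOCGPARAMS
 * come from <linux/eventpoll.h>, and the field values below are made up.
 *
 *	struct epoll_params params = {
 *		.busy_poll_usecs  = 64,
 *		.busy_poll_budget = 8,
 *		.prefer_busy_poll = 1,
 *	};
 *
 *	if (ioctl(epfd, EPIOCSPARAMS, &params) < 0)
 *		perror("EPIOCSPARAMS");
 *	if (ioctl(epfd, EPIOCGPARAMS, &params) < 0)
 *		perror("EPIOCGPARAMS");
 *
 * Budgets above NAPI_POLL_WEIGHT require CAP_NET_ADMIN, as enforced in
 * ep_eventpoll_bp_ioctl() above.
 */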

static void ep_suspend_napi_irqs(struct eventpoll *ep)
{
	unsigned int napi_id = READ_ONCE(ep->napi_id);

	if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll))
		napi_suspend_irqs(napi_id);
}

static void ep_resume_napi_irqs(struct eventpoll *ep)
{
	unsigned int napi_id = READ_ONCE(ep->napi_id);

	if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll))
		napi_resume_irqs(napi_id);
}

#else

static inline bool ep_busy_loop(struct eventpoll *ep)
{
	return false;
}

static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
}

static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	return -EOPNOTSUPP;
}

static void ep_suspend_napi_irqs(struct eventpoll *ep)
{
}

static void ep_resume_napi_irqs(struct eventpoll *ep)
{
}

#endif /* CONFIG_NET_RX_BUSY_POLL */

/*
 * As described in commit 0ccf831cb ("lockdep: annotate epoll"), the use of
 * wait queues by epoll is done in a very controlled manner. Wake ups can
 * nest inside each other, but are never done with the same locking. For
 * example:
 *
 * dfd = socket(...);
 * efd1 = epoll_create();
 * efd2 = epoll_create();
 * epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 * epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 *
 * When a packet arrives to the device underneath "dfd", the net code will
 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 * callback wakeup entry on that queue, and the wake_up() performed by the
 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
 * (efd1) notices that it may have some event ready, so it needs to wake up
 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
 * that ends up in another wake_up(), after having checked the recursion
 * constraints: no more than EP_MAX_NESTS nesting levels, to avoid stack
 * blasting.
 *
 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
 * this special case of epoll.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
			     __poll_t pollflags)
{
	struct eventpoll *ep_src;
	unsigned long flags;
	u8 nests = 0;

	/*
	 * To set the subclass or nesting level for spin_lock_irqsave_nested()
	 * it might be natural to create a per-cpu nest count. However, since
	 * we can recurse on ep->poll_wait.lock, and a non-raw spinlock can
	 * schedule() in the -rt kernel, the per-cpu variables are no longer
	 * protected. Thus, we are introducing a per-eventpoll nest field.
	 * If we are not being called from ep_poll_callback(), epi is NULL and
	 * we are at the first level of nesting, 0. Otherwise, we are being
	 * called from ep_poll_callback() and if a previous wakeup source is
	 * not an epoll file itself, we are at depth 1 since the wakeup source
	 * is depth 0. If the wakeup source is a previous epoll file in the
	 * wakeup chain then we use its nests value and record ours as
	 * nests + 1. The previous epoll file nests value is stable since it
	 * is already holding its own poll_wait.lock.
	 */
	if (epi) {
		if (is_file_epoll(epi->ffd.file)) {
			ep_src = epi->ffd.file->private_data;
			nests = ep_src->nests;
		} else {
			nests = 1;
		}
	}
	spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
	ep->nests = nests + 1;
	wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);
	ep->nests = 0;
	spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
}

#else

static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,
			     __poll_t pollflags)
{
	wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);
}

#endif

static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
	wait_queue_head_t *whead;

	rcu_read_lock();
	/*
	 * If it is cleared by POLLFREE, it should be rcu-safe.
	 * If we read NULL we need a barrier paired with
	 * smp_store_release() in ep_poll_callback(), otherwise
	 * we rely on whead->lock.
	 */
	whead = smp_load_acquire(&pwq->whead);
	if (whead)
		remove_wait_queue(whead, &pwq->wait);
	rcu_read_unlock();
}

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor. Must be called with "mtx" held.
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	struct eppoll_entry **p = &epi->pwqlist;
	struct eppoll_entry *pwq;

	while ((pwq = *p) != NULL) {
		*p = pwq->next;
		ep_remove_wait_queue(pwq);
		kmem_cache_free(pwq_cache, pwq);
	}
}

/* call only when ep->mtx is held */
static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
{
	return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
}

/* call only when ep->mtx is held */
static inline void ep_pm_stay_awake(struct epitem *epi)
{
	struct wakeup_source *ws = ep_wakeup_source(epi);

	if (ws)
		__pm_stay_awake(ws);
}

static inline bool ep_has_wakeup_source(struct epitem *epi)
{
	return rcu_access_pointer(epi->ws) ? true : false;
}

/* call when ep->mtx cannot be held (ep_poll_callback) */
static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
{
	struct wakeup_source *ws;

	rcu_read_lock();
	ws = rcu_dereference(epi->ws);
	if (ws)
		__pm_stay_awake(ws);
	rcu_read_unlock();
}

/*
 * ep->mtx needs to be held because we could be hit by
 * eventpoll_release_file() and epoll_ctl().
 */
static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)
{
	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while looping w/out locks are not lost. We cannot
	 * have the poll callback queue directly on ep->rdllist,
	 * because we want the "sproc" callback to be able to do it
	 * in a lockless way.
	 */
	lockdep_assert_irqs_enabled();
	write_lock_irq(&ep->lock);
	list_splice_init(&ep->rdllist, txlist);
	WRITE_ONCE(ep->ovflist, NULL);
	write_unlock_irq(&ep->lock);
}

static void ep_done_scan(struct eventpoll *ep,
			 struct list_head *txlist)
{
	struct epitem *epi, *nepi;

	write_lock_irq(&ep->lock);
	/*
	 * During the time we spent inside the "sproc" callback, some
	 * other events might have been queued by the poll callback.
	 * We re-insert them inside the main ready-list here.
	 */
	for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		/*
		 * We need to check if the item is already in the list.
		 * During the "sproc" callback execution time, items are
		 * queued into ->ovflist but the "txlist" might already
		 * contain them, and the list_splice() below takes care of them.
		 */
		if (!ep_is_linked(epi)) {
			/*
			 * ->ovflist is LIFO, so we have to reverse it in order
			 * to keep it in FIFO order.
			 */
			list_add(&epi->rdllink, &ep->rdllist);
			ep_pm_stay_awake(epi);
		}
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR);

	/*
	 * Quickly re-inject items left on "txlist".
	 */
	list_splice(txlist, &ep->rdllist);
	__pm_relax(ep->ws);

	if (!list_empty(&ep->rdllist)) {
		if (waitqueue_active(&ep->wq))
			wake_up(&ep->wq);
	}

	write_unlock_irq(&ep->lock);
}

static void ep_get(struct eventpoll *ep)
{
	refcount_inc(&ep->refcount);
}

/*
 * Returns true if the eventpoll can be disposed of.
 */
static bool ep_refcount_dec_and_test(struct eventpoll *ep)
{
	if (!refcount_dec_and_test(&ep->refcount))
		return false;

	WARN_ON_ONCE(!RB_EMPTY_ROOT(&ep->rbr.rb_root));
	return true;
}

static void ep_free(struct eventpoll *ep)
{
	ep_resume_napi_irqs(ep);
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	wakeup_source_unregister(ep->ws);
	kfree(ep);
}

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 * If the dying flag is set, do the removal only if force is true.
 * This prevents ep_clear_and_put() from dropping all the ep references
 * while running concurrently with eventpoll_release_file().
 * Returns true if the eventpoll can be disposed of.
 */
static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
{
	struct file *file = epi->ffd.file;
	struct epitems_head *to_free;
	struct hlist_head *head;

	lockdep_assert_irqs_enabled();

	/*
	 * Removes poll wait queue hooks.
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_lock);
	if (epi->dying && !force) {
		spin_unlock(&file->f_lock);
		return false;
	}

	to_free = NULL;
	head = file->f_ep;
	if (head->first == &epi->fllink && !epi->fllink.next) {
		/* See eventpoll_release() for details. */
		WRITE_ONCE(file->f_ep, NULL);
		if (!is_file_epoll(file)) {
			struct epitems_head *v;
			v = container_of(head, struct epitems_head, epitems);
			if (!smp_load_acquire(&v->next))
				to_free = v;
		}
	}
	hlist_del_rcu(&epi->fllink);
	spin_unlock(&file->f_lock);
	free_ephead(to_free);

	rb_erase_cached(&epi->rbn, &ep->rbr);

	write_lock_irq(&ep->lock);
	if (ep_is_linked(epi))
		list_del_init(&epi->rdllink);
	write_unlock_irq(&ep->lock);

	wakeup_source_unregister(ep_wakeup_source(epi));
	/*
	 * At this point it is safe to free the eventpoll item. Use the union
	 * field epi->rcu, since we are trying to minimize the size of
	 * 'struct epitem'. The 'rbn' field is no longer in use. Protected by
	 * ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
	 * use of the rbn field.
	 */
	kfree_rcu(epi, rcu);

	percpu_counter_dec(&ep->user->epoll_watches);
	return true;
}

/*
 * ep_remove variant for callers owning an additional reference to the ep
 */
static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
{
	if (__ep_remove(ep, epi, false))
		WARN_ON_ONCE(ep_refcount_dec_and_test(ep));
}

static void ep_clear_and_put(struct eventpoll *ep)
{
	struct rb_node *rbp, *next;
	struct epitem *epi;

	/* We need to release all tasks waiting on this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(ep, NULL, 0);

	mutex_lock(&ep->mtx);

	/*
	 * Walks through the whole tree, unregistering poll callbacks.
	 */
	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
		cond_resched();
	}

	/*
	 * Walks through the whole tree and tries to free each "struct epitem".
	 * Note that ep_remove_safe() will not remove the epitem in case of a
	 * racing eventpoll_release_file(); the latter will do the removal.
	 * At this point we are sure no poll callbacks will be lingering around.
	 * Since we still own a reference to the eventpoll struct, the loop
	 * can't dispose of it.
	 */
	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = next) {
		next = rb_next(rbp);
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove_safe(ep, epi);
		cond_resched();
	}

	mutex_unlock(&ep->mtx);
	if (ep_refcount_dec_and_test(ep))
		ep_free(ep);
}

static long ep_eventpoll_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	int ret;

	if (!is_file_epoll(file))
		return -EINVAL;

	switch (cmd) {
	case EPIOCSPARAMS:
	case EPIOCGPARAMS:
		ret = ep_eventpoll_bp_ioctl(file, cmd, arg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_clear_and_put(ep);

	return 0;
}

static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, int depth);

static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int depth)
{
	struct eventpoll *ep = file->private_data;
	LIST_HEAD(txlist);
	struct epitem *epi, *tmp;
	poll_table pt;
	__poll_t res = 0;

	init_poll_funcptr(&pt, NULL);

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/*
	 * Proceed to find out if wanted events are really available inside
	 * the ready list.
	 */
	mutex_lock_nested(&ep->mtx, depth);
	ep_start_scan(ep, &txlist);
	list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {
		if (ep_item_poll(epi, &pt, depth + 1)) {
			res = EPOLLIN | EPOLLRDNORM;
			break;
		} else {
			/*
			 * Item has been dropped into the ready list by the poll
			 * callback, but it's not actually ready, as far as the
			 * caller-requested events go. We can remove it here.
			 */
			__pm_relax(ep_wakeup_source(epi));
			list_del_init(&epi->rdllink);
		}
	}
	ep_done_scan(ep, &txlist);
	mutex_unlock(&ep->mtx);
	return res;
}

/*
 * The ffd.file pointer may be in the process of being torn down due to
 * being closed, but we may not have finished eventpoll_release() yet.
 *
 * Normally, even with a successful refcount bump, the file may have
 * been free'd and then gotten re-allocated to something else (since
 * files are not RCU-delayed, they are SLAB_TYPESAFE_BY_RCU).
 *
 * But for epoll, users hold the ep->mtx mutex, and as such any file in
 * the process of being free'd will block in eventpoll_release_file()
 * and thus the underlying file allocation will not be free'd, and the
 * file re-use cannot happen.
 *
 * For the same reason we can avoid a rcu_read_lock() around the
 * operation - 'ffd.file' cannot go away even if the refcount has
 * reached zero (but we must still not call out to ->poll() functions
 * etc).
 */
static struct file *epi_fget(const struct epitem *epi)
{
	struct file *file;

	file = epi->ffd.file;
	if (!file_ref_get(&file->f_ref))
		file = NULL;
	return file;
}

/*
 * Differs from ep_eventpoll_poll() in that internal callers already have
 * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
 * is correctly annotated.
 */
static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
			     int depth)
{
	struct file *file = epi_fget(epi);
	__poll_t res;

	/*
	 * We could return EPOLLERR | EPOLLHUP or something, but let's
	 * treat this more as "file doesn't exist, poll didn't happen".
	 */
	if (!file)
		return 0;

	pt->_key = epi->event.events;
	if (!is_file_epoll(file))
		res = vfs_poll(file, pt);
	else
		res = __ep_eventpoll_poll(file, pt, depth);
	fput(file);
	return res & epi->event.events;
}

static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	return __ep_eventpoll_poll(file, wait, 0);
}

#ifdef CONFIG_PROC_FS
static void ep_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventpoll *ep = f->private_data;
	struct rb_node *rbp;

	mutex_lock(&ep->mtx);
	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
		struct inode *inode = file_inode(epi->ffd.file);

		seq_printf(m, "tfd: %8d events: %8x data: %16llx "
			   " pos:%lli ino:%lx sdev:%x\n",
			   epi->ffd.fd, epi->event.events,
			   (long long)epi->event.data,
			   (long long)epi->ffd.file->f_pos,
			   inode->i_ino, inode->i_sb->s_dev);
		if (seq_has_overflowed(m))
			break;
	}
	mutex_unlock(&ep->mtx);
}
#endif

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= ep_show_fdinfo,
#endif
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll,
	.llseek		= noop_llseek,
	.unlocked_ioctl	= ep_eventpoll_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct eventpoll *ep;
	struct epitem *epi;
	bool dispose;

	/*
	 * Use the 'dying' flag to prevent a concurrent ep_clear_and_put() from
	 * touching the epitems list before eventpoll_release_file() can access
	 * the ep->mtx.
	 */
again:
	spin_lock(&file->f_lock);
	if (file->f_ep && file->f_ep->first) {
		epi = hlist_entry(file->f_ep->first, struct epitem, fllink);
		epi->dying = true;
		spin_unlock(&file->f_lock);

		/*
		 * ep access is safe as we still own a reference to the ep
		 * struct
		 */
		ep = epi->ep;
		mutex_lock(&ep->mtx);
		dispose = __ep_remove(ep, epi, true);
		mutex_unlock(&ep->mtx);

		if (dispose && ep_refcount_dec_and_test(ep))
			ep_free(ep);
		goto again;
	}
	spin_unlock(&file->f_lock);
}

static int ep_alloc(struct eventpoll **pep)
{
	struct eventpoll *ep;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (unlikely(!ep))
		return -ENOMEM;

	mutex_init(&ep->mtx);
	rwlock_init(&ep->lock);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT_CACHED;
	ep->ovflist = EP_UNACTIVE_PTR;
	ep->user = get_current_user();
	refcount_set(&ep->refcount, 1);

	*pep = ep;

	return 0;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	return epir;
}

#ifdef CONFIG_KCMP
static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
{
	struct rb_node *rbp;
	struct epitem *epi;

	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);
		if (epi->ffd.fd == tfd) {
			if (toff == 0)
				return epi;
			else
				toff--;
		}
		cond_resched();
	}

	return NULL;
}

struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
				     unsigned long toff)
{
	struct file *file_raw;
	struct eventpoll *ep;
	struct epitem *epi;

	if (!is_file_epoll(file))
		return ERR_PTR(-EINVAL);

	ep = file->private_data;

	mutex_lock(&ep->mtx);
	epi = ep_find_tfd(ep, tfd, toff);
	if (epi)
		file_raw = epi->ffd.file;
	else
		file_raw = ERR_PTR(-ENOENT);
	mutex_unlock(&ep->mtx);

	return file_raw;
}
#endif /* CONFIG_KCMP */

/*
 * Adds a new entry to the tail of the list in a lockless way, i.e.
 * multiple CPUs are allowed to call this function concurrently.
 *
 * Beware: it is necessary to prevent any other modifications of the
 * existing list until all changes are completed, in other words
 * concurrent list_add_tail_lockless() calls should be protected
 * with a read lock, where the write lock acts as a barrier which
 * makes sure all list_add_tail_lockless() calls are fully
 * completed.
 *
 * Also an element can be locklessly added to the list only in one
 * direction, i.e. either to the tail or to the head, otherwise
 * concurrent access will corrupt the list.
 *
 * Return: %false if the element has already been added to the list, %true
 * otherwise.
 */
static inline bool list_add_tail_lockless(struct list_head *new,
					  struct list_head *head)
{
	struct list_head *prev;

	/*
	 * This is a simple 'new->next = head' operation, but cmpxchg()
	 * is used in order to detect that the same element has just been
	 * added to the list from another CPU: the winner observes
	 * new->next == new.
	 */
	if (!try_cmpxchg(&new->next, &new, head))
		return false;

	/*
	 * Initially ->next of the new element must be updated with the head
	 * (we are inserting to the tail) and only then are pointers atomically
	 * exchanged. XCHG guarantees memory ordering, thus ->next should be
	 * updated before pointers are actually swapped and pointers are
	 * swapped before prev->next is updated.
	 */

	prev = xchg(&head->prev, new);

	/*
	 * It is safe to modify prev->next and new->prev, because a new element
	 * is added only to the tail and new->next is updated before XCHG.
	 */

	prev->next = new;
	new->prev = prev;

	return true;
}
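
/*
 * Illustrative interleaving (a sketch, not additional kernel code): CPUs A
 * and B concurrently append distinct elements 'a' and 'b' with
 * list_add_tail_lockless(), starting from a list whose tail is 't':
 *
 *	A: try_cmpxchg(&a->next, ...)	-> a->next = head
 *	B: try_cmpxchg(&b->next, ...)	-> b->next = head
 *	A: prev = xchg(&head->prev, a)	-> prev == t
 *	B: prev = xchg(&head->prev, b)	-> prev == a
 *	A: t->next = a; a->prev = t
 *	B: a->next = b; b->prev = a
 *
 * The final order is t -> a -> b -> head. Between B's xchg() and its final
 * writes, a->next still points at head; that transient state is only ever
 * resolved under the ep->lock write lock taken in ep_start/done_scan(),
 * which is exactly the read-lock/write-lock pairing described above.
 */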

/*
 * Chains a new epi entry at the head of the ep->ovflist in a lockless way,
 * i.e. multiple CPUs are allowed to call this function concurrently
 * (ep_done_scan() reverses the resulting LIFO chain back into FIFO order).
 *
 * Return: %false if the epi element has already been chained, %true otherwise.
 */
static inline bool chain_epi_lockless(struct epitem *epi)
{
	struct eventpoll *ep = epi->ep;

	/* Fast preliminary check */
	if (epi->next != EP_UNACTIVE_PTR)
		return false;

	/* Check that the same epi has not been just chained from another CPU */
	if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
		return false;

	/* Atomically exchange the list head */
	epi->next = xchg(&ep->ovflist, epi);

	return true;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 *
 * This callback takes a read lock in order not to contend with concurrent
 * events from another file descriptor, thus all modifications to ->rdllist
 * or ->ovflist are lockless. The read lock is paired with the write lock from
 * ep_start/done_scan(), which stops all list modifications and guarantees
 * that the lists' state is seen correctly.
 *
 * Another thing worth mentioning is that ep_poll_callback() can be called
 * concurrently for the same @epi from different CPUs if the poll table was
 * initialized with several wait queue entries. Plural wakeup from different
 * CPUs of a single wait queue is serialized by wq.lock, but the case when
 * multiple wait queues are used should be detected accordingly. This is
 * detected using the cmpxchg() operation.
 */
static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;
	__poll_t pollflags = key_to_poll(key);
	unsigned long flags;
	int ewake = 0;

	read_lock_irqsave(&ep->lock, flags);

	ep_set_busy_poll_napi_id(epi);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD is issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * Check the events coming with the callback. At this stage, not
	 * every device reports the events in the "key" parameter of the
	 * callback. We need to be able to handle both cases here, hence the
	 * test for "key" != NULL before the event match test.
	 */
	if (pollflags && !(pollflags & epi->event.events))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of Linux
	 * f_op->poll() semantics). All the events that happen during that
	 * period of time are chained in ep->ovflist and requeued later on.
	 */
	if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
		if (chain_epi_lockless(epi))
			ep_pm_stay_awake_rcu(epi);
	} else if (!ep_is_linked(epi)) {
		/* In the usual case, add event to ready list. */
		if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
			ep_pm_stay_awake_rcu(epi);
	}

	/*
	 * Wake up (if active) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq)) {
		if ((epi->event.events & EPOLLEXCLUSIVE) &&
		    !(pollflags & POLLFREE)) {
			switch (pollflags & EPOLLINOUT_BITS) {
			case EPOLLIN:
				if (epi->event.events & EPOLLIN)
					ewake = 1;
				break;
			case EPOLLOUT:
				if (epi->event.events & EPOLLOUT)
					ewake = 1;
				break;
			case 0:
				ewake = 1;
				break;
			}
		}
		if (sync)
			wake_up_sync(&ep->wq);
		else
			wake_up(&ep->wq);
	}
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	read_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);

	if (!(epi->event.events & EPOLLEXCLUSIVE))
		ewake = 1;

	if (pollflags & POLLFREE) {
		/*
		 * If we race with ep_remove_wait_queue() it can miss
		 * ->whead = NULL and do another remove_wait_queue() after
		 * us, so we can't use __remove_wait_queue().
		 */
		list_del_init(&wait->entry);
		/*
		 * ->whead != NULL protects us from the race with
		 * ep_clear_and_put() or ep_remove(), ep_remove_wait_queue()
		 * takes whead->lock held by the caller. Once we nullify it,
		 * nothing protects ep/epi or even wait.
		 */
		smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
	}

	return ewake;
}
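
/*
 * Illustrative EPOLLEXCLUSIVE scenario for the ewake logic above (a sketch
 * with hypothetical fds): several epoll fds watch one listening socket with
 * EPOLLEXCLUSIVE | EPOLLIN. When a connection arrives, the wakeup key
 * carries EPOLLIN, so only entries whose event mask includes EPOLLIN return
 * ewake == 1; the exclusive wait queue walk stops at the first entry that
 * does, instead of waking every epoll instance.
 */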

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct ep_pqueue *epq = container_of(pt, struct ep_pqueue, pt);
	struct epitem *epi = epq->epi;
	struct eppoll_entry *pwq;

	if (unlikely(!epi))	/* an earlier allocation has failed */
		return;

	pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL);
	if (unlikely(!pwq)) {
		epq->epi = NULL;
		return;
	}

	init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
	pwq->whead = whead;
	pwq->base = epi;
	if (epi->event.events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(whead, &pwq->wait);
	else
		add_wait_queue(whead, &pwq->wait);
	pwq->next = epi->pwqlist;
	epi->pwqlist = pwq;
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
	struct epitem *epic;
	bool leftmost = true;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0) {
			p = &parent->rb_right;
			leftmost = false;
		} else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
}

#define PATH_ARR_SIZE 5
/*
 * These are the numbers of paths of length 1 to 5 that we are allowing to
 * emanate from a single file of interest. For example, we allow 1000 paths
 * of length 1 to emanate from each file of interest. This essentially
 * represents the potential wakeup paths, which need to be limited in order
 * to avoid massive uncontrolled wakeup storms. The common use case should be
 * a single ep which is connected to n file sources. In this case each file
 * source has 1 path of length 1. Thus, the numbers below should be more than
 * sufficient. These path limits are enforced during an EPOLL_CTL_ADD
 * operation, since a modify and delete can't add additional paths.
 * Protected by the epnested_mutex.
 */
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];

static int path_count_inc(int nests)
{
	/* Allow an arbitrary number of depth 1 paths */
	if (nests == 0)
		return 0;

	if (++path_count[nests] > path_limits[nests])
		return -1;
	return 0;
}

static void path_count_init(void)
{
	int i;

	for (i = 0; i < PATH_ARR_SIZE; i++)
		path_count[i] = 0;
}

static int reverse_path_check_proc(struct hlist_head *refs, int depth)
{
	int error = 0;
	struct epitem *epi;

	if (depth > EP_MAX_NESTS) /* too deep nesting */
		return -1;

	/* CTL_DEL can remove links here, but that can't increase our count */
	hlist_for_each_entry_rcu(epi, refs, fllink) {
		struct hlist_head *refs = &epi->ep->refs;
		if (hlist_empty(refs))
			error = path_count_inc(depth);
		else
			error = reverse_path_check_proc(refs, depth + 1);
		if (error != 0)
			break;
	}
	return error;
}
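
/*
 * Worked example for the path accounting above (purely illustrative, with
 * hypothetical numbers): direct watches of a file F are never limited,
 * because reverse_path_check_proc() is entered at depth 0 and
 * path_count_inc(0) always succeeds. But if 1200 epoll fds watch F and
 * every one of them is in turn watched by another epoll fd, the recursion
 * visits those watchers at depth 1 and increments path_count[1] 1200 times,
 * exceeding path_limits[1] == 500, so the EPOLL_CTL_ADD that proposed the
 * new link fails.
 */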

/**
 * reverse_path_check - The tfile_check_list is a list of epitems_head, which
 *                      have links that are proposed to be newly added. We
 *                      need to make sure that those added links don't add
 *                      too many paths such that we will spend all our time
 *                      waking up eventpoll objects.
 *
 * Return: zero if the proposed links don't create too many paths,
 * -1 otherwise.
 */
static int reverse_path_check(void)
{
	struct epitems_head *p;

	for (p = tfile_check_list; p != EP_UNACTIVE_PTR; p = p->next) {
		int error;
		path_count_init();
		rcu_read_lock();
		error = reverse_path_check_proc(&p->epitems, 0);
		rcu_read_unlock();
		if (error)
			return error;
	}
	return 0;
}
| 1574 | |
| 1575 | static int ep_create_wakeup_source(struct epitem *epi) |
| 1576 | { |
| 1577 | struct name_snapshot n; |
| 1578 | struct wakeup_source *ws; |
| 1579 | |
| 1580 | if (!epi->ep->ws) { |
| 1581 | epi->ep->ws = wakeup_source_register(NULL, "eventpoll"); |
| 1582 | if (!epi->ep->ws) |
| 1583 | return -ENOMEM; |
| 1584 | } |
| 1585 | |
| 1586 | take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry); |
| 1587 | ws = wakeup_source_register(NULL, n.name.name); |
| 1588 | release_dentry_name_snapshot(&n); |
| 1589 | |
| 1590 | if (!ws) |
| 1591 | return -ENOMEM; |
| 1592 | rcu_assign_pointer(epi->ws, ws); |
| 1593 | |
| 1594 | return 0; |
| 1595 | } |
| 1596 | |
| 1597 | /* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */ |
| 1598 | static noinline void ep_destroy_wakeup_source(struct epitem *epi) |
| 1599 | { |
| 1600 | struct wakeup_source *ws = ep_wakeup_source(epi); |
| 1601 | |
| 1602 | RCU_INIT_POINTER(epi->ws, NULL); |
| 1603 | |
| 1604 | /* |
| 1605 | * wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is |
| 1606 | * used internally by wakeup_source_remove, too (called by |
| 1607 | * wakeup_source_unregister), so we cannot use call_rcu |
| 1608 | */ |
| 1609 | synchronize_rcu(); |
| 1610 | wakeup_source_unregister(ws); |
| 1611 | } |
| 1612 | |
| 1613 | static int attach_epitem(struct file *file, struct epitem *epi) |
| 1614 | { |
| 1615 | struct epitems_head *to_free = NULL; |
| 1616 | struct hlist_head *head = NULL; |
| 1617 | struct eventpoll *ep = NULL; |
| 1618 | |
| 1619 | if (is_file_epoll(file)) |
| 1620 | ep = file->private_data; |
| 1621 | |
| 1622 | if (ep) { |
| 1623 | head = &ep->refs; |
| 1624 | } else if (!READ_ONCE(file->f_ep)) { |
| 1625 | allocate: |
| 1626 | to_free = kmem_cache_zalloc(ephead_cache, GFP_KERNEL); |
| 1627 | if (!to_free) |
| 1628 | return -ENOMEM; |
| 1629 | head = &to_free->epitems; |
| 1630 | } |
| 1631 | spin_lock(&file->f_lock); |
| 1632 | if (!file->f_ep) { |
| 1633 | if (unlikely(!head)) { |
| 1634 | spin_unlock(&file->f_lock); |
| 1635 | goto allocate; |
| 1636 | } |
| 1637 | /* See eventpoll_release() for details. */ |
| 1638 | WRITE_ONCE(file->f_ep, head); |
| 1639 | to_free = NULL; |
| 1640 | } |
| 1641 | hlist_add_head_rcu(&epi->fllink, file->f_ep); |
| 1642 | spin_unlock(&file->f_lock); |
| 1643 | free_ephead(to_free); |
| 1644 | return 0; |
| 1645 | } |
| 1646 | |
| 1647 | /* |
| 1648 | * Must be called with "mtx" held. |
| 1649 | */ |
| 1650 | static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, |
| 1651 | struct file *tfile, int fd, int full_check) |
| 1652 | { |
| 1653 | int error, pwake = 0; |
| 1654 | __poll_t revents; |
| 1655 | struct epitem *epi; |
| 1656 | struct ep_pqueue epq; |
| 1657 | struct eventpoll *tep = NULL; |
| 1658 | |
| 1659 | if (is_file_epoll(tfile)) |
| 1660 | tep = tfile->private_data; |
| 1661 | |
| 1662 | lockdep_assert_irqs_enabled(); |
| 1663 | |
| 1664 | if (unlikely(percpu_counter_compare(&ep->user->epoll_watches, |
| 1665 | max_user_watches) >= 0)) |
| 1666 | return -ENOSPC; |
| 1667 | percpu_counter_inc(&ep->user->epoll_watches); |
| 1668 | |
| 1669 | if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) { |
| 1670 | percpu_counter_dec(&ep->user->epoll_watches); |
| 1671 | return -ENOMEM; |
| 1672 | } |
| 1673 | |
	/* Item initialization follows here ... */
| 1675 | INIT_LIST_HEAD(&epi->rdllink); |
| 1676 | epi->ep = ep; |
| 1677 | ep_set_ffd(&epi->ffd, tfile, fd); |
| 1678 | epi->event = *event; |
| 1679 | epi->next = EP_UNACTIVE_PTR; |
| 1680 | |
| 1681 | if (tep) |
| 1682 | mutex_lock_nested(&tep->mtx, 1); |
	/* Add the current item to the list of active epoll hooks for this file */
| 1684 | if (unlikely(attach_epitem(tfile, epi) < 0)) { |
| 1685 | if (tep) |
| 1686 | mutex_unlock(&tep->mtx); |
| 1687 | kmem_cache_free(epi_cache, epi); |
| 1688 | percpu_counter_dec(&ep->user->epoll_watches); |
| 1689 | return -ENOMEM; |
| 1690 | } |
| 1691 | |
| 1692 | if (full_check && !tep) |
| 1693 | list_file(tfile); |
| 1694 | |
| 1695 | /* |
| 1696 | * Add the current item to the RB tree. All RB tree operations are |
| 1697 | * protected by "mtx", and ep_insert() is called with "mtx" held. |
| 1698 | */ |
| 1699 | ep_rbtree_insert(ep, epi); |
| 1700 | if (tep) |
| 1701 | mutex_unlock(&tep->mtx); |
| 1702 | |
| 1703 | /* |
| 1704 | * ep_remove_safe() calls in the later error paths can't lead to |
| 1705 | * ep_free() as the ep file itself still holds an ep reference. |
| 1706 | */ |
| 1707 | ep_get(ep); |
| 1708 | |
| 1709 | /* now check if we've created too many backpaths */ |
| 1710 | if (unlikely(full_check && reverse_path_check())) { |
| 1711 | ep_remove_safe(ep, epi); |
| 1712 | return -EINVAL; |
| 1713 | } |
| 1714 | |
| 1715 | if (epi->event.events & EPOLLWAKEUP) { |
| 1716 | error = ep_create_wakeup_source(epi); |
| 1717 | if (error) { |
| 1718 | ep_remove_safe(ep, epi); |
| 1719 | return error; |
| 1720 | } |
| 1721 | } |
| 1722 | |
| 1723 | /* Initialize the poll table using the queue callback */ |
| 1724 | epq.epi = epi; |
| 1725 | init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); |
| 1726 | |
| 1727 | /* |
| 1728 | * Attach the item to the poll hooks and get current event bits. |
| 1729 | * We can safely use the file* here because its usage count has |
| 1730 | * been increased by the caller of this function. Note that after |
| 1731 | * this operation completes, the poll callback can start hitting |
| 1732 | * the new item. |
| 1733 | */ |
| 1734 | revents = ep_item_poll(epi, &epq.pt, 1); |
| 1735 | |
| 1736 | /* |
| 1737 | * We have to check if something went wrong during the poll wait queue |
	 * install process. Namely an allocation for a wait queue failed due
	 * to high memory pressure.
| 1740 | */ |
| 1741 | if (unlikely(!epq.epi)) { |
| 1742 | ep_remove_safe(ep, epi); |
| 1743 | return -ENOMEM; |
| 1744 | } |
| 1745 | |
	/* Add the new item to the ready list, if needed, while holding ep->lock */
| 1747 | write_lock_irq(&ep->lock); |
| 1748 | |
| 1749 | /* record NAPI ID of new item if present */ |
| 1750 | ep_set_busy_poll_napi_id(epi); |
| 1751 | |
	/* If the file is already "ready" we add it to the ready list */
| 1753 | if (revents && !ep_is_linked(epi)) { |
| 1754 | list_add_tail(&epi->rdllink, &ep->rdllist); |
| 1755 | ep_pm_stay_awake(epi); |
| 1756 | |
| 1757 | /* Notify waiting tasks that events are available */ |
| 1758 | if (waitqueue_active(&ep->wq)) |
| 1759 | wake_up(&ep->wq); |
| 1760 | if (waitqueue_active(&ep->poll_wait)) |
| 1761 | pwake++; |
| 1762 | } |
| 1763 | |
| 1764 | write_unlock_irq(&ep->lock); |
| 1765 | |
| 1766 | /* We have to call this outside the lock */ |
| 1767 | if (pwake) |
| 1768 | ep_poll_safewake(ep, NULL, 0); |
| 1769 | |
| 1770 | return 0; |
| 1771 | } |
| 1772 | |
| 1773 | /* |
 * Modify the interest event mask of an existing item, queueing it on the
 * ready list if the new mask matches the current file state. Must be
 * called with "mtx" held.
| 1776 | */ |
| 1777 | static int ep_modify(struct eventpoll *ep, struct epitem *epi, |
| 1778 | const struct epoll_event *event) |
| 1779 | { |
| 1780 | int pwake = 0; |
| 1781 | poll_table pt; |
| 1782 | |
| 1783 | lockdep_assert_irqs_enabled(); |
| 1784 | |
| 1785 | init_poll_funcptr(&pt, NULL); |
| 1786 | |
| 1787 | /* |
| 1788 | * Set the new event interest mask before calling f_op->poll(); |
| 1789 | * otherwise we might miss an event that happens between the |
| 1790 | * f_op->poll() call and the new event set registering. |
| 1791 | */ |
| 1792 | epi->event.events = event->events; /* need barrier below */ |
| 1793 | epi->event.data = event->data; /* protected by mtx */ |
| 1794 | if (epi->event.events & EPOLLWAKEUP) { |
| 1795 | if (!ep_has_wakeup_source(epi)) |
| 1796 | ep_create_wakeup_source(epi); |
| 1797 | } else if (ep_has_wakeup_source(epi)) { |
| 1798 | ep_destroy_wakeup_source(epi); |
| 1799 | } |
| 1800 | |
| 1801 | /* |
| 1802 | * The following barrier has two effects: |
| 1803 | * |
| 1804 | * 1) Flush epi changes above to other CPUs. This ensures |
| 1805 | * we do not miss events from ep_poll_callback if an |
| 1806 | * event occurs immediately after we call f_op->poll(). |
| 1807 | * We need this because we did not take ep->lock while |
| 1808 | * changing epi above (but ep_poll_callback does take |
| 1809 | * ep->lock). |
| 1810 | * |
| 1811 | * 2) We also need to ensure we do not miss _past_ events |
| 1812 | * when calling f_op->poll(). This barrier also |
| 1813 | * pairs with the barrier in wq_has_sleeper (see |
| 1814 | * comments for wq_has_sleeper). |
| 1815 | * |
| 1816 | * This barrier will now guarantee ep_poll_callback or f_op->poll |
| 1817 | * (or both) will notice the readiness of an item. |
| 1818 | */ |
| 1819 | smp_mb(); |
| 1820 | |
| 1821 | /* |
| 1822 | * Get current event bits. We can safely use the file* here because |
| 1823 | * its usage count has been increased by the caller of this function. |
| 1824 | * If the item is "hot" and it is not registered inside the ready |
| 1825 | * list, push it inside. |
| 1826 | */ |
| 1827 | if (ep_item_poll(epi, &pt, 1)) { |
| 1828 | write_lock_irq(&ep->lock); |
| 1829 | if (!ep_is_linked(epi)) { |
| 1830 | list_add_tail(&epi->rdllink, &ep->rdllist); |
| 1831 | ep_pm_stay_awake(epi); |
| 1832 | |
| 1833 | /* Notify waiting tasks that events are available */ |
| 1834 | if (waitqueue_active(&ep->wq)) |
| 1835 | wake_up(&ep->wq); |
| 1836 | if (waitqueue_active(&ep->poll_wait)) |
| 1837 | pwake++; |
| 1838 | } |
| 1839 | write_unlock_irq(&ep->lock); |
| 1840 | } |
| 1841 | |
| 1842 | /* We have to call this outside the lock */ |
| 1843 | if (pwake) |
| 1844 | ep_poll_safewake(ep, NULL, 0); |
| 1845 | |
| 1846 | return 0; |
| 1847 | } |
| 1848 | |
| 1849 | static int ep_send_events(struct eventpoll *ep, |
| 1850 | struct epoll_event __user *events, int maxevents) |
| 1851 | { |
| 1852 | struct epitem *epi, *tmp; |
| 1853 | LIST_HEAD(txlist); |
| 1854 | poll_table pt; |
| 1855 | int res = 0; |
| 1856 | |
| 1857 | /* |
| 1858 | * Always short-circuit for fatal signals to allow threads to make a |
| 1859 | * timely exit without the chance of finding more events available and |
| 1860 | * fetching repeatedly. |
| 1861 | */ |
| 1862 | if (fatal_signal_pending(current)) |
| 1863 | return -EINTR; |
| 1864 | |
| 1865 | init_poll_funcptr(&pt, NULL); |
| 1866 | |
| 1867 | mutex_lock(&ep->mtx); |
| 1868 | ep_start_scan(ep, &txlist); |
| 1869 | |
| 1870 | /* |
| 1871 | * We can loop without lock because we are passed a task private list. |
	 * Items cannot vanish during the loop because we are holding ep->mtx.
| 1873 | */ |
| 1874 | list_for_each_entry_safe(epi, tmp, &txlist, rdllink) { |
| 1875 | struct wakeup_source *ws; |
| 1876 | __poll_t revents; |
| 1877 | |
| 1878 | if (res >= maxevents) |
| 1879 | break; |
| 1880 | |
| 1881 | /* |
| 1882 | * Activate ep->ws before deactivating epi->ws to prevent |
		 * triggering auto-suspend here (in case we reactivate epi->ws
| 1884 | * below). |
| 1885 | * |
| 1886 | * This could be rearranged to delay the deactivation of epi->ws |
| 1887 | * instead, but then epi->ws would temporarily be out of sync |
| 1888 | * with ep_is_linked(). |
| 1889 | */ |
| 1890 | ws = ep_wakeup_source(epi); |
| 1891 | if (ws) { |
| 1892 | if (ws->active) |
| 1893 | __pm_stay_awake(ep->ws); |
| 1894 | __pm_relax(ws); |
| 1895 | } |
| 1896 | |
| 1897 | list_del_init(&epi->rdllink); |
| 1898 | |
| 1899 | /* |
		 * If the event mask intersects the caller-requested one,
| 1901 | * deliver the event to userspace. Again, we are holding ep->mtx, |
| 1902 | * so no operations coming from userspace can change the item. |
| 1903 | */ |
| 1904 | revents = ep_item_poll(epi, &pt, 1); |
| 1905 | if (!revents) |
| 1906 | continue; |
| 1907 | |
| 1908 | events = epoll_put_uevent(revents, epi->event.data, events); |
| 1909 | if (!events) { |
| 1910 | list_add(&epi->rdllink, &txlist); |
| 1911 | ep_pm_stay_awake(epi); |
| 1912 | if (!res) |
| 1913 | res = -EFAULT; |
| 1914 | break; |
| 1915 | } |
| 1916 | res++; |
| 1917 | if (epi->event.events & EPOLLONESHOT) |
| 1918 | epi->event.events &= EP_PRIVATE_BITS; |
| 1919 | else if (!(epi->event.events & EPOLLET)) { |
| 1920 | /* |
			 * If this file has been added with Level
			 * Triggered mode, we need to insert it back
			 * into the ready list, so that the next call
			 * to epoll_wait() will check the event
			 * availability again. At this point, no one can insert
| 1926 | * into ep->rdllist besides us. The epoll_ctl() |
| 1927 | * callers are locked out by |
| 1928 | * ep_send_events() holding "mtx" and the |
| 1929 | * poll callback will queue them in ep->ovflist. |
| 1930 | */ |
| 1931 | list_add_tail(&epi->rdllink, &ep->rdllist); |
| 1932 | ep_pm_stay_awake(epi); |
| 1933 | } |
| 1934 | } |
| 1935 | ep_done_scan(ep, &txlist); |
| 1936 | mutex_unlock(&ep->mtx); |
| 1937 | |
| 1938 | return res; |
| 1939 | } |
| 1940 | |
| 1941 | static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms) |
| 1942 | { |
| 1943 | struct timespec64 now; |
| 1944 | |
| 1945 | if (ms < 0) |
| 1946 | return NULL; |
| 1947 | |
| 1948 | if (!ms) { |
| 1949 | to->tv_sec = 0; |
| 1950 | to->tv_nsec = 0; |
| 1951 | return to; |
| 1952 | } |
| 1953 | |
| 1954 | to->tv_sec = ms / MSEC_PER_SEC; |
| 1955 | to->tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC); |
| 1956 | |
| 1957 | ktime_get_ts64(&now); |
| 1958 | *to = timespec64_add_safe(now, *to); |
| 1959 | return to; |
| 1960 | } |
| 1961 | |
| 1962 | /* |
| 1963 | * autoremove_wake_function, but remove even on failure to wake up, because we |
| 1964 | * know that default_wake_function/ttwu will only fail if the thread is already |
 * woken, and in that case the ep_poll loop will remove the entry anyway, not
| 1966 | * try to reuse it. |
| 1967 | */ |
| 1968 | static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry, |
| 1969 | unsigned int mode, int sync, void *key) |
| 1970 | { |
| 1971 | int ret = default_wake_function(wq_entry, mode, sync, key); |
| 1972 | |
| 1973 | /* |
| 1974 | * Pairs with list_empty_careful in ep_poll, and ensures future loop |
| 1975 | * iterations see the cause of this wakeup. |
| 1976 | */ |
| 1977 | list_del_init_careful(&wq_entry->entry); |
| 1978 | return ret; |
| 1979 | } |
| 1980 | |
| 1981 | static int ep_try_send_events(struct eventpoll *ep, |
| 1982 | struct epoll_event __user *events, int maxevents) |
| 1983 | { |
| 1984 | int res; |
| 1985 | |
| 1986 | /* |
| 1987 | * Try to transfer events to user space. In case we get 0 events and |
	 * there's still timeout left over, we try again in search of
| 1989 | * more luck. |
| 1990 | */ |
| 1991 | res = ep_send_events(ep, events, maxevents); |
| 1992 | if (res > 0) |
| 1993 | ep_suspend_napi_irqs(ep); |
| 1994 | return res; |
| 1995 | } |
| 1996 | |
| 1997 | static int ep_schedule_timeout(ktime_t *to) |
| 1998 | { |
| 1999 | if (to) |
| 2000 | return ktime_after(*to, ktime_get()); |
| 2001 | else |
| 2002 | return 1; |
| 2003 | } |
| 2004 | |
| 2005 | /** |
| 2006 | * ep_poll - Retrieves ready events, and delivers them to the caller-supplied |
| 2007 | * event buffer. |
| 2008 | * |
| 2009 | * @ep: Pointer to the eventpoll context. |
| 2010 | * @events: Pointer to the userspace buffer where the ready events should be |
| 2011 | * stored. |
| 2012 | * @maxevents: Size (in terms of number of events) of the caller event buffer. |
| 2013 | * @timeout: Maximum timeout for the ready events fetch operation, in |
| 2014 | * timespec. If the timeout is zero, the function will not block, |
| 2015 | * while if the @timeout ptr is NULL, the function will block |
| 2016 | * until at least one event has been retrieved (or an error |
| 2017 | * occurred). |
| 2018 | * |
| 2019 | * Return: the number of ready events which have been fetched, or an |
| 2020 | * error code, in case of error. |
| 2021 | */ |
| 2022 | static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, |
| 2023 | int maxevents, struct timespec64 *timeout) |
| 2024 | { |
| 2025 | int res, eavail, timed_out = 0; |
| 2026 | u64 slack = 0; |
| 2027 | wait_queue_entry_t wait; |
| 2028 | ktime_t expires, *to = NULL; |
| 2029 | |
| 2030 | lockdep_assert_irqs_enabled(); |
| 2031 | |
| 2032 | if (timeout && (timeout->tv_sec | timeout->tv_nsec)) { |
| 2033 | slack = select_estimate_accuracy(timeout); |
| 2034 | to = &expires; |
| 2035 | *to = timespec64_to_ktime(*timeout); |
| 2036 | } else if (timeout) { |
| 2037 | /* |
| 2038 | * Avoid the unnecessary trip to the wait queue loop, if the |
		 * caller specified a non-blocking operation.
| 2040 | */ |
| 2041 | timed_out = 1; |
| 2042 | } |
| 2043 | |
| 2044 | /* |
| 2045 | * This call is racy: We may or may not see events that are being added |
| 2046 | * to the ready list under the lock (e.g., in IRQ callbacks). For cases |
| 2047 | * with a non-zero timeout, this thread will check the ready list under |
| 2048 | * lock and will add to the wait queue. For cases with a zero |
	 * timeout, the user by definition should not care and will have to
	 * recheck.
| 2051 | */ |
| 2052 | eavail = ep_events_available(ep); |
| 2053 | |
| 2054 | while (1) { |
| 2055 | if (eavail) { |
| 2056 | res = ep_try_send_events(ep, events, maxevents); |
| 2057 | if (res) |
| 2058 | return res; |
| 2059 | } |
| 2060 | |
| 2061 | if (timed_out) |
| 2062 | return 0; |
| 2063 | |
| 2064 | eavail = ep_busy_loop(ep); |
| 2065 | if (eavail) |
| 2066 | continue; |
| 2067 | |
| 2068 | if (signal_pending(current)) |
| 2069 | return -EINTR; |
| 2070 | |
| 2071 | /* |
		 * Internally init_wait() uses autoremove_wake_function(),
		 * thus the wait entry is removed from the wait queue on each
		 * wakeup. Why is that important? In case of several waiters
		 * each new wakeup will hit the next waiter, giving it the
		 * chance to harvest new events. Otherwise wakeups can be
		 * lost. This is also good performance-wise, because on the
		 * normal wakeup path there is no need to call
		 * __remove_wait_queue() explicitly, thus ep->lock is not
		 * taken, which would stall event delivery.
		 *
		 * In fact, we now use an even more aggressive function that
		 * unconditionally removes, because we don't reuse the wait
		 * entry between loop iterations. This also lets us avoid the
		 * performance issue if a process is killed, causing all of
		 * its threads to wake up without being removed normally.
| 2087 | */ |
| 2088 | init_wait(&wait); |
| 2089 | wait.func = ep_autoremove_wake_function; |
| 2090 | |
| 2091 | write_lock_irq(&ep->lock); |
| 2092 | /* |
		 * Barrierless variant: waitqueue_active() is called under
		 * the same lock on the ep_poll_callback() wakeup side, so
		 * it is safe to avoid an explicit barrier.
| 2096 | */ |
| 2097 | __set_current_state(TASK_INTERRUPTIBLE); |
| 2098 | |
| 2099 | /* |
| 2100 | * Do the final check under the lock. ep_start/done_scan() |
| 2101 | * plays with two lists (->rdllist and ->ovflist) and there |
| 2102 | * is always a race when both lists are empty for short |
| 2103 | * period of time although events are pending, so lock is |
| 2104 | * important. |
| 2105 | */ |
| 2106 | eavail = ep_events_available(ep); |
| 2107 | if (!eavail) |
| 2108 | __add_wait_queue_exclusive(&ep->wq, &wait); |
| 2109 | |
| 2110 | write_unlock_irq(&ep->lock); |
| 2111 | |
| 2112 | if (!eavail) |
| 2113 | timed_out = !ep_schedule_timeout(to) || |
| 2114 | !schedule_hrtimeout_range(to, slack, |
| 2115 | HRTIMER_MODE_ABS); |
| 2116 | __set_current_state(TASK_RUNNING); |
| 2117 | |
| 2118 | /* |
| 2119 | * We were woken up, thus go and try to harvest some events. |
| 2120 | * If timed out and still on the wait queue, recheck eavail |
| 2121 | * carefully under lock, below. |
| 2122 | */ |
| 2123 | eavail = 1; |
| 2124 | |
| 2125 | if (!list_empty_careful(&wait.entry)) { |
| 2126 | write_lock_irq(&ep->lock); |
| 2127 | /* |
| 2128 | * If the thread timed out and is not on the wait queue, |
| 2129 | * it means that the thread was woken up after its |
| 2130 | * timeout expired before it could reacquire the lock. |
| 2131 | * Thus, when wait.entry is empty, it needs to harvest |
| 2132 | * events. |
| 2133 | */ |
| 2134 | if (timed_out) |
| 2135 | eavail = list_empty(&wait.entry); |
| 2136 | __remove_wait_queue(&ep->wq, &wait); |
| 2137 | write_unlock_irq(&ep->lock); |
| 2138 | } |
| 2139 | } |
| 2140 | } |
| 2141 | |
| 2142 | /** |
| 2143 | * ep_loop_check_proc - verify that adding an epoll file inside another |
| 2144 | * epoll structure does not violate the constraints, in |
| 2145 | * terms of closed loops, or too deep chains (which can |
| 2146 | * result in excessive stack usage). |
| 2147 | * |
 * @ep: the &struct eventpoll currently being checked.
 * @depth: Current depth of the path being checked.
 *
 * Return: %zero if the epoll nesting below @ep does not violate the
 * constraints, or %-1 otherwise.
| 2153 | */ |
| 2154 | static int ep_loop_check_proc(struct eventpoll *ep, int depth) |
| 2155 | { |
| 2156 | int error = 0; |
| 2157 | struct rb_node *rbp; |
| 2158 | struct epitem *epi; |
| 2159 | |
| 2160 | mutex_lock_nested(&ep->mtx, depth + 1); |
| 2161 | ep->gen = loop_check_gen; |
| 2162 | for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { |
| 2163 | epi = rb_entry(rbp, struct epitem, rbn); |
| 2164 | if (unlikely(is_file_epoll(epi->ffd.file))) { |
| 2165 | struct eventpoll *ep_tovisit; |
| 2166 | ep_tovisit = epi->ffd.file->private_data; |
| 2167 | if (ep_tovisit->gen == loop_check_gen) |
| 2168 | continue; |
| 2169 | if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS) |
| 2170 | error = -1; |
| 2171 | else |
| 2172 | error = ep_loop_check_proc(ep_tovisit, depth + 1); |
| 2173 | if (error != 0) |
| 2174 | break; |
| 2175 | } else { |
| 2176 | /* |
| 2177 | * If we've reached a file that is not associated with |
| 2178 | * an ep, then we need to check if the newly added |
| 2179 | * links are going to add too many wakeup paths. We do |
| 2180 | * this by adding it to the tfile_check_list, if it's |
| 2181 | * not already there, and calling reverse_path_check() |
| 2182 | * during ep_insert(). |
| 2183 | */ |
| 2184 | list_file(epi->ffd.file); |
| 2185 | } |
| 2186 | } |
| 2187 | mutex_unlock(&ep->mtx); |
| 2188 | |
| 2189 | return error; |
| 2190 | } |
| 2191 | |
| 2192 | /** |
| 2193 | * ep_loop_check - Performs a check to verify that adding an epoll file (@to) |
| 2194 | * into another epoll file (represented by @ep) does not create |
| 2195 | * closed loops or too deep chains. |
| 2196 | * |
| 2197 | * @ep: Pointer to the epoll we are inserting into. |
| 2198 | * @to: Pointer to the epoll to be inserted. |
| 2199 | * |
 * Return: %zero if adding the epoll @to inside the epoll @ep
 * does not violate the constraints, or %-1 otherwise.
| 2202 | */ |
| 2203 | static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to) |
| 2204 | { |
| 2205 | inserting_into = ep; |
| 2206 | return ep_loop_check_proc(to, 0); |
| 2207 | } |
| 2208 | |
| 2209 | static void clear_tfile_check_list(void) |
| 2210 | { |
| 2211 | rcu_read_lock(); |
| 2212 | while (tfile_check_list != EP_UNACTIVE_PTR) { |
| 2213 | struct epitems_head *head = tfile_check_list; |
| 2214 | tfile_check_list = head->next; |
| 2215 | unlist_file(head); |
| 2216 | } |
| 2217 | rcu_read_unlock(); |
| 2218 | } |
| 2219 | |
| 2220 | /* |
| 2221 | * Open an eventpoll file descriptor. |
| 2222 | */ |
| 2223 | static int do_epoll_create(int flags) |
| 2224 | { |
| 2225 | int error, fd; |
| 2226 | struct eventpoll *ep = NULL; |
| 2227 | struct file *file; |
| 2228 | |
| 2229 | /* Check the EPOLL_* constant for consistency. */ |
| 2230 | BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC); |
| 2231 | |
| 2232 | if (flags & ~EPOLL_CLOEXEC) |
| 2233 | return -EINVAL; |
| 2234 | /* |
| 2235 | * Create the internal data structure ("struct eventpoll"). |
| 2236 | */ |
| 2237 | error = ep_alloc(&ep); |
| 2238 | if (error < 0) |
| 2239 | return error; |
| 2240 | /* |
	 * Creates all the items needed to set up an eventpoll file. That is,
| 2242 | * a file structure and a free file descriptor. |
| 2243 | */ |
| 2244 | fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC)); |
| 2245 | if (fd < 0) { |
| 2246 | error = fd; |
| 2247 | goto out_free_ep; |
| 2248 | } |
| 2249 | file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep, |
| 2250 | O_RDWR | (flags & O_CLOEXEC)); |
| 2251 | if (IS_ERR(file)) { |
| 2252 | error = PTR_ERR(file); |
| 2253 | goto out_free_fd; |
| 2254 | } |
| 2255 | ep->file = file; |
| 2256 | fd_install(fd, file); |
| 2257 | return fd; |
| 2258 | |
| 2259 | out_free_fd: |
| 2260 | put_unused_fd(fd); |
| 2261 | out_free_ep: |
| 2262 | ep_clear_and_put(ep); |
| 2263 | return error; |
| 2264 | } |
| 2265 | |
| 2266 | SYSCALL_DEFINE1(epoll_create1, int, flags) |
| 2267 | { |
| 2268 | return do_epoll_create(flags); |
| 2269 | } |
| 2270 | |
| 2271 | SYSCALL_DEFINE1(epoll_create, int, size) |
| 2272 | { |
| 2273 | if (size <= 0) |
| 2274 | return -EINVAL; |
| 2275 | |
| 2276 | return do_epoll_create(0); |
| 2277 | } |
| 2278 | |
| 2279 | #ifdef CONFIG_PM_SLEEP |
| 2280 | static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev) |
| 2281 | { |
| 2282 | if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND)) |
| 2283 | epev->events &= ~EPOLLWAKEUP; |
| 2284 | } |
| 2285 | #else |
| 2286 | static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev) |
| 2287 | { |
| 2288 | epev->events &= ~EPOLLWAKEUP; |
| 2289 | } |
| 2290 | #endif |
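
/*
 * Note (illustrative): a caller passing ev.events = EPOLLIN | EPOLLWAKEUP
 * without CAP_BLOCK_SUSPEND (or without CONFIG_PM_SLEEP at all) simply
 * gets plain EPOLLIN semantics; EPOLLWAKEUP is silently stripped rather
 * than the call failing.
 */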
| 2291 | |
| 2292 | static inline int epoll_mutex_lock(struct mutex *mutex, int depth, |
| 2293 | bool nonblock) |
| 2294 | { |
| 2295 | if (!nonblock) { |
| 2296 | mutex_lock_nested(mutex, depth); |
| 2297 | return 0; |
| 2298 | } |
| 2299 | if (mutex_trylock(mutex)) |
| 2300 | return 0; |
| 2301 | return -EAGAIN; |
| 2302 | } |
| 2303 | |
| 2304 | int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, |
| 2305 | bool nonblock) |
| 2306 | { |
| 2307 | int error; |
| 2308 | int full_check = 0; |
| 2309 | struct eventpoll *ep; |
| 2310 | struct epitem *epi; |
| 2311 | struct eventpoll *tep = NULL; |
| 2312 | |
| 2313 | CLASS(fd, f)(epfd); |
| 2314 | if (fd_empty(f)) |
| 2315 | return -EBADF; |
| 2316 | |
| 2317 | /* Get the "struct file *" for the target file */ |
| 2318 | CLASS(fd, tf)(fd); |
| 2319 | if (fd_empty(tf)) |
| 2320 | return -EBADF; |
| 2321 | |
| 2322 | /* The target file descriptor must support poll */ |
| 2323 | if (!file_can_poll(fd_file(tf))) |
| 2324 | return -EPERM; |
| 2325 | |
| 2326 | /* Check if EPOLLWAKEUP is allowed */ |
| 2327 | if (ep_op_has_event(op)) |
| 2328 | ep_take_care_of_epollwakeup(epds); |
| 2329 | |
| 2330 | /* |
| 2331 | * We have to check that the file structure underneath the file descriptor |
| 2332 | * the user passed to us _is_ an eventpoll file. And also we do not permit |
| 2333 | * adding an epoll file descriptor inside itself. |
| 2334 | */ |
| 2335 | error = -EINVAL; |
| 2336 | if (fd_file(f) == fd_file(tf) || !is_file_epoll(fd_file(f))) |
| 2337 | goto error_tgt_fput; |
| 2338 | |
| 2339 | /* |
| 2340 | * epoll adds to the wakeup queue at EPOLL_CTL_ADD time only, |
| 2341 | * so EPOLLEXCLUSIVE is not allowed for a EPOLL_CTL_MOD operation. |
	 * Also, we do not currently support nested exclusive wakeups.
| 2343 | */ |
| 2344 | if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) { |
| 2345 | if (op == EPOLL_CTL_MOD) |
| 2346 | goto error_tgt_fput; |
| 2347 | if (op == EPOLL_CTL_ADD && (is_file_epoll(fd_file(tf)) || |
| 2348 | (epds->events & ~EPOLLEXCLUSIVE_OK_BITS))) |
| 2349 | goto error_tgt_fput; |
| 2350 | } |
| 2351 | |
| 2352 | /* |
| 2353 | * At this point it is safe to assume that the "private_data" contains |
| 2354 | * our own data structure. |
| 2355 | */ |
| 2356 | ep = fd_file(f)->private_data; |
| 2357 | |
| 2358 | /* |
| 2359 | * When we insert an epoll file descriptor inside another epoll file |
| 2360 | * descriptor, there is the chance of creating closed loops, which are |
	 * better handled here than in more critical paths. While we are
| 2362 | * checking for loops we also determine the list of files reachable |
| 2363 | * and hang them on the tfile_check_list, so we can check that we |
| 2364 | * haven't created too many possible wakeup paths. |
| 2365 | * |
	 * We do not need to take the global 'epnested_mutex' on EPOLL_CTL_ADD when
| 2367 | * the epoll file descriptor is attaching directly to a wakeup source, |
| 2368 | * unless the epoll file descriptor is nested. The purpose of taking the |
	 * 'epnested_mutex' on add is to prevent complex topologies such as loops and
| 2370 | * deep wakeup paths from forming in parallel through multiple |
| 2371 | * EPOLL_CTL_ADD operations. |
| 2372 | */ |
| 2373 | error = epoll_mutex_lock(&ep->mtx, 0, nonblock); |
| 2374 | if (error) |
| 2375 | goto error_tgt_fput; |
| 2376 | if (op == EPOLL_CTL_ADD) { |
| 2377 | if (READ_ONCE(fd_file(f)->f_ep) || ep->gen == loop_check_gen || |
| 2378 | is_file_epoll(fd_file(tf))) { |
| 2379 | mutex_unlock(&ep->mtx); |
| 2380 | error = epoll_mutex_lock(&epnested_mutex, 0, nonblock); |
| 2381 | if (error) |
| 2382 | goto error_tgt_fput; |
| 2383 | loop_check_gen++; |
| 2384 | full_check = 1; |
| 2385 | if (is_file_epoll(fd_file(tf))) { |
| 2386 | tep = fd_file(tf)->private_data; |
| 2387 | error = -ELOOP; |
| 2388 | if (ep_loop_check(ep, tep) != 0) |
| 2389 | goto error_tgt_fput; |
| 2390 | } |
| 2391 | error = epoll_mutex_lock(&ep->mtx, 0, nonblock); |
| 2392 | if (error) |
| 2393 | goto error_tgt_fput; |
| 2394 | } |
| 2395 | } |
| 2396 | |
| 2397 | /* |
| 2398 | * Try to lookup the file inside our RB tree. Since we grabbed "mtx" |
| 2399 | * above, we can be sure to be able to use the item looked up by |
| 2400 | * ep_find() till we release the mutex. |
| 2401 | */ |
| 2402 | epi = ep_find(ep, fd_file(tf), fd); |
| 2403 | |
| 2404 | error = -EINVAL; |
| 2405 | switch (op) { |
| 2406 | case EPOLL_CTL_ADD: |
| 2407 | if (!epi) { |
| 2408 | epds->events |= EPOLLERR | EPOLLHUP; |
| 2409 | error = ep_insert(ep, epds, fd_file(tf), fd, full_check); |
| 2410 | } else |
| 2411 | error = -EEXIST; |
| 2412 | break; |
| 2413 | case EPOLL_CTL_DEL: |
| 2414 | if (epi) { |
| 2415 | /* |
| 2416 | * The eventpoll itself is still alive: the refcount |
| 2417 | * can't go to zero here. |
| 2418 | */ |
| 2419 | ep_remove_safe(ep, epi); |
| 2420 | error = 0; |
| 2421 | } else { |
| 2422 | error = -ENOENT; |
| 2423 | } |
| 2424 | break; |
| 2425 | case EPOLL_CTL_MOD: |
| 2426 | if (epi) { |
| 2427 | if (!(epi->event.events & EPOLLEXCLUSIVE)) { |
| 2428 | epds->events |= EPOLLERR | EPOLLHUP; |
| 2429 | error = ep_modify(ep, epi, epds); |
| 2430 | } |
| 2431 | } else |
| 2432 | error = -ENOENT; |
| 2433 | break; |
| 2434 | } |
| 2435 | mutex_unlock(&ep->mtx); |
| 2436 | |
| 2437 | error_tgt_fput: |
| 2438 | if (full_check) { |
| 2439 | clear_tfile_check_list(); |
| 2440 | loop_check_gen++; |
| 2441 | mutex_unlock(&epnested_mutex); |
| 2442 | } |
| 2443 | return error; |
| 2444 | } |
| 2445 | |
| 2446 | /* |
| 2447 | * The following function implements the controller interface for |
| 2448 | * the eventpoll file that enables the insertion/removal/change of |
| 2449 | * file descriptors inside the interest set. |
| 2450 | */ |
| 2451 | SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, |
| 2452 | struct epoll_event __user *, event) |
| 2453 | { |
| 2454 | struct epoll_event epds; |
| 2455 | |
| 2456 | if (ep_op_has_event(op) && |
| 2457 | copy_from_user(&epds, event, sizeof(struct epoll_event))) |
| 2458 | return -EFAULT; |
| 2459 | |
| 2460 | return do_epoll_ctl(epfd, op, fd, &epds, false); |
| 2461 | } |
| 2462 | |
| 2463 | static int ep_check_params(struct file *file, struct epoll_event __user *evs, |
| 2464 | int maxevents) |
| 2465 | { |
	/* The maximum number of events must be greater than zero */
| 2467 | if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) |
| 2468 | return -EINVAL; |
| 2469 | |
| 2470 | /* Verify that the area passed by the user is writeable */ |
| 2471 | if (!access_ok(evs, maxevents * sizeof(struct epoll_event))) |
| 2472 | return -EFAULT; |
| 2473 | |
| 2474 | /* |
| 2475 | * We have to check that the file structure underneath the fd |
| 2476 | * the user passed to us _is_ an eventpoll file. |
| 2477 | */ |
| 2478 | if (!is_file_epoll(file)) |
| 2479 | return -EINVAL; |
| 2480 | |
| 2481 | return 0; |
| 2482 | } |
| 2483 | |
| 2484 | int epoll_sendevents(struct file *file, struct epoll_event __user *events, |
| 2485 | int maxevents) |
| 2486 | { |
| 2487 | struct eventpoll *ep; |
| 2488 | int ret; |
| 2489 | |
| 2490 | ret = ep_check_params(file, events, maxevents); |
| 2491 | if (unlikely(ret)) |
| 2492 | return ret; |
| 2493 | |
| 2494 | ep = file->private_data; |
| 2495 | /* |
| 2496 | * Racy call, but that's ok - it should get retried based on |
| 2497 | * poll readiness anyway. |
| 2498 | */ |
| 2499 | if (ep_events_available(ep)) |
| 2500 | return ep_try_send_events(ep, events, maxevents); |
| 2501 | return 0; |
| 2502 | } |
| 2503 | |
| 2504 | /* |
| 2505 | * Implement the event wait interface for the eventpoll file. It is the kernel |
| 2506 | * part of the user space epoll_wait(2). |
| 2507 | */ |
| 2508 | static int do_epoll_wait(int epfd, struct epoll_event __user *events, |
| 2509 | int maxevents, struct timespec64 *to) |
| 2510 | { |
| 2511 | struct eventpoll *ep; |
| 2512 | int ret; |
| 2513 | |
| 2514 | /* Get the "struct file *" for the eventpoll file */ |
| 2515 | CLASS(fd, f)(epfd); |
| 2516 | if (fd_empty(f)) |
| 2517 | return -EBADF; |
| 2518 | |
| 2519 | ret = ep_check_params(fd_file(f), events, maxevents); |
| 2520 | if (unlikely(ret)) |
| 2521 | return ret; |
| 2522 | |
| 2523 | /* |
| 2524 | * At this point it is safe to assume that the "private_data" contains |
| 2525 | * our own data structure. |
| 2526 | */ |
| 2527 | ep = fd_file(f)->private_data; |
| 2528 | |
| 2529 | /* Time to fish for events ... */ |
| 2530 | return ep_poll(ep, events, maxevents, to); |
| 2531 | } |
| 2532 | |
| 2533 | SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events, |
| 2534 | int, maxevents, int, timeout) |
| 2535 | { |
| 2536 | struct timespec64 to; |
| 2537 | |
| 2538 | return do_epoll_wait(epfd, events, maxevents, |
| 2539 | ep_timeout_to_timespec(&to, timeout)); |
| 2540 | } |
| 2541 | |
| 2542 | /* |
| 2543 | * Implement the event wait interface for the eventpoll file. It is the kernel |
| 2544 | * part of the user space epoll_pwait(2). |
| 2545 | */ |
| 2546 | static int do_epoll_pwait(int epfd, struct epoll_event __user *events, |
| 2547 | int maxevents, struct timespec64 *to, |
| 2548 | const sigset_t __user *sigmask, size_t sigsetsize) |
| 2549 | { |
| 2550 | int error; |
| 2551 | |
| 2552 | /* |
| 2553 | * If the caller wants a certain signal mask to be set during the wait, |
| 2554 | * we apply it here. |
| 2555 | */ |
| 2556 | error = set_user_sigmask(sigmask, sigsetsize); |
| 2557 | if (error) |
| 2558 | return error; |
| 2559 | |
| 2560 | error = do_epoll_wait(epfd, events, maxevents, to); |
| 2561 | |
| 2562 | restore_saved_sigmask_unless(error == -EINTR); |
| 2563 | |
| 2564 | return error; |
| 2565 | } |
| 2566 | |
| 2567 | SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, |
| 2568 | int, maxevents, int, timeout, const sigset_t __user *, sigmask, |
| 2569 | size_t, sigsetsize) |
| 2570 | { |
| 2571 | struct timespec64 to; |
| 2572 | |
| 2573 | return do_epoll_pwait(epfd, events, maxevents, |
| 2574 | ep_timeout_to_timespec(&to, timeout), |
| 2575 | sigmask, sigsetsize); |
| 2576 | } |
| 2577 | |
| 2578 | SYSCALL_DEFINE6(epoll_pwait2, int, epfd, struct epoll_event __user *, events, |
| 2579 | int, maxevents, const struct __kernel_timespec __user *, timeout, |
| 2580 | const sigset_t __user *, sigmask, size_t, sigsetsize) |
| 2581 | { |
| 2582 | struct timespec64 ts, *to = NULL; |
| 2583 | |
| 2584 | if (timeout) { |
| 2585 | if (get_timespec64(&ts, timeout)) |
| 2586 | return -EFAULT; |
| 2587 | to = &ts; |
| 2588 | if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) |
| 2589 | return -EINVAL; |
| 2590 | } |
| 2591 | |
| 2592 | return do_epoll_pwait(epfd, events, maxevents, to, |
| 2593 | sigmask, sigsetsize); |
| 2594 | } |
| 2595 | |
| 2596 | #ifdef CONFIG_COMPAT |
| 2597 | static int do_compat_epoll_pwait(int epfd, struct epoll_event __user *events, |
| 2598 | int maxevents, struct timespec64 *timeout, |
| 2599 | const compat_sigset_t __user *sigmask, |
| 2600 | compat_size_t sigsetsize) |
| 2601 | { |
| 2602 | long err; |
| 2603 | |
| 2604 | /* |
| 2605 | * If the caller wants a certain signal mask to be set during the wait, |
| 2606 | * we apply it here. |
| 2607 | */ |
| 2608 | err = set_compat_user_sigmask(sigmask, sigsetsize); |
| 2609 | if (err) |
| 2610 | return err; |
| 2611 | |
| 2612 | err = do_epoll_wait(epfd, events, maxevents, timeout); |
| 2613 | |
| 2614 | restore_saved_sigmask_unless(err == -EINTR); |
| 2615 | |
| 2616 | return err; |
| 2617 | } |
| 2618 | |
| 2619 | COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd, |
| 2620 | struct epoll_event __user *, events, |
| 2621 | int, maxevents, int, timeout, |
| 2622 | const compat_sigset_t __user *, sigmask, |
| 2623 | compat_size_t, sigsetsize) |
| 2624 | { |
| 2625 | struct timespec64 to; |
| 2626 | |
| 2627 | return do_compat_epoll_pwait(epfd, events, maxevents, |
| 2628 | ep_timeout_to_timespec(&to, timeout), |
| 2629 | sigmask, sigsetsize); |
| 2630 | } |
| 2631 | |
| 2632 | COMPAT_SYSCALL_DEFINE6(epoll_pwait2, int, epfd, |
| 2633 | struct epoll_event __user *, events, |
| 2634 | int, maxevents, |
| 2635 | const struct __kernel_timespec __user *, timeout, |
| 2636 | const compat_sigset_t __user *, sigmask, |
| 2637 | compat_size_t, sigsetsize) |
| 2638 | { |
| 2639 | struct timespec64 ts, *to = NULL; |
| 2640 | |
| 2641 | if (timeout) { |
| 2642 | if (get_timespec64(&ts, timeout)) |
| 2643 | return -EFAULT; |
| 2644 | to = &ts; |
| 2645 | if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) |
| 2646 | return -EINVAL; |
| 2647 | } |
| 2648 | |
| 2649 | return do_compat_epoll_pwait(epfd, events, maxevents, to, |
| 2650 | sigmask, sigsetsize); |
| 2651 | } |
| 2652 | |
| 2653 | #endif |
| 2654 | |
| 2655 | static int __init eventpoll_init(void) |
| 2656 | { |
| 2657 | struct sysinfo si; |
| 2658 | |
| 2659 | si_meminfo(&si); |
| 2660 | /* |
	 * Allow the top 4% (1/25) of lowmem to be allocated for epoll
	 * watches (per user).
| 2662 | */ |
| 2663 | max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) / |
| 2664 | EP_ITEM_COST; |
| 2665 | BUG_ON(max_user_watches < 0); |
| 2666 | |
| 2667 | /* |
| 2668 | * We can have many thousands of epitems, so prevent this from |
| 2669 | * using an extra cache line on 64-bit (and smaller) CPUs |
| 2670 | */ |
| 2671 | BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128); |
| 2672 | |
| 2673 | /* Allocates slab cache used to allocate "struct epitem" items */ |
| 2674 | epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem), |
| 2675 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); |
| 2676 | |
| 2677 | /* Allocates slab cache used to allocate "struct eppoll_entry" */ |
| 2678 | pwq_cache = kmem_cache_create("eventpoll_pwq", |
| 2679 | sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL); |
| 2680 | epoll_sysctls_init(); |
| 2681 | |
| 2682 | ephead_cache = kmem_cache_create("ep_head", |
| 2683 | sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL); |
| 2684 | |
| 2685 | return 0; |
| 2686 | } |
| 2687 | fs_initcall(eventpoll_init); |