io_uring/poll: fix double poll req->flags races
[linux-block.git] / io_uring / poll.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file *file;
	u64 old_user_data;
	u64 new_user_data;
	__poll_t events;
	bool update_events;
	bool update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_REF_MASK	GENMASK(30, 0)

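/*
 * Layout note (descriptive only): ->poll_refs packs a cancellation flag into
 * bit 31 and an ownership/reference count into bits 0-30, so a single atomic
 * covers both "who owns the request" and "has it been cancelled".
 */
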
#define IO_WQE_F_DOUBLE		1

static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

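/*
 * Illustrative sketch of the tagged pointer scheme above (assumes struct
 * io_kiocb is at least 2-byte aligned, leaving bit 0 of the pointer free):
 *
 *	wqe->private = (void *)((unsigned long)req | IO_WQE_F_DOUBLE);
 *	req = wqe_to_req(wqe);		// pointer with the tag stripped
 *	if (wqe_is_double(wqe))		// tag tells which entry woke us
 *		...
 *
 * The tag is set for the second (double) poll entry in __io_queue_proc().
 */
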
/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it to acquire ownership. Modifying the request while
 * not owning it is disallowed; this prevents races both when enqueueing
 * task_work and between arming the poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

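/*
 * Typical wake-side use of the ownership helper above (sketch, mirroring
 * io_poll_wake() and io_poll_execute() below): only the first caller to bump
 * the refcount becomes the owner and queues task_work, later wakers merely
 * leave a reference for io_poll_check_events() to drop:
 *
 *	if (io_poll_get_ownership(req))
 *		__io_poll_execute(req, mask);
 */
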
static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

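/*
 * Note on the two hash tables used below: ->cancel_table buckets are
 * protected by per-bucket spinlocks, while ->cancel_table_locked (see
 * io_poll_req_insert_locked()) relies on ->uring_lock instead, which is why
 * io_poll_tw_hash_eject() takes different paths depending on
 * REQ_F_HASH_LOCKED.
 */
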
static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, locked);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags is set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free. If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
};

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
 * required, i.e. the wakeup was either spurious or a multishot CQE has
 * already been served. IOU_POLL_DONE when it's done with the request, with
 * the mask stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to
 * remove the multishot poll and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v, ret;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		/* tw handler should be the owner, and so have some references */
		if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
			return IOU_POLL_DONE;
		if (v & IO_POLL_CANCEL_FLAG)
			return -ECANCELED;

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
		}

		if ((unlikely(!req->cqe.res)))
			continue;
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_post_aux_cqe(ctx, req->cqe.user_data,
					     mask, IORING_CQE_F_MORE, false)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			ret = io_poll_issue(req, locked);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			if (ret < 0)
				return ret;
		}

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));

	return IOU_POLL_NO_ACTION;
}

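/*
 * The two tw handlers below consume the io_poll_check_events() codes:
 * IOU_POLL_NO_ACTION leaves the request armed, IOU_POLL_DONE completes a
 * plain poll (or re-issues an apoll-driven request), and
 * IOU_POLL_REMOVE_POLL_USE_RES completes with whatever is already stored in
 * req->cqe; negative values fail the request.
 */
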
static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;

	if (ret == IOU_POLL_DONE) {
		struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
	} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
		req->cqe.res = ret;
		req_set_fail(req);
	}

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	io_req_set_res(req, req->cqe.res, 0);
	io_req_task_complete(req, locked);
}

static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
		io_req_complete_post(req);
	else if (ret == IOU_POLL_DONE)
		io_req_task_submit(req, locked);
	else
		io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);
	/*
	 * This is useful for poll that is armed on behalf of another
	 * request, and where the wakeup path could be on a different
	 * CPU. We want to avoid pulling in req->apoll->events for that
	 * case.
	 */
	if (req->opcode == IORING_OP_POLL_ADD)
		req->io_task_work.func = io_poll_task_func;
	else
		req->io_task_work.func = io_apoll_task_func;

	trace_io_uring_task_add(req, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over it, we have to tear down the request as best we
	 * can. That means immediately removing the request from its
	 * waitqueue and preventing all further accesses to the waitqueue
	 * via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as req->head is NULL'ed out, the request can be
	 * completed and freed, since aio_poll_complete_work()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so can race for req->flags
	 * with io_poll_wake(). There is only one poll entry queued,
	 * serialise with it by taking its head lock. As we're still arming,
	 * the tw handler is not going to be run, so there are no races with
	 * it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events, first->wait.func);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

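/*
 * Illustrative sketch of when the double entry above is needed: a driver's
 * ->poll() may call poll_wait() on more than one waitqueue (a tty, for
 * instance, has separate read and write queues), which lands in
 * __io_queue_proc() twice:
 *
 *	__poll_t chr_poll(struct file *file, poll_table *wait)
 *	{
 *		poll_wait(file, &dev->read_wait, wait);   // 1st -> io_poll
 *		poll_wait(file, &dev->write_wait, wait);  // 2nd -> double_poll
 *		return mask;
 *	}
 *
 * The names above are hypothetical; only the two poll_wait() calls matter.
 */
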
static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt))
			return 0;
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Release ownership. If someone tried to queue a tw while it was
		 * locked, kick it off for them.
		 */
		v = atomic_dec_return(&req->poll_refs);
		if (unlikely(v & IO_POLL_REF_MASK))
			__io_poll_execute(req, 0);
	}
	return 0;
}

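/*
 * Caller-side sketch of the contract above (mirrors io_poll_add() and
 * io_arm_poll_handler() below):
 *
 *	ret = __io_arm_poll_handler(req, poll, &ipt, mask, issue_flags);
 *	if (ret > 0)		// completed inline, result in ipt.result_mask
 *		...
 *	else if (ret < 0)	// failed to arm, error code
 *		...
 *	else			// handed over, wakeup/tw now owns the request
 *		...
 */
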
static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
		   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
		apoll = container_of(entry, struct async_poll, cache);
	} else {
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	return apoll;
}

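/*
 * Allocation order above: reuse the apoll already attached to a previously
 * polled request, otherwise try the per-ring cache (only safe when
 * ->uring_lock is held, i.e. !IO_URING_F_UNLOCKED), and fall back to a
 * GFP_ATOMIC allocation as the last resort.
 */
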
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

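/*
 * Flow sketch for the armed poll above: an opcode that would block (e.g. a
 * recv returning -EAGAIN) calls io_arm_poll_handler() instead of punting to
 * io-wq; once io_poll_wake() fires, io_apoll_task_func() re-issues the
 * original request from task_work via io_poll_issue()/io_req_task_submit().
 */
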
static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			*out_bucket = hb;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

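/*
 * Locking note for the two lookup helpers above: on success they return with
 * the matching bucket's spinlock still held and report it via *out_bucket;
 * the caller is responsible for dropping it (see __io_poll_cancel() and
 * io_poll_remove()).
 */
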
static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

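/*
 * Userspace-side sketch (assumes liburing, illustrative only): a multishot,
 * level-triggered poll whose flags and events end up in
 * io_poll_parse_events() via sqe->len and sqe->poll32_events:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
 *	sqe->len |= IORING_POLL_ADD_LEVEL;
 */
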
int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only mask the event flags, keep behavior flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}