io_uring: don't expose io_fill_cqe_aux()
[linux-block.git] / io_uring / poll.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
        struct file *file;
        u64 old_user_data;
        u64 new_user_data;
        __poll_t events;
        bool update_events;
        bool update_user_data;
};

struct io_poll_table {
        struct poll_table_struct pt;
        struct io_kiocb *req;
        int nr_entries;
        int error;
};

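/*
 * Layout of ->poll_refs: bit 31 marks the request as cancelled, the low 31
 * bits hold the ownership reference count (see io_poll_get_ownership() below).
 */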
#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_REF_MASK	GENMASK(30, 0)

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it to acquire ownership. Modifying a request while not
 * owning it is disallowed; that prevents races when enqueueing task_work and
 * between arming the poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
        atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
        /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
        if (req->opcode == IORING_OP_POLL_ADD)
                return req->async_data;
        return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
        if (req->opcode == IORING_OP_POLL_ADD)
                return io_kiocb_to_cmd(req);
        return &req->apoll->poll;
}

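/*
 * Armed poll requests are hashed by their CQE user_data so that cancellation
 * and poll update/remove can find them again later.
 */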
static void io_poll_req_insert(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);
        struct io_hash_bucket *hb = &table->hbs[index];

        spin_lock(&hb->lock);
        hlist_add_head(&req->hash_node, &hb->list);
        spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);
        spinlock_t *lock = &table->hbs[index].lock;

        spin_lock(lock);
        hash_del(&req->hash_node);
        spin_unlock(lock);
}

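/* ->cancel_table_locked is protected by ->uring_lock, no per-bucket locking */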
static void io_poll_req_insert_locked(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table_locked;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);

        hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
        struct io_ring_ctx *ctx = req->ctx;

        if (req->flags & REQ_F_HASH_LOCKED) {
                /*
                 * ->cancel_table_locked is protected by ->uring_lock in
                 * contrast to per bucket spinlocks. Likely, tctx_task_work()
                 * already grabbed the mutex for us, but there is a chance it
                 * failed.
                 */
                io_tw_lock(ctx, locked);
                hash_del(&req->hash_node);
        } else {
                io_poll_req_delete(req, ctx);
        }
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
                              wait_queue_func_t wake_func)
{
        poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
        /* mask in events that we always want/need */
        poll->events = events | IO_POLL_UNMASK;
        INIT_LIST_HEAD(&poll->wait.entry);
        init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
        struct wait_queue_head *head = smp_load_acquire(&poll->head);

        if (head) {
                spin_lock_irq(&head->lock);
                list_del_init(&poll->wait.entry);
                poll->head = NULL;
                spin_unlock_irq(&head->lock);
        }
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
        /*
         * Nothing to do if neither of those flags is set. Avoid dipping
         * into the poll/apoll/double cachelines if we can.
         */
        if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
                return;

        /*
         * While we hold the waitqueue lock and the waitqueue is nonempty,
         * wake_up_pollfree() will wait for us. However, taking the waitqueue
         * lock in the first place can race with the waitqueue being freed.
         *
         * We solve this as eventpoll does: by taking advantage of the fact that
         * all users of wake_up_pollfree() will RCU-delay the actual free. If
         * we enter rcu_read_lock() and see that the pointer to the queue is
         * non-NULL, we can then lock it without the memory being freed out from
         * under us.
         *
         * Keep holding rcu_read_lock() as long as we hold the queue lock, in
         * case the caller deletes the entry from the queue, leaving it empty.
         * In that case, only RCU prevents the queue memory from being freed.
         */
        rcu_read_lock();
        if (req->flags & REQ_F_SINGLE_POLL)
                io_poll_remove_entry(io_poll_get_single(req));
        if (req->flags & REQ_F_DOUBLE_POLL)
                io_poll_remove_entry(io_poll_get_double(req));
        rcu_read_unlock();
}

/*
 * All poll task_work should go through this. It checks for poll events,
 * manages references, does rewait, etc.
 *
 * Returns a negative error on failure. Returns >0 when no action is required,
 * meaning either a spurious wakeup or a served multishot CQE. Returns 0 when
 * it's done with the request, in which case the mask is stored in req->cqe.res.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
        struct io_ring_ctx *ctx = req->ctx;
        int v, ret;

        /* req->task == current here, checking PF_EXITING is safe */
        if (unlikely(req->task->flags & PF_EXITING))
                return -ECANCELED;

        do {
                v = atomic_read(&req->poll_refs);

                /* tw handler should be the owner, and so have some references */
                if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
                        return 0;
                if (v & IO_POLL_CANCEL_FLAG)
                        return -ECANCELED;

                if (!req->cqe.res) {
                        struct poll_table_struct pt = { ._key = req->apoll_events };
                        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
                }

                if (unlikely(!req->cqe.res))
                        continue;
                if (req->apoll_events & EPOLLONESHOT)
                        return 0;

                /* multishot, just fill a CQE and proceed */
                if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                        __poll_t mask = mangle_poll(req->cqe.res &
                                                    req->apoll_events);

                        if (!io_post_aux_cqe(ctx, req->cqe.user_data,
                                             mask, IORING_CQE_F_MORE))
                                return -ECANCELED;
                } else {
                        ret = io_poll_issue(req, locked);
                        if (ret)
                                return ret;
                }

                /*
                 * Release all references, retry if someone tried to restart
                 * task_work while we were executing it.
                 */
        } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));

        return 1;
}

static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
        int ret;

        ret = io_poll_check_events(req, locked);
        if (ret > 0)
                return;

        if (!ret) {
                struct io_poll *poll = io_kiocb_to_cmd(req);

                req->cqe.res = mangle_poll(req->cqe.res & poll->events);
        } else {
                req->cqe.res = ret;
                req_set_fail(req);
        }

        io_poll_remove_entries(req);
        io_poll_tw_hash_eject(req, locked);

        io_req_set_res(req, req->cqe.res, 0);
        io_req_task_complete(req, locked);
}

static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
        int ret;

        ret = io_poll_check_events(req, locked);
        if (ret > 0)
                return;

        io_poll_remove_entries(req);
        io_poll_tw_hash_eject(req, locked);

        if (!ret)
                io_req_task_submit(req, locked);
        else
                io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask,
                              __poll_t __maybe_unused events)
{
        io_req_set_res(req, mask, 0);
        /*
         * This is useful for poll that is armed on behalf of another
         * request, and where the wakeup path could be on a different
         * CPU. We want to avoid pulling in req->apoll->events for that
         * case.
         */
        if (req->opcode == IORING_OP_POLL_ADD)
                req->io_task_work.func = io_poll_task_func;
        else
                req->io_task_work.func = io_apoll_task_func;

        trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
        io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res,
                                   __poll_t events)
{
        if (io_poll_get_ownership(req))
                __io_poll_execute(req, res, events);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
        io_poll_mark_cancelled(req);
        /* kick tw, which should complete the request */
        io_poll_execute(req, 0, 0);
}

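/*
 * A wait entry's ->private points at the owning io_kiocb, with the low bit
 * borrowed to mark entries that belong to the second (double) poll entry.
 */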
#define wqe_to_req(wait)	((void *)((unsigned long) (wait)->private & ~1))
#define wqe_is_double(wait)	((unsigned long) (wait)->private & 1)
#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
{
        struct io_kiocb *req = wqe_to_req(wait);
        struct io_poll *poll = container_of(wait, struct io_poll, wait);
        __poll_t mask = key_to_poll(key);

        if (unlikely(mask & POLLFREE)) {
                io_poll_mark_cancelled(req);
                /* we have to kick tw in case it's not already queued */
                io_poll_execute(req, 0, poll->events);

                /*
                 * If the waitqueue is being freed early but someone already
                 * holds ownership over it, we have to tear down the request as
                 * best we can. That means immediately removing the request from
                 * its waitqueue and preventing all further accesses to the
                 * waitqueue via the request.
                 */
                list_del_init(&poll->wait.entry);

                /*
                 * Careful: this *must* be the last step, since as soon
                 * as req->head is NULL'ed out, the request can be
                 * completed and freed, since aio_poll_complete_work()
                 * will no longer need to take the waitqueue lock.
                 */
                smp_store_release(&poll->head, NULL);
                return 1;
        }

        /* for instances that support it, check for an event match first */
        if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
                return 0;

        if (io_poll_get_ownership(req)) {
                /* optional, saves extra locking for removal in tw handler */
                if (mask && poll->events & EPOLLONESHOT) {
                        list_del_init(&poll->wait.entry);
                        poll->head = NULL;
                        if (wqe_is_double(wait))
                                req->flags &= ~REQ_F_DOUBLE_POLL;
                        else
                                req->flags &= ~REQ_F_SINGLE_POLL;
                }
                __io_poll_execute(req, mask, poll->events);
        }
        return 1;
}

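/*
 * Helper for the vfs_poll() queue-proc callbacks below: registers our wait
 * entry on the file's waitqueue, allocating a second io_poll when the file
 * uses more than one waitqueue head.
 */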
static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
                            struct wait_queue_head *head,
                            struct io_poll **poll_ptr)
{
        struct io_kiocb *req = pt->req;
        unsigned long wqe_private = (unsigned long) req;

        /*
         * The file being polled uses multiple waitqueues for poll handling
         * (e.g. one for read, one for write). Set up a separate io_poll
         * if this happens.
         */
        if (unlikely(pt->nr_entries)) {
                struct io_poll *first = poll;

                /* double add on the same waitqueue head, ignore */
                if (first->head == head)
                        return;
                /* already have a 2nd entry, fail a third attempt */
                if (*poll_ptr) {
                        if ((*poll_ptr)->head == head)
                                return;
                        pt->error = -EINVAL;
                        return;
                }

                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
                if (!poll) {
                        pt->error = -ENOMEM;
                        return;
                }
                /* mark as double wq entry */
                wqe_private |= 1;
                req->flags |= REQ_F_DOUBLE_POLL;
                io_init_poll_iocb(poll, first->events, first->wait.func);
                *poll_ptr = poll;
                if (req->opcode == IORING_OP_POLL_ADD)
                        req->flags |= REQ_F_ASYNC_DATA;
        }

        req->flags |= REQ_F_SINGLE_POLL;
        pt->nr_entries++;
        poll->head = head;
        poll->wait.private = (void *) wqe_private;

        if (poll->events & EPOLLEXCLUSIVE)
                add_wait_queue_exclusive(head, &poll->wait);
        else
                add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
                               struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct io_poll *poll = io_kiocb_to_cmd(pt->req);

        __io_queue_proc(poll, pt, head,
                        (struct io_poll **) &pt->req->async_data);
}

static int __io_arm_poll_handler(struct io_kiocb *req,
                                 struct io_poll *poll,
                                 struct io_poll_table *ipt, __poll_t mask)
{
        struct io_ring_ctx *ctx = req->ctx;
        int v;

        INIT_HLIST_NODE(&req->hash_node);
        req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
        io_init_poll_iocb(poll, mask, io_poll_wake);
        poll->file = req->file;

        req->apoll_events = poll->events;

        ipt->pt._key = mask;
        ipt->req = req;
        ipt->error = 0;
        ipt->nr_entries = 0;

        /*
         * Take ownership to delay any tw execution until we're done with poll
         * arming, see io_poll_get_ownership().
         */
        atomic_set(&req->poll_refs, 1);
        mask = vfs_poll(req->file, &ipt->pt) & poll->events;

        if (mask &&
           ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
                io_poll_remove_entries(req);
                /* no one else has access to the req, forget about the ref */
                return mask;
        }

        if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
                io_poll_remove_entries(req);
                if (!ipt->error)
                        ipt->error = -EINVAL;
                return 0;
        }

        if (req->flags & REQ_F_HASH_LOCKED)
                io_poll_req_insert_locked(req);
        else
                io_poll_req_insert(req);

        if (mask && (poll->events & EPOLLET)) {
                /* can't multishot if failed, just queue the event we've got */
                if (unlikely(ipt->error || !ipt->nr_entries)) {
                        poll->events |= EPOLLONESHOT;
                        req->apoll_events |= EPOLLONESHOT;
                        ipt->error = 0;
                }
                __io_poll_execute(req, mask, poll->events);
                return 0;
        }

        /*
         * Release ownership. If someone tried to queue a tw while it was
         * locked, kick it off for them.
         */
        v = atomic_dec_return(&req->poll_refs);
        if (unlikely(v & IO_POLL_REF_MASK))
                __io_poll_execute(req, 0, poll->events);
        return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
                                struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct async_poll *apoll = pt->req->apoll;

        __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

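/*
 * Arm poll-driven retry for a request that would otherwise block: the request
 * is retried from task_work once the file signals readiness. Returns
 * IO_APOLL_OK if the poll was armed, IO_APOLL_READY if events are already
 * available and the request should be retried right away, or IO_APOLL_ABORTED
 * if poll-driven retry can't be used (or arming failed).
 */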
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
        const struct io_op_def *def = &io_op_defs[req->opcode];
        struct io_ring_ctx *ctx = req->ctx;
        struct async_poll *apoll;
        struct io_poll_table ipt;
        __poll_t mask = POLLPRI | POLLERR | EPOLLET;
        int ret;

        /*
         * apoll requests already grab the mutex to complete in the tw handler,
         * so removal from the mutex-backed hash is free, use it by default.
         */
        if (issue_flags & IO_URING_F_UNLOCKED)
                req->flags &= ~REQ_F_HASH_LOCKED;
        else
                req->flags |= REQ_F_HASH_LOCKED;

        if (!def->pollin && !def->pollout)
                return IO_APOLL_ABORTED;
        if (!file_can_poll(req->file))
                return IO_APOLL_ABORTED;
        if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
                return IO_APOLL_ABORTED;
        if (!(req->flags & REQ_F_APOLL_MULTISHOT))
                mask |= EPOLLONESHOT;

        if (def->pollin) {
                mask |= EPOLLIN | EPOLLRDNORM;

                /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
                if (req->flags & REQ_F_CLEAR_POLLIN)
                        mask &= ~EPOLLIN;
        } else {
                mask |= EPOLLOUT | EPOLLWRNORM;
        }
        if (def->poll_exclusive)
                mask |= EPOLLEXCLUSIVE;
        if (req->flags & REQ_F_POLLED) {
                apoll = req->apoll;
                kfree(apoll->double_poll);
        } else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
                   !list_empty(&ctx->apoll_cache)) {
                apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
                                         poll.wait.entry);
                list_del_init(&apoll->poll.wait.entry);
        } else {
                apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
                if (unlikely(!apoll))
                        return IO_APOLL_ABORTED;
        }
        apoll->double_poll = NULL;
        req->apoll = apoll;
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;

        io_kbuf_recycle(req, issue_flags);

        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
        if (ret || ipt.error)
                return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;

        trace_io_uring_poll_arm(ctx, req, req->cqe.user_data, req->opcode,
                                mask, apoll->poll.events);
        return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
                                            struct io_hash_table *table,
                                            bool cancel_all)
{
        unsigned nr_buckets = 1U << table->hash_bits;
        struct hlist_node *tmp;
        struct io_kiocb *req;
        bool found = false;
        int i;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &table->hbs[i];

                spin_lock(&hb->lock);
                hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
                        if (io_match_task_safe(req, tsk, cancel_all)) {
                                hlist_del_init(&req->hash_node);
                                io_poll_cancel_req(req);
                                found = true;
                        }
                }
                spin_unlock(&hb->lock);
        }
        return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
                               bool cancel_all)
        __must_hold(&ctx->uring_lock)
{
        return io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all) |
               io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
}

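/*
 * Look up an armed poll request by user_data in the given hash table. On
 * success the matching bucket is returned locked through @out_bucket and the
 * caller must drop that lock; on failure the bucket lock is already released.
 */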
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
                                     struct io_cancel_data *cd,
                                     struct io_hash_table *table,
                                     struct io_hash_bucket **out_bucket)
{
        struct io_kiocb *req;
        u32 index = hash_long(cd->data, table->hash_bits);
        struct io_hash_bucket *hb = &table->hbs[index];

        *out_bucket = NULL;

        spin_lock(&hb->lock);
        hlist_for_each_entry(req, &hb->list, hash_node) {
                if (cd->data != req->cqe.user_data)
                        continue;
                if (poll_only && req->opcode != IORING_OP_POLL_ADD)
                        continue;
                if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
                        if (cd->seq == req->work.cancel_seq)
                                continue;
                        req->work.cancel_seq = cd->seq;
                }
                *out_bucket = hb;
                return req;
        }
        spin_unlock(&hb->lock);
        return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
                                          struct io_cancel_data *cd,
                                          struct io_hash_table *table,
                                          struct io_hash_bucket **out_bucket)
{
        unsigned nr_buckets = 1U << table->hash_bits;
        struct io_kiocb *req;
        int i;

        *out_bucket = NULL;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &table->hbs[i];

                spin_lock(&hb->lock);
                hlist_for_each_entry(req, &hb->list, hash_node) {
                        if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
                            req->file != cd->file)
                                continue;
                        if (cd->seq == req->work.cancel_seq)
                                continue;
                        req->work.cancel_seq = cd->seq;
                        *out_bucket = hb;
                        return req;
                }
                spin_unlock(&hb->lock);
        }
        return NULL;
}

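/*
 * Take ownership of a previously found request and unhash it. The caller must
 * hold whatever lock protects the hash it was found in (the bucket spinlock
 * or ->uring_lock).
 */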
static int io_poll_disarm(struct io_kiocb *req)
{
        if (!req)
                return -ENOENT;
        if (!io_poll_get_ownership(req))
                return -EALREADY;
        io_poll_remove_entries(req);
        hash_del(&req->hash_node);
        return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                            struct io_hash_table *table)
{
        struct io_hash_bucket *bucket;
        struct io_kiocb *req;

        if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
                req = io_poll_file_find(ctx, cd, table, &bucket);
        else
                req = io_poll_find(ctx, false, cd, table, &bucket);

        if (req)
                io_poll_cancel_req(req);
        if (bucket)
                spin_unlock(&bucket->lock);
        return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                   unsigned issue_flags)
{
        int ret;

        ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
        if (ret != -ENOENT)
                return ret;

        io_ring_submit_lock(ctx, issue_flags);
        ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
        io_ring_submit_unlock(ctx, issue_flags);
        return ret;
}

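/*
 * Parse the poll mask from sqe->poll32_events (half-word swapped on
 * big-endian) and add the EPOLLONESHOT/EPOLLET behaviour bits implied by the
 * poll-add flags.
 */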
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
                                     unsigned int flags)
{
        u32 events;

        events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
        events = swahw32(events);
#endif
        if (!(flags & IORING_POLL_ADD_MULTI))
                events |= EPOLLONESHOT;
        if (!(flags & IORING_POLL_ADD_LEVEL))
                events |= EPOLLET;
        return demangle_poll(events) |
               (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll_update *upd = io_kiocb_to_cmd(req);
        u32 flags;

        if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
                      IORING_POLL_ADD_MULTI))
                return -EINVAL;
        /* meaningless without update */
        if (flags == IORING_POLL_ADD_MULTI)
                return -EINVAL;

        upd->old_user_data = READ_ONCE(sqe->addr);
        upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
        upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

        upd->new_user_data = READ_ONCE(sqe->off);
        if (!upd->update_user_data && upd->new_user_data)
                return -EINVAL;
        if (upd->update_events)
                upd->events = io_poll_parse_events(sqe, flags);
        else if (sqe->poll32_events)
                return -EINVAL;

        return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll *poll = io_kiocb_to_cmd(req);
        u32 flags;

        if (sqe->buf_index || sqe->off || sqe->addr)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_ADD_MULTI|IORING_POLL_ADD_LEVEL))
                return -EINVAL;
        if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
                return -EINVAL;

        poll->events = io_poll_parse_events(sqe, flags);
        return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll *poll = io_kiocb_to_cmd(req);
        struct io_poll_table ipt;
        int ret;

        ipt.pt._qproc = io_poll_queue_proc;

        /*
         * If sqpoll or single issuer, there is no contention for ->uring_lock
         * and we'll end up holding it in tw handlers anyway.
         */
        if (!(issue_flags & IO_URING_F_UNLOCKED) &&
            (req->ctx->flags & (IORING_SETUP_SQPOLL | IORING_SETUP_SINGLE_ISSUER)))
                req->flags |= REQ_F_HASH_LOCKED;
        else
                req->flags &= ~REQ_F_HASH_LOCKED;

        ret = __io_arm_poll_handler(req, poll, &ipt, poll->events);
        if (ret) {
                io_req_set_res(req, ret, 0);
                return IOU_OK;
        }
        if (ipt.error) {
                req_set_fail(req);
                return ipt.error;
        }

        return IOU_ISSUE_SKIP_COMPLETE;
}

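/*
 * IORING_OP_POLL_REMOVE: find the original poll request by user_data, disarm
 * it, and either cancel it or re-arm it with updated events/user_data.
 */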
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
        struct io_cancel_data cd = { .data = poll_update->old_user_data, };
        struct io_ring_ctx *ctx = req->ctx;
        struct io_hash_bucket *bucket;
        struct io_kiocb *preq;
        int ret2, ret = 0;
        bool locked;

        preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
        ret2 = io_poll_disarm(preq);
        if (bucket)
                spin_unlock(&bucket->lock);
        if (!ret2)
                goto found;
        if (ret2 != -ENOENT) {
                ret = ret2;
                goto out;
        }

        io_ring_submit_lock(ctx, issue_flags);
        preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
        ret2 = io_poll_disarm(preq);
        if (bucket)
                spin_unlock(&bucket->lock);
        io_ring_submit_unlock(ctx, issue_flags);
        if (ret2) {
                ret = ret2;
                goto out;
        }

found:
        if (poll_update->update_events || poll_update->update_user_data) {
                /* only replace the event mask, keep the behavior flags */
                if (poll_update->update_events) {
                        struct io_poll *poll = io_kiocb_to_cmd(preq);

                        poll->events &= ~0xffff;
                        poll->events |= poll_update->events & 0xffff;
                        poll->events |= IO_POLL_UNMASK;
                }
                if (poll_update->update_user_data)
                        preq->cqe.user_data = poll_update->new_user_data;

                ret2 = io_poll_add(preq, issue_flags);
                /* successfully updated, don't complete poll request */
                if (!ret2 || ret2 == -EIOCBQUEUED)
                        goto out;
        }

        req_set_fail(preq);
        io_req_set_res(preq, -ECANCELED, 0);
        locked = !(issue_flags & IO_URING_F_UNLOCKED);
        io_req_task_complete(preq, &locked);
out:
        if (ret < 0) {
                req_set_fail(req);
                return ret;
        }
        /* complete update request, we're done with it */
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}