io_uring: move cancelation into its own file
linux-block.git: io_uring/poll.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "poll.h"

struct io_poll_update {
        struct file *file;
        u64 old_user_data;
        u64 new_user_data;
        __poll_t events;
        bool update_events;
        bool update_user_data;
};

struct io_poll_table {
        struct poll_table_struct pt;
        struct io_kiocb *req;
        int nr_entries;
        int error;
};

#define IO_POLL_CANCEL_FLAG     BIT(31)
#define IO_POLL_REF_MASK        GENMASK(30, 0)

/*
 * If the refcount part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the
 * request is free and we can bump it to acquire ownership. Requests must not
 * be modified without owning them; that prevents races when enqueueing
 * task_work and between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
        atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
        /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
        if (req->opcode == IORING_OP_POLL_ADD)
                return req->async_data;
        return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
        if (req->opcode == IORING_OP_POLL_ADD)
                return io_kiocb_to_cmd(req);
        return &req->apoll->poll;
}

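/*
 * Hash the request into the ring's cancel_hash by its user_data so that poll
 * remove/cancel can find it later. Callers hold ->completion_lock.
 */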
static void io_poll_req_insert(struct io_kiocb *req)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct hlist_head *list;

        list = &ctx->cancel_hash[hash_long(req->cqe.user_data, ctx->cancel_hash_bits)];
        hlist_add_head(&req->hash_node, list);
}

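/*
 * Initialise an io_poll entry: no waitqueue head yet, the requested events
 * plus the error/hangup bits we always want, and the wake callback the
 * waitqueue will invoke.
 */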
static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
                              wait_queue_func_t wake_func)
{
        poll->head = NULL;
#define IO_POLL_UNMASK  (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
        /* mask in events that we always want/need */
        poll->events = events | IO_POLL_UNMASK;
        INIT_LIST_HEAD(&poll->wait.entry);
        init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
        struct wait_queue_head *head = smp_load_acquire(&poll->head);

        if (head) {
                spin_lock_irq(&head->lock);
                list_del_init(&poll->wait.entry);
                poll->head = NULL;
                spin_unlock_irq(&head->lock);
        }
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
        /*
         * Nothing to do if neither of those flags are set. Avoid dipping
         * into the poll/apoll/double cachelines if we can.
         */
        if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
                return;

        /*
         * While we hold the waitqueue lock and the waitqueue is nonempty,
         * wake_up_pollfree() will wait for us. However, taking the waitqueue
         * lock in the first place can race with the waitqueue being freed.
         *
         * We solve this as eventpoll does: by taking advantage of the fact that
         * all users of wake_up_pollfree() will RCU-delay the actual free. If
         * we enter rcu_read_lock() and see that the pointer to the queue is
         * non-NULL, we can then lock it without the memory being freed out from
         * under us.
         *
         * Keep holding rcu_read_lock() as long as we hold the queue lock, in
         * case the caller deletes the entry from the queue, leaving it empty.
         * In that case, only RCU prevents the queue memory from being freed.
         */
        rcu_read_lock();
        if (req->flags & REQ_F_SINGLE_POLL)
                io_poll_remove_entry(io_poll_get_single(req));
        if (req->flags & REQ_F_DOUBLE_POLL)
                io_poll_remove_entry(io_poll_get_double(req));
        rcu_read_unlock();
}

/*
 * All poll task_work should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. Returns >0 when no action is required,
 * which means either a spurious wakeup or a multishot CQE has been served.
 * Returns 0 when it's done with the request, in which case the mask is stored
 * in req->cqe.res.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
        struct io_ring_ctx *ctx = req->ctx;
        int v, ret;

        /* req->task == current here, checking PF_EXITING is safe */
        if (unlikely(req->task->flags & PF_EXITING))
                return -ECANCELED;

        do {
                v = atomic_read(&req->poll_refs);

                /* tw handler should be the owner, and so have some references */
                if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
                        return 0;
                if (v & IO_POLL_CANCEL_FLAG)
                        return -ECANCELED;

                if (!req->cqe.res) {
                        struct poll_table_struct pt = { ._key = req->apoll_events };
                        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
                }

                if (unlikely(!req->cqe.res))
                        continue;
                if (req->apoll_events & EPOLLONESHOT)
                        return 0;

                /* multishot, just fill a CQE and proceed */
                if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                        __poll_t mask = mangle_poll(req->cqe.res &
                                                    req->apoll_events);
                        bool filled;

                        spin_lock(&ctx->completion_lock);
                        filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
                                                 mask, IORING_CQE_F_MORE);
                        io_commit_cqring(ctx);
                        spin_unlock(&ctx->completion_lock);
                        if (filled) {
                                io_cqring_ev_posted(ctx);
                                continue;
                        }
                        return -ECANCELED;
                }

                ret = io_poll_issue(req, locked);
                if (ret)
                        return ret;

                /*
                 * Release all references, retry if someone tried to restart
                 * task_work while we were executing it.
                 */
        } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));

        return 1;
}

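/*
 * Task_work completion handler for IORING_OP_POLL_ADD: translate the result
 * into userspace poll bits, remove the waitqueue entries, unhash the request
 * and post its completion.
 */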
static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
        struct io_ring_ctx *ctx = req->ctx;
        int ret;

        ret = io_poll_check_events(req, locked);
        if (ret > 0)
                return;

        if (!ret) {
                struct io_poll *poll = io_kiocb_to_cmd(req);

                req->cqe.res = mangle_poll(req->cqe.res & poll->events);
        } else {
                req->cqe.res = ret;
                req_set_fail(req);
        }

        io_poll_remove_entries(req);
        spin_lock(&ctx->completion_lock);
        hash_del(&req->hash_node);
        req->cqe.flags = 0;
        __io_req_complete_post(req);
        io_commit_cqring(ctx);
        spin_unlock(&ctx->completion_lock);
        io_cqring_ev_posted(ctx);
}

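/*
 * Task_work handler for internally armed (async) poll: tear down the poll
 * entries, unhash the request, and either resubmit it for issue or fail it.
 */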
static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
        struct io_ring_ctx *ctx = req->ctx;
        int ret;

        ret = io_poll_check_events(req, locked);
        if (ret > 0)
                return;

        io_poll_remove_entries(req);
        spin_lock(&ctx->completion_lock);
        hash_del(&req->hash_node);
        spin_unlock(&ctx->completion_lock);

        if (!ret)
                io_req_task_submit(req, locked);
        else
                io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask,
                              __poll_t __maybe_unused events)
{
        io_req_set_res(req, mask, 0);
        /*
         * This is useful for poll that is armed on behalf of another
         * request, and where the wakeup path could be on a different
         * CPU. We want to avoid pulling in req->apoll->events for that
         * case.
         */
        if (req->opcode == IORING_OP_POLL_ADD)
                req->io_task_work.func = io_poll_task_func;
        else
                req->io_task_work.func = io_apoll_task_func;

        trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
        io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res,
                                   __poll_t events)
{
        if (io_poll_get_ownership(req))
                __io_poll_execute(req, res, events);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
        io_poll_mark_cancelled(req);
        /* kick tw, which should complete the request */
        io_poll_execute(req, 0, 0);
}

#define wqe_to_req(wait)        ((void *)((unsigned long) (wait)->private & ~1))
#define wqe_is_double(wait)     ((unsigned long) (wait)->private & 1)
#define IO_ASYNC_POLL_COMMON    (EPOLLONESHOT | EPOLLPRI)

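/*
 * Waitqueue wake callback for a poll entry. Handles waitqueue teardown
 * (POLLFREE), filters out non-matching events, and, if ownership can be
 * grabbed, schedules task_work to process the wakeup.
 */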
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
{
        struct io_kiocb *req = wqe_to_req(wait);
        struct io_poll *poll = container_of(wait, struct io_poll, wait);
        __poll_t mask = key_to_poll(key);

        if (unlikely(mask & POLLFREE)) {
                io_poll_mark_cancelled(req);
                /* we have to kick tw in case it's not already */
                io_poll_execute(req, 0, poll->events);

                /*
                 * If the waitqueue is being freed early but someone already
                 * holds ownership over it, we have to tear down the request as
                 * best we can. That means immediately removing the request from
                 * its waitqueue and preventing all further accesses to the
                 * waitqueue via the request.
                 */
                list_del_init(&poll->wait.entry);

                /*
                 * Careful: this *must* be the last step, since as soon
                 * as req->head is NULL'ed out, the request can be
                 * completed and freed, since aio_poll_complete_work()
                 * will no longer need to take the waitqueue lock.
                 */
                smp_store_release(&poll->head, NULL);
                return 1;
        }

        /* for instances that support it check for an event match first */
        if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
                return 0;

        if (io_poll_get_ownership(req)) {
                /* optional, saves extra locking for removal in tw handler */
                if (mask && poll->events & EPOLLONESHOT) {
                        list_del_init(&poll->wait.entry);
                        poll->head = NULL;
                        if (wqe_is_double(wait))
                                req->flags &= ~REQ_F_DOUBLE_POLL;
                        else
                                req->flags &= ~REQ_F_SINGLE_POLL;
                }
                __io_poll_execute(req, mask, poll->events);
        }
        return 1;
}

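/*
 * poll_table queue proc: add the request to the waitqueue the file hands us.
 * If the file uses more than one waitqueue head, allocate a second io_poll
 * entry and stash it in *poll_ptr (the "double" entry).
 */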
static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
                            struct wait_queue_head *head,
                            struct io_poll **poll_ptr)
{
        struct io_kiocb *req = pt->req;
        unsigned long wqe_private = (unsigned long) req;

        /*
         * The file being polled uses multiple waitqueues for poll handling
         * (e.g. one for read, one for write). Set up a separate io_poll
         * if this happens.
         */
        if (unlikely(pt->nr_entries)) {
                struct io_poll *first = poll;

                /* double add on the same waitqueue head, ignore */
                if (first->head == head)
                        return;
                /* already have a 2nd entry, fail a third attempt */
                if (*poll_ptr) {
                        if ((*poll_ptr)->head == head)
                                return;
                        pt->error = -EINVAL;
                        return;
                }

                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
                if (!poll) {
                        pt->error = -ENOMEM;
                        return;
                }
                /* mark as double wq entry */
                wqe_private |= 1;
                req->flags |= REQ_F_DOUBLE_POLL;
                io_init_poll_iocb(poll, first->events, first->wait.func);
                *poll_ptr = poll;
                if (req->opcode == IORING_OP_POLL_ADD)
                        req->flags |= REQ_F_ASYNC_DATA;
        }

        req->flags |= REQ_F_SINGLE_POLL;
        pt->nr_entries++;
        poll->head = head;
        poll->wait.private = (void *) wqe_private;

        if (poll->events & EPOLLEXCLUSIVE)
                add_wait_queue_exclusive(head, &poll->wait);
        else
                add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
                               struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct io_poll *poll = io_kiocb_to_cmd(pt->req);

        __io_queue_proc(poll, pt, head,
                        (struct io_poll **) &pt->req->async_data);
}

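/*
 * Arm poll for a request: install waitqueue entries via vfs_poll() and check
 * whether it is already ready. Returns a non-zero mask if a oneshot poll
 * completed inline; otherwise returns 0, with the request hashed for wakeup
 * and cancelation, or with ipt->error set if adding to the waitqueue failed.
 */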
static int __io_arm_poll_handler(struct io_kiocb *req,
                                 struct io_poll *poll,
                                 struct io_poll_table *ipt, __poll_t mask)
{
        struct io_ring_ctx *ctx = req->ctx;
        int v;

        INIT_HLIST_NODE(&req->hash_node);
        req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
        io_init_poll_iocb(poll, mask, io_poll_wake);
        poll->file = req->file;

        req->apoll_events = poll->events;

        ipt->pt._key = mask;
        ipt->req = req;
        ipt->error = 0;
        ipt->nr_entries = 0;

        /*
         * Take the ownership to delay any tw execution up until we're done
         * with poll arming. see io_poll_get_ownership().
         */
        atomic_set(&req->poll_refs, 1);
        mask = vfs_poll(req->file, &ipt->pt) & poll->events;

        if (mask && (poll->events & EPOLLONESHOT)) {
                io_poll_remove_entries(req);
                /* no one else has access to the req, forget about the ref */
                return mask;
        }
        if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
                io_poll_remove_entries(req);
                if (!ipt->error)
                        ipt->error = -EINVAL;
                return 0;
        }

        spin_lock(&ctx->completion_lock);
        io_poll_req_insert(req);
        spin_unlock(&ctx->completion_lock);

        if (mask) {
                /* can't multishot if failed, just queue the event we've got */
                if (unlikely(ipt->error || !ipt->nr_entries)) {
                        poll->events |= EPOLLONESHOT;
                        req->apoll_events |= EPOLLONESHOT;
                        ipt->error = 0;
                }
                __io_poll_execute(req, mask, poll->events);
                return 0;
        }

        /*
         * Release ownership. If someone tried to queue a tw while it was
         * locked, kick it off for them.
         */
        v = atomic_dec_return(&req->poll_refs);
        if (unlikely(v & IO_POLL_REF_MASK))
                __io_poll_execute(req, 0, poll->events);
        return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
                                struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct async_poll *apoll = pt->req->apoll;

        __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

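/*
 * Arm async poll (apoll) on behalf of a request that would otherwise block.
 * Returns IO_APOLL_OK if poll was armed, IO_APOLL_READY if the file is
 * already ready and the request should be retried, or IO_APOLL_ABORTED if
 * poll could not be used.
 */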
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
        const struct io_op_def *def = &io_op_defs[req->opcode];
        struct io_ring_ctx *ctx = req->ctx;
        struct async_poll *apoll;
        struct io_poll_table ipt;
        __poll_t mask = POLLPRI | POLLERR;
        int ret;

        if (!def->pollin && !def->pollout)
                return IO_APOLL_ABORTED;
        if (!file_can_poll(req->file))
                return IO_APOLL_ABORTED;
        if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
                return IO_APOLL_ABORTED;
        if (!(req->flags & REQ_F_APOLL_MULTISHOT))
                mask |= EPOLLONESHOT;

        if (def->pollin) {
                mask |= EPOLLIN | EPOLLRDNORM;

                /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
                if (req->flags & REQ_F_CLEAR_POLLIN)
                        mask &= ~EPOLLIN;
        } else {
                mask |= EPOLLOUT | EPOLLWRNORM;
        }
        if (def->poll_exclusive)
                mask |= EPOLLEXCLUSIVE;
        if (req->flags & REQ_F_POLLED) {
                apoll = req->apoll;
                kfree(apoll->double_poll);
        } else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
                   !list_empty(&ctx->apoll_cache)) {
                apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
                                         poll.wait.entry);
                list_del_init(&apoll->poll.wait.entry);
        } else {
                apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
                if (unlikely(!apoll))
                        return IO_APOLL_ABORTED;
        }
        apoll->double_poll = NULL;
        req->apoll = apoll;
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;

        io_kbuf_recycle(req, issue_flags);

        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
        if (ret || ipt.error)
                return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;

        trace_io_uring_poll_arm(ctx, req, req->cqe.user_data, req->opcode,
                                mask, apoll->poll.events);
        return IO_APOLL_OK;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
                               bool cancel_all)
{
        struct hlist_node *tmp;
        struct io_kiocb *req;
        bool found = false;
        int i;

        spin_lock(&ctx->completion_lock);
        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
                struct hlist_head *list;

                list = &ctx->cancel_hash[i];
                hlist_for_each_entry_safe(req, tmp, list, hash_node) {
                        if (io_match_task_safe(req, tsk, cancel_all)) {
                                hlist_del_init(&req->hash_node);
                                io_poll_cancel_req(req);
                                found = true;
                        }
                }
        }
        spin_unlock(&ctx->completion_lock);
        return found;
}

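/*
 * Look up a hashed poll request by user_data for cancelation. With
 * IORING_ASYNC_CANCEL_ALL, entries already seen in this cancel sequence are
 * skipped.
 */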
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
                                     struct io_cancel_data *cd)
        __must_hold(&ctx->completion_lock)
{
        struct hlist_head *list;
        struct io_kiocb *req;

        list = &ctx->cancel_hash[hash_long(cd->data, ctx->cancel_hash_bits)];
        hlist_for_each_entry(req, list, hash_node) {
                if (cd->data != req->cqe.user_data)
                        continue;
                if (poll_only && req->opcode != IORING_OP_POLL_ADD)
                        continue;
                if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
                        if (cd->seq == req->work.cancel_seq)
                                continue;
                        req->work.cancel_seq = cd->seq;
                }
                return req;
        }
        return NULL;
}

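/*
 * Scan the whole cancel hash for a poll request matching the cancelation
 * criteria by file, or any request when IORING_ASYNC_CANCEL_ANY is set.
 */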
static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
                                          struct io_cancel_data *cd)
        __must_hold(&ctx->completion_lock)
{
        struct io_kiocb *req;
        int i;

        for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
                struct hlist_head *list;

                list = &ctx->cancel_hash[i];
                hlist_for_each_entry(req, list, hash_node) {
                        if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
                            req->file != cd->file)
                                continue;
                        if (cd->seq == req->work.cancel_seq)
                                continue;
                        req->work.cancel_seq = cd->seq;
                        return req;
                }
        }
        return NULL;
}

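/*
 * Try to take ownership of a found poll request and detach it: remove its
 * waitqueue entries and unhash it. Returns false if someone else owns it.
 */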
static bool io_poll_disarm(struct io_kiocb *req)
        __must_hold(&ctx->completion_lock)
{
        if (!io_poll_get_ownership(req))
                return false;
        io_poll_remove_entries(req);
        hash_del(&req->hash_node);
        return true;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
        __must_hold(&ctx->completion_lock)
{
        struct io_kiocb *req;

        if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
                req = io_poll_file_find(ctx, cd);
        else
                req = io_poll_find(ctx, false, cd);
        if (!req)
                return -ENOENT;
        io_poll_cancel_req(req);
        return 0;
}

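/*
 * Translate the poll mask from the SQE (poll32_events) into kernel epoll
 * bits, forcing EPOLLONESHOT unless multishot was requested.
 */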
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
                                     unsigned int flags)
{
        u32 events;

        events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
        events = swahw32(events);
#endif
        if (!(flags & IORING_POLL_ADD_MULTI))
                events |= EPOLLONESHOT;
        return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll_update *upd = io_kiocb_to_cmd(req);
        u32 flags;

        if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
                      IORING_POLL_ADD_MULTI))
                return -EINVAL;
        /* meaningless without update */
        if (flags == IORING_POLL_ADD_MULTI)
                return -EINVAL;

        upd->old_user_data = READ_ONCE(sqe->addr);
        upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
        upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

        upd->new_user_data = READ_ONCE(sqe->off);
        if (!upd->update_user_data && upd->new_user_data)
                return -EINVAL;
        if (upd->update_events)
                upd->events = io_poll_parse_events(sqe, flags);
        else if (sqe->poll32_events)
                return -EINVAL;

        return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll *poll = io_kiocb_to_cmd(req);
        u32 flags;

        if (sqe->buf_index || sqe->off || sqe->addr)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~IORING_POLL_ADD_MULTI)
                return -EINVAL;
        if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
                return -EINVAL;

        io_req_set_refcount(req);
        poll->events = io_poll_parse_events(sqe, flags);
        return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll *poll = io_kiocb_to_cmd(req);
        struct io_poll_table ipt;
        int ret;

        ipt.pt._qproc = io_poll_queue_proc;

        ret = __io_arm_poll_handler(req, poll, &ipt, poll->events);
        if (ret) {
                io_req_set_res(req, ret, 0);
                return IOU_OK;
        }
        if (ipt.error) {
                req_set_fail(req);
                return ipt.error;
        }

        return IOU_ISSUE_SKIP_COMPLETE;
}

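/*
 * IORING_OP_POLL_REMOVE: find the original poll request by user_data and
 * disarm it. Depending on the update flags, either update its events and/or
 * user_data and re-arm it, or complete it with -ECANCELED.
 */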
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
        struct io_cancel_data cd = { .data = poll_update->old_user_data, };
        struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *preq;
        int ret2, ret = 0;
        bool locked;

        spin_lock(&ctx->completion_lock);
        preq = io_poll_find(ctx, true, &cd);
        if (!preq || !io_poll_disarm(preq)) {
                spin_unlock(&ctx->completion_lock);
                ret = preq ? -EALREADY : -ENOENT;
                goto out;
        }
        spin_unlock(&ctx->completion_lock);

        if (poll_update->update_events || poll_update->update_user_data) {
                /* only replace the event mask bits, keep the behavior flags */
                if (poll_update->update_events) {
                        struct io_poll *poll = io_kiocb_to_cmd(preq);

                        poll->events &= ~0xffff;
                        poll->events |= poll_update->events & 0xffff;
                        poll->events |= IO_POLL_UNMASK;
                }
                if (poll_update->update_user_data)
                        preq->cqe.user_data = poll_update->new_user_data;

                ret2 = io_poll_add(preq, issue_flags);
                /* successfully updated, don't complete poll request */
                if (!ret2 || ret2 == -EIOCBQUEUED)
                        goto out;
        }

        req_set_fail(preq);
        io_req_set_res(preq, -ECANCELED, 0);
        locked = !(issue_flags & IO_URING_F_UNLOCKED);
        io_req_task_complete(preq, &locked);
out:
        if (ret < 0) {
                req_set_fail(req);
                return ret;
        }
        /* complete update request, we're done with it */
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}