// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
};

#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_REF_MASK	GENMASK(30, 0)

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free. We can bump it and acquire ownership. Modifying a request without
 * owning it is disallowed; this prevents races when enqueueing task_work and
 * between poll arming and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}
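
/*
 * Typical life cycle of ->poll_refs, as implemented below: a waitqueue wakeup
 * (io_poll_wake) or a cancelation path calls io_poll_get_ownership(); the
 * first caller to take the refcount from zero owns the request and queues
 * task_work. The task_work handler (io_poll_check_events) then drops all
 * references it observed in a single atomic_sub_return() and loops if more
 * wakeups arrived in the meantime, so no event is lost.
 */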

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req);
	return &req->apoll->poll;
}

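/*
 * Cancelation hashing: armed poll requests are stashed in ctx->cancel_hash,
 * keyed by cqe.user_data, so that cancel and update requests can find them.
 * Each bucket (struct io_hash_bucket) has its own spinlock.
 */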
static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	u32 index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
	struct io_hash_bucket *hb = &ctx->cancel_hash[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	u32 index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
	spinlock_t *lock = &ctx->cancel_hash[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags are set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free. If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. >0 when no action is required, which
 * is either a spurious wakeup or a multishot CQE having been served. 0 when
 * it's done with the request, in which case the mask is stored in
 * req->cqe.res.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v, ret;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		/* tw handler should be the owner, and so have some references */
		if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
			return 0;
		if (v & IO_POLL_CANCEL_FLAG)
			return -ECANCELED;

		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
		}

		if (unlikely(!req->cqe.res))
			continue;
		if (req->apoll_events & EPOLLONESHOT)
			return 0;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);
			bool filled;

			spin_lock(&ctx->completion_lock);
			filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
						 mask, IORING_CQE_F_MORE);
			io_commit_cqring(ctx);
			spin_unlock(&ctx->completion_lock);
			if (filled) {
				io_cqring_ev_posted(ctx);
				continue;
			}
			return -ECANCELED;
		}

		ret = io_poll_issue(req, locked);
		if (ret)
			return ret;

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));

	return 1;
}

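/*
 * task_work completion path for IORING_OP_POLL_ADD: translates the poll mask
 * into a CQE result and posts the completion under ->completion_lock.
 */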
static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret > 0)
		return;

	if (!ret) {
		struct io_poll *poll = io_kiocb_to_cmd(req);

		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
	} else {
		req->cqe.res = ret;
		req_set_fail(req);
	}

	io_poll_remove_entries(req);
	io_poll_req_delete(req, ctx);
	spin_lock(&ctx->completion_lock);
	req->cqe.flags = 0;
	__io_req_complete_post(req);
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);
}

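/*
 * task_work path for poll armed internally on behalf of another request
 * (async poll): on success the original request is re-issued, otherwise it
 * is failed.
 */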
static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret > 0)
		return;

	io_poll_remove_entries(req);
	io_poll_req_delete(req, req->ctx);

	if (!ret)
		io_req_task_submit(req, locked);
	else
		io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask,
			      __poll_t __maybe_unused events)
{
	io_req_set_res(req, mask, 0);
	/*
	 * This is useful for poll that is armed on behalf of another
	 * request, and where the wakeup path could be on a different
	 * CPU. We want to avoid pulling in req->apoll->events for that
	 * case.
	 */
	if (req->opcode == IORING_OP_POLL_ADD)
		req->io_task_work.func = io_poll_task_func;
	else
		req->io_task_work.func = io_apoll_task_func;

	trace_io_uring_task_add(req->ctx, req, req->cqe.user_data, req->opcode, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res,
				   __poll_t events)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res, events);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0, 0);
}

#define wqe_to_req(wait)	((void *)((unsigned long) (wait)->private & ~1))
#define wqe_is_double(wait)	((unsigned long) (wait)->private & 1)
#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

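/*
 * Waitqueue wake callback; it may run on a CPU other than the submitter's
 * and possibly from interrupt context. It only takes ownership and queues
 * task_work, the heavy lifting happens in the task_work handlers above.
 */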
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE)) {
		io_poll_mark_cancelled(req);
		/* we have to kick tw in case it's not already */
		io_poll_execute(req, 0, poll->events);

		/*
		 * If the waitqueue is being freed early but someone already
		 * holds ownership over it, we have to tear down the request as
		 * best we can. That means immediately removing the request from
		 * its waitqueue and preventing all further accesses to the
		 * waitqueue via the request.
		 */
		list_del_init(&poll->wait.entry);

		/*
		 * Careful: this *must* be the last step, since as soon
		 * as req->head is NULL'ed out, the request can be
		 * completed and freed, since aio_poll_complete_work()
		 * will no longer need to take the waitqueue lock.
		 */
		smp_store_release(&poll->head, NULL);
		return 1;
	}

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask, poll->events);
	}
	return 1;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Setup a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}
		/* mark as double wq entry */
		wqe_private |= 1;
		req->flags |= REQ_F_DOUBLE_POLL;
		io_init_poll_iocb(poll, first->events, first->wait.func);
		*poll_ptr = poll;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
	}

	req->flags |= REQ_F_SINGLE_POLL;
	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

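/*
 * Arm poll for the request: install the waitqueue entries via vfs_poll() and
 * hash the request for cancelation. Returns the event mask (> 0) when an
 * edge-triggered oneshot request is already ready at arming time, in which
 * case the caller completes it; returns 0 otherwise, with any arming failure
 * reported through ipt->error.
 */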
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;

	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;

	/*
	 * Take the ownership to delay any tw execution up until we're done
	 * with poll arming. see io_poll_get_ownership().
	 */
	atomic_set(&req->poll_refs, 1);
	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		io_poll_remove_entries(req);
		/* no one else has access to the req, forget about the ref */
		return mask;
	}

	if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);
		if (!ipt->error)
			ipt->error = -EINVAL;
		return 0;
	}

	io_poll_req_insert(req);

	if (mask && (poll->events & EPOLLET)) {
		/* can't multishot if failed, just queue the event we've got */
		if (unlikely(ipt->error || !ipt->nr_entries)) {
			poll->events |= EPOLLONESHOT;
			req->apoll_events |= EPOLLONESHOT;
			ipt->error = 0;
		}
		__io_poll_execute(req, mask, poll->events);
		return 0;
	}

	/*
	 * Release ownership. If someone tried to queue a tw while it was
	 * locked, kick it off for them.
	 */
	v = atomic_dec_return(&req->poll_refs);
	if (unlikely(v & IO_POLL_REF_MASK))
		__io_poll_execute(req, 0, poll->events);
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

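/*
 * Try to arm poll on behalf of a request that would otherwise block.
 * Returns IO_APOLL_OK if poll was armed, IO_APOLL_READY if the request is
 * already ready and should be retried inline, and IO_APOLL_ABORTED if
 * arming is not possible and the request should be punted to io-wq.
 */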
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;
	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
		   !list_empty(&ctx->apoll_cache)) {
		apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
					 poll.wait.entry);
		list_del_init(&apoll->poll.wait.entry);
	} else {
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return IO_APOLL_ABORTED;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
	if (ret || ipt.error)
		return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;

	trace_io_uring_poll_arm(ctx, req, req->cqe.user_data, req->opcode,
				mask, apoll->poll.events);
	return IO_APOLL_OK;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct io_hash_bucket *hb = &ctx->cancel_hash[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

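/*
 * Note on locking: both io_poll_find() and io_poll_file_find() return with
 * the matching hash bucket lock still held on success; the caller is
 * responsible for dropping it. On failure the lock has already been
 * released.
 */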
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, ctx->cancel_hash_bits);
	struct io_hash_bucket *hb = &ctx->cancel_hash[index];

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd)
{
	struct io_kiocb *req;
	int i;

	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
		struct io_hash_bucket *hb = &ctx->cancel_hash[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

static bool io_poll_disarm(struct io_kiocb *req)
{
	if (!io_poll_get_ownership(req))
		return false;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return true;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
{
	struct io_kiocb *req;
	u32 index;
	spinlock_t *lock;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd);
	else
		req = io_poll_find(ctx, false, cd);
	if (!req) {
		return -ENOENT;
	} else {
		index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
		lock = &ctx->cancel_hash[index].lock;
	}
	io_poll_cancel_req(req);
	spin_unlock(lock);
	return 0;
}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
		(events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_ADD_MULTI|IORING_POLL_ADD_LEVEL))
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}
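
/*
 * Illustrative sketch (not part of the original file): how a poll SQE maps
 * onto the prep helpers above. Field usage follows io_poll_add_prep() and
 * io_poll_parse_events(); userspace wrappers such as liburing's
 * io_uring_prep_poll_add() are assumptions here, only the raw SQE layout is
 * taken from this file:
 *
 *	sqe->opcode        = IORING_OP_POLL_ADD;
 *	sqe->fd            = <fd to poll>;
 *	sqe->poll32_events = EPOLLIN;               // events, see io_poll_parse_events()
 *	sqe->len           = IORING_POLL_ADD_MULTI; // flags: multishot poll
 *
 * Without IORING_POLL_ADD_MULTI the poll is oneshot (EPOLLONESHOT is added
 * by io_poll_parse_events()), and without IORING_POLL_ADD_LEVEL it behaves
 * edge-triggered (EPOLLET is added).
 */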

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events);
	if (ret) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}
	if (ipt.error) {
		req_set_fail(req);
		return ipt.error;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	u32 index = hash_long(cd.data, ctx->cancel_hash_bits);
	spinlock_t *lock = &ctx->cancel_hash[index].lock;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd);
	if (!preq) {
		ret = -ENOENT;
		goto out;
	}
	ret2 = io_poll_disarm(preq);
	spin_unlock(lock);
	if (!ret2) {
		ret = -EALREADY;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only mask one event flags, keep behavior flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}