// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
	struct file			*file;
	u64				old_user_data;
	u64				new_user_data;
	__poll_t			events;
	bool				update_events;
	bool				update_user_data;
};

struct io_poll_table {
	struct poll_table_struct pt;
	struct io_kiocb *req;
	int nr_entries;
	int error;
	bool owning;
	/* output value, set only if arm poll returns >0 */
	__poll_t result_mask;
};

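/*
 * ->poll_refs is split in two: bit 31 flags the request as cancelled, while
 * the low 31 bits form an ownership refcount, see io_poll_get_ownership().
 */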
#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_REF_MASK	GENMASK(30, 0)

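/*
 * The owning io_kiocb is stashed in wqe->private; because the allocation is
 * suitably aligned, the low pointer bit is free to tag entries that belong
 * to the second (double) poll entry, see wqe_to_req()/wqe_is_double().
 */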
#define IO_WQE_F_DOUBLE		1

static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
	unsigned long priv = (unsigned long)wqe->private;

	return priv & IO_WQE_F_DOUBLE;
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump the count to acquire ownership. Modifying a request is
 * only allowed while owning it, which prevents races when enqueueing
 * task_work and between arming the poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
	return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
	atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
	if (req->opcode == IORING_OP_POLL_ADD)
		return req->async_data;
	return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
	if (req->opcode == IORING_OP_POLL_ADD)
		return io_kiocb_to_cmd(req, struct io_poll);
	return &req->apoll->poll;
}

static void io_poll_req_insert(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	spin_lock(&hb->lock);
	hlist_add_head(&req->hash_node, &hb->list);
	spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_hash_table *table = &req->ctx->cancel_table;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
	spinlock_t *lock = &table->hbs[index].lock;

	spin_lock(lock);
	hash_del(&req->hash_node);
	spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
	struct io_hash_table *table = &req->ctx->cancel_table_locked;
	u32 index = hash_long(req->cqe.user_data, table->hash_bits);

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

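/*
 * Cancellable requests sit in one of two hash tables keyed by user_data:
 * ->cancel_table with per-bucket spinlocks, or ->cancel_table_locked, which
 * is protected by ->uring_lock. REQ_F_HASH_LOCKED records which table the
 * request was inserted into.
 */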
static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (req->flags & REQ_F_HASH_LOCKED) {
		/*
		 * ->cancel_table_locked is protected by ->uring_lock in
		 * contrast to per bucket spinlocks. Likely, tctx_task_work()
		 * already grabbed the mutex for us, but there is a chance it
		 * failed.
		 */
		io_tw_lock(ctx, locked);
		hash_del(&req->hash_node);
		req->flags &= ~REQ_F_HASH_LOCKED;
	} else {
		io_poll_req_delete(req, ctx);
	}
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
			      wait_queue_func_t wake_func)
{
	poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
	/* mask in events that we always want/need */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
	/*
	 * Nothing to do if neither of those flags is set. Avoid dipping
	 * into the poll/apoll/double cachelines if we can.
	 */
	if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
		return;

	/*
	 * While we hold the waitqueue lock and the waitqueue is nonempty,
	 * wake_up_pollfree() will wait for us. However, taking the waitqueue
	 * lock in the first place can race with the waitqueue being freed.
	 *
	 * We solve this as eventpoll does: by taking advantage of the fact that
	 * all users of wake_up_pollfree() will RCU-delay the actual free. If
	 * we enter rcu_read_lock() and see that the pointer to the queue is
	 * non-NULL, we can then lock it without the memory being freed out from
	 * under us.
	 *
	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
	 * case the caller deletes the entry from the queue, leaving it empty.
	 * In that case, only RCU prevents the queue memory from being freed.
	 */
	rcu_read_lock();
	if (req->flags & REQ_F_SINGLE_POLL)
		io_poll_remove_entry(io_poll_get_single(req));
	if (req->flags & REQ_F_DOUBLE_POLL)
		io_poll_remove_entry(io_poll_get_double(req));
	rcu_read_unlock();
}

enum {
	IOU_POLL_DONE = 0,
	IOU_POLL_NO_ACTION = 1,
	IOU_POLL_REMOVE_POLL_USE_RES = 2,
};

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
 * required, i.e. the wakeup was spurious or a multishot CQE was already
 * served. IOU_POLL_DONE when it's done with the request, with the mask stored
 * in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates that the multishot
 * poll should be removed and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v, ret;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
		return -ECANCELED;

	do {
		v = atomic_read(&req->poll_refs);

		/* tw handler should be the owner, and so have some references */
		if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
			return IOU_POLL_DONE;
		if (v & IO_POLL_CANCEL_FLAG)
			return -ECANCELED;

		/* the mask was stashed in __io_poll_execute */
		if (!req->cqe.res) {
			struct poll_table_struct pt = { ._key = req->apoll_events };
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
		}

		if (unlikely(!req->cqe.res))
			continue;
		if (req->apoll_events & EPOLLONESHOT)
			return IOU_POLL_DONE;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);

			if (!io_post_aux_cqe(ctx, req->cqe.user_data,
					     mask, IORING_CQE_F_MORE, false)) {
				io_req_set_res(req, mask, 0);
				return IOU_POLL_REMOVE_POLL_USE_RES;
			}
		} else {
			ret = io_poll_issue(req, locked);
			if (ret == IOU_STOP_MULTISHOT)
				return IOU_POLL_REMOVE_POLL_USE_RES;
			if (ret < 0)
				return ret;
		}

		/*
		 * Release all references, retry if someone tried to restart
		 * task_work while we were executing it.
		 */
	} while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));

	return IOU_POLL_NO_ACTION;
}

static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;

	if (ret == IOU_POLL_DONE) {
		struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
		req->cqe.res = mangle_poll(req->cqe.res & poll->events);
	} else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
		req->cqe.res = ret;
		req_set_fail(req);
	}

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	io_req_set_res(req, req->cqe.res, 0);
	io_req_task_complete(req, locked);
}

static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
	int ret;

	ret = io_poll_check_events(req, locked);
	if (ret == IOU_POLL_NO_ACTION)
		return;

	io_poll_remove_entries(req);
	io_poll_tw_hash_eject(req, locked);

	if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
		io_req_complete_post(req);
	else if (ret == IOU_POLL_DONE)
		io_req_task_submit(req, locked);
	else
		io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask)
{
	io_req_set_res(req, mask, 0);
	/*
	 * This is useful for poll that is armed on behalf of another
	 * request, and where the wakeup path could be on a different
	 * CPU. We want to avoid pulling in req->apoll->events for that
	 * case.
	 */
	if (req->opcode == IORING_OP_POLL_ADD)
		req->io_task_work.func = io_poll_task_func;
	else
		req->io_task_work.func = io_apoll_task_func;

	trace_io_uring_task_add(req, mask);
	io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
	if (io_poll_get_ownership(req))
		__io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
	io_poll_mark_cancelled(req);
	/* kick tw, which should complete the request */
	io_poll_execute(req, 0);
}

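/*
 * Events requested for (almost) every async poll rather than for the I/O
 * itself; io_poll_wake() masks them out when checking whether a wakeup
 * matches the events the request actually waits for.
 */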
#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);
	/* we have to kick tw in case it's not already */
	io_poll_execute(req, 0);

	/*
	 * If the waitqueue is being freed early but someone already holds
	 * ownership over it, we have to tear down the request as best we
	 * can. That means immediately removing the request from its
	 * waitqueue and preventing all further accesses to the waitqueue
	 * via the request.
	 */
	list_del_init(&poll->wait.entry);

	/*
	 * Careful: this *must* be the last step, since as soon
	 * as req->head is NULL'ed out, the request can be
	 * completed and freed, since aio_poll_complete_work()
	 * will no longer need to take the waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}

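/*
 * Waitqueue callback. Note the return value: returning 0 tells
 * __wake_up_common() that this entry didn't consume the wakeup, so for
 * exclusive waiters the scan continues on to the next entry.
 */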
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
			void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* for instances that support it check for an event match first */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {
		/* optional, saves extra locking for removal in tw handler */
		if (mask && poll->events & EPOLLONESHOT) {
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
			if (wqe_is_double(wait))
				req->flags &= ~REQ_F_DOUBLE_POLL;
			else
				req->flags &= ~REQ_F_SINGLE_POLL;
		}
		__io_poll_execute(req, mask);
	}
	return 1;
}

/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
	struct wait_queue_head *head;
	struct io_poll *poll = io_poll_get_single(req);

	/* head is RCU protected, see io_poll_remove_entries() comments */
	rcu_read_lock();
	head = smp_load_acquire(&poll->head);
	/*
	 * poll arm might not hold ownership and so race for req->flags with
	 * io_poll_wake(). There is only one poll entry queued, serialise with
	 * it by taking its head lock. As we're still arming, the tw handler
	 * is not going to be run, so there are no races with it.
	 */
	if (head) {
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;
		if (req->opcode == IORING_OP_POLL_ADD)
			req->flags |= REQ_F_ASYNC_DATA;
		spin_unlock_irq(&head->lock);
	}
	rcu_read_unlock();
	return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
			    struct wait_queue_head *head,
			    struct io_poll **poll_ptr)
{
	struct io_kiocb *req = pt->req;
	unsigned long wqe_private = (unsigned long) req;

	/*
	 * The file being polled uses multiple waitqueues for poll handling
	 * (e.g. one for read, one for write). Set up a separate io_poll
	 * if this happens.
	 */
	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* double add on the same waitqueue head, ignore */
		if (first->head == head)
			return;
		/* already have a 2nd entry, fail a third attempt */
		if (*poll_ptr) {
			if ((*poll_ptr)->head == head)
				return;
			pt->error = -EINVAL;
			return;
		}

		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}

		/* mark as double wq entry */
		wqe_private |= IO_WQE_F_DOUBLE;
		io_init_poll_iocb(poll, first->events, first->wait.func);
		if (!io_poll_double_prepare(req)) {
			/* the request is completing, just back off */
			kfree(poll);
			return;
		}
		*poll_ptr = poll;
	} else {
		/* fine to modify, there is no poll queued to race with us */
		req->flags |= REQ_F_SINGLE_POLL;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
			       struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

	__io_queue_proc(poll, pt, head,
			(struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
				      struct io_poll_table *pt)
{
	return pt->owning || io_poll_get_ownership(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
				 struct io_poll *poll,
				 struct io_poll_table *ipt, __poll_t mask,
				 unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v;

	INIT_HLIST_NODE(&req->hash_node);
	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;
	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
	ipt->nr_entries = 0;
	/*
	 * Polling is either completed here or via task_work, so if we're in the
	 * task context we're naturally serialised with tw by merit of running
	 * the same task. When it's io-wq, take the ownership to prevent tw
	 * from running. However, when we're in the task context, skip taking
	 * it as an optimisation.
	 *
	 * Note: even though the request won't be completed/freed, without
	 * ownership we still can race with io_poll_wake().
	 * io_poll_can_finish_inline() tries to deal with that.
	 */
	ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
	atomic_set(&req->poll_refs, (int)ipt->owning);

	/* io-wq doesn't hold uring_lock */
	if (issue_flags & IO_URING_F_UNLOCKED)
		req->flags &= ~REQ_F_HASH_LOCKED;

	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (unlikely(ipt->error || !ipt->nr_entries)) {
		io_poll_remove_entries(req);

		if (!io_poll_can_finish_inline(req, ipt)) {
			io_poll_mark_cancelled(req);
			return 0;
		} else if (mask && (poll->events & EPOLLET)) {
			ipt->result_mask = mask;
			return 1;
		}
		return ipt->error ?: -EINVAL;
	}

	if (mask &&
	   ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		if (!io_poll_can_finish_inline(req, ipt))
			return 0;
		io_poll_remove_entries(req);
		ipt->result_mask = mask;
		/* no one else has access to the req, forget about the ref */
		return 1;
	}

	if (req->flags & REQ_F_HASH_LOCKED)
		io_poll_req_insert_locked(req);
	else
		io_poll_req_insert(req);

	if (mask && (poll->events & EPOLLET) &&
	    io_poll_can_finish_inline(req, ipt)) {
		__io_poll_execute(req, mask);
		return 0;
	}

	if (ipt->owning) {
		/*
		 * Release ownership. If someone tried to queue a tw while it was
		 * locked, kick it off for them.
		 */
		v = atomic_dec_return(&req->poll_refs);
		if (unlikely(v & IO_POLL_REF_MASK))
			__io_poll_execute(req, 0);
	}
	return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
				struct poll_table_struct *p)
{
	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
	struct async_poll *apoll = pt->req->apoll;

	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

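/*
 * Grab an async_poll entry: reuse the one from a previous poll attempt if
 * REQ_F_POLLED is set, else try the ctx cache (only safe while holding
 * ->uring_lock, hence the IO_URING_F_UNLOCKED check), else fall back to a
 * GFP_ATOMIC allocation.
 */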
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
					     unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
		   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
		apoll = container_of(entry, struct async_poll, cache);
	} else {
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;
	return apoll;
}

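/*
 * Arm poll-driven retry for a request whose file isn't ready. Returns
 * IO_APOLL_OK when the poll was armed, IO_APOLL_READY when the file became
 * ready while arming (the caller should retry the request inline), and
 * IO_APOLL_ABORTED when polling can't be used, in which case the request is
 * typically punted to io-wq instead.
 */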
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = POLLPRI | POLLERR | EPOLLET;
	int ret;

	/*
	 * apoll requests already grab the mutex to complete in the tw handler,
	 * so removal from the mutex-backed hash is free, use it by default.
	 */
	req->flags |= REQ_F_HASH_LOCKED;

	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
	if (!file_can_poll(req->file))
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= EPOLLIN | EPOLLRDNORM;

		/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
		if (req->flags & REQ_F_CLEAR_POLLIN)
			mask &= ~EPOLLIN;
	} else {
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	if (def->poll_exclusive)
		mask |= EPOLLEXCLUSIVE;

	apoll = io_req_alloc_apoll(req, issue_flags);
	if (!apoll)
		return IO_APOLL_ABORTED;
	req->flags |= REQ_F_POLLED;
	ipt.pt._qproc = io_async_queue_proc;

	io_kbuf_recycle(req, issue_flags);

	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
	if (ret)
		return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
	trace_io_uring_poll_arm(req, mask, apoll->poll.events);
	return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
					    struct io_hash_table *table,
					    bool cancel_all)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;
	int i;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
			if (io_match_task_safe(req, tsk, cancel_all)) {
				hlist_del_init(&req->hash_node);
				io_poll_cancel_req(req);
				found = true;
			}
		}
		spin_unlock(&hb->lock);
	}
	return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
			       bool cancel_all)
	__must_hold(&ctx->uring_lock)
{
	bool ret;

	ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
	ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
	return ret;
}

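/*
 * The lookup helpers below return with the matching bucket lock still held,
 * reporting the bucket via *out_bucket; the caller disarms the request and
 * then drops the lock. On a miss, no locks are held on return.
 */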
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
				     struct io_cancel_data *cd,
				     struct io_hash_table *table,
				     struct io_hash_bucket **out_bucket)
{
	struct io_kiocb *req;
	u32 index = hash_long(cd->data, table->hash_bits);
	struct io_hash_bucket *hb = &table->hbs[index];

	*out_bucket = NULL;

	spin_lock(&hb->lock);
	hlist_for_each_entry(req, &hb->list, hash_node) {
		if (cd->data != req->cqe.user_data)
			continue;
		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
			continue;
		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
		}
		*out_bucket = hb;
		return req;
	}
	spin_unlock(&hb->lock);
	return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
					  struct io_cancel_data *cd,
					  struct io_hash_table *table,
					  struct io_hash_bucket **out_bucket)
{
	unsigned nr_buckets = 1U << table->hash_bits;
	struct io_kiocb *req;
	int i;

	*out_bucket = NULL;

	for (i = 0; i < nr_buckets; i++) {
		struct io_hash_bucket *hb = &table->hbs[i];

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node) {
			if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
			    req->file != cd->file)
				continue;
			if (cd->seq == req->work.cancel_seq)
				continue;
			req->work.cancel_seq = cd->seq;
			*out_bucket = hb;
			return req;
		}
		spin_unlock(&hb->lock);
	}
	return NULL;
}

static int io_poll_disarm(struct io_kiocb *req)
{
	if (!req)
		return -ENOENT;
	if (!io_poll_get_ownership(req))
		return -EALREADY;
	io_poll_remove_entries(req);
	hash_del(&req->hash_node);
	return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
			    struct io_hash_table *table)
{
	struct io_hash_bucket *bucket;
	struct io_kiocb *req;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
		req = io_poll_file_find(ctx, cd, table, &bucket);
	else
		req = io_poll_find(ctx, false, cd, table, &bucket);

	if (req)
		io_poll_cancel_req(req);
	if (bucket)
		spin_unlock(&bucket->lock);
	return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		   unsigned issue_flags)
{
	int ret;

	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
	if (ret != -ENOENT)
		return ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

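/*
 * sqe->poll32_events widened the original 16-bit poll_events field; on
 * big-endian the two halfwords are swapped so the in-sqe layout stays
 * compatible with the old field.
 */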
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
				     unsigned int flags)
{
	u32 events;

	events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
	events = swahw32(events);
#endif
	if (!(flags & IORING_POLL_ADD_MULTI))
		events |= EPOLLONESHOT;
	if (!(flags & IORING_POLL_ADD_LEVEL))
		events |= EPOLLET;
	return demangle_poll(events) |
	       (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
	u32 flags;

	if (sqe->buf_index || sqe->splice_fd_in)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
		      IORING_POLL_ADD_MULTI))
		return -EINVAL;
	/* meaningless without update */
	if (flags == IORING_POLL_ADD_MULTI)
		return -EINVAL;

	upd->old_user_data = READ_ONCE(sqe->addr);
	upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
	upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

	upd->new_user_data = READ_ONCE(sqe->off);
	if (!upd->update_user_data && upd->new_user_data)
		return -EINVAL;
	if (upd->update_events)
		upd->events = io_poll_parse_events(sqe, flags);
	else if (sqe->poll32_events)
		return -EINVAL;

	return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	u32 flags;

	if (sqe->buf_index || sqe->off || sqe->addr)
		return -EINVAL;
	flags = READ_ONCE(sqe->len);
	if (flags & ~IORING_POLL_ADD_MULTI)
		return -EINVAL;
	if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
		return -EINVAL;

	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}

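/*
 * Illustrative userspace usage via liburing: io_uring_prep_poll_add(sqe, fd,
 * POLLIN) requests a one-shot poll, while io_uring_prep_poll_multishot()
 * additionally sets IORING_POLL_ADD_MULTI in sqe->len, which
 * io_poll_add_prep() above parses into a multishot poll->events.
 */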
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
	struct io_poll_table ipt;
	int ret;

	ipt.pt._qproc = io_poll_queue_proc;

	/*
	 * If sqpoll or single issuer, there is no contention for ->uring_lock
	 * and we'll end up holding it in tw handlers anyway.
	 */
	if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
		req->flags |= REQ_F_HASH_LOCKED;

	ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
	if (ret > 0) {
		io_req_set_res(req, ipt.result_mask, 0);
		return IOU_OK;
	}
	return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

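/*
 * Illustrative userspace usage via liburing: io_uring_prep_poll_remove(sqe,
 * user_data) cancels an armed poll, and io_uring_prep_poll_update() drives
 * the update path handled below.
 */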
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
	struct io_ring_ctx *ctx = req->ctx;
	struct io_hash_bucket *bucket;
	struct io_kiocb *preq;
	int ret2, ret = 0;
	bool locked;

	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	if (!ret2)
		goto found;
	if (ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	io_ring_submit_lock(ctx, issue_flags);
	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
	ret2 = io_poll_disarm(preq);
	if (bucket)
		spin_unlock(&bucket->lock);
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret2) {
		ret = ret2;
		goto out;
	}

found:
	if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
		ret = -EFAULT;
		goto out;
	}

	if (poll_update->update_events || poll_update->update_user_data) {
		/* only replace the event mask bits, keep the behavior flags */
		if (poll_update->update_events) {
			struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

			poll->events &= ~0xffff;
			poll->events |= poll_update->events & 0xffff;
			poll->events |= IO_POLL_UNMASK;
		}
		if (poll_update->update_user_data)
			preq->cqe.user_data = poll_update->new_user_data;

		ret2 = io_poll_add(preq, issue_flags);
		/* successfully updated, don't complete poll request */
		if (!ret2 || ret2 == -EIOCBQUEUED)
			goto out;
	}

	req_set_fail(preq);
	io_req_set_res(preq, -ECANCELED, 0);
	locked = !(issue_flags & IO_URING_F_UNLOCKED);
	io_req_task_complete(preq, &locked);
out:
	if (ret < 0) {
		req_set_fail(req);
		return ret;
	}
	/* complete update request, we're done with it */
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct async_poll, cache));
}