#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/poll.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "alloc_cache.h"
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"
#include "opdef.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
        IOU_COMPLETE = 0,

        IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,

        /*
         * The request has more work to do and should be retried. io_uring will
         * attempt to wait on the file for eligible opcodes, but otherwise
         * it'll be handed to iowq for blocking execution. It works for normal
         * requests as well as for the multi shot mode.
         */
        IOU_RETRY = -EAGAIN,

        /*
         * Requeue the task_work to restart operations on this request. The
         * actual value isn't important; it should just not be an otherwise
         * valid error code, yet less than -MAX_ERRNO and valid internally.
         */
        IOU_REQUEUE = -3072,
};

struct io_wait_queue {
        struct wait_queue_entry wq;
        struct io_ring_ctx *ctx;
        unsigned cq_tail;
        unsigned cq_min_tail;
        unsigned nr_timeouts;
        int hit_timeout;
        ktime_t min_timeout;
        ktime_t timeout;
        struct hrtimer t;

#ifdef CONFIG_NET_RX_BUSY_POLL
        ktime_t napi_busy_poll_dt;
        bool napi_prefer_busy_poll;
#endif
};

static inline bool io_should_wake(struct io_wait_queue *iowq)
{
        struct io_ring_ctx *ctx = iowq->ctx;
        int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;

        /*
         * Wake up if we have enough events, or if a timeout occurred since we
         * started waiting. For timeouts, we always want to return to userspace,
         * regardless of event count.
         */
        return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
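
/*
 * Worked example (illustrative note, not upstream documentation): the
 * subtraction above is evaluated modulo 2^32 and stored in a signed int, so
 * the check stays correct across CQ tail wraparound. If the waiter's target
 * iowq->cq_tail is 0xffffffff and the ring tail has since wrapped to
 * 0x00000001, the difference wraps to 2, dist >= 0 holds, and the waiter is
 * woken, whereas a plain unsigned comparison of the two tails would not fire.
 */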
71 | ||
09d0a8ea JA |
72 | #define IORING_MAX_ENTRIES 32768 |
73 | #define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES) | |
74 | ||
75 | unsigned long rings_size(unsigned int flags, unsigned int sq_entries, | |
76 | unsigned int cq_entries, size_t *sq_offset); | |
81d8191e | 77 | int io_uring_fill_params(unsigned entries, struct io_uring_params *p); |
20d6b633 | 78 | bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow); |
c0e0d6ba | 79 | int io_run_task_work_sig(struct io_ring_ctx *ctx); |
973fc83f | 80 | void io_req_defer_failed(struct io_kiocb *req, s32 res); |
b529c96a | 81 | bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags); |
f33096a3 | 82 | void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags); |
e5c12945 | 83 | bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags); |
9046c641 PB |
84 | void __io_commit_cqring_flush(struct io_ring_ctx *ctx); |
85 | ||
079afb08 | 86 | void io_req_track_inflight(struct io_kiocb *req); |
9046c641 PB |
87 | struct file *io_file_get_normal(struct io_kiocb *req, int fd); |
88 | struct file *io_file_get_fixed(struct io_kiocb *req, int fd, | |
89 | unsigned issue_flags); | |
90 | ||
8501fe70 | 91 | void __io_req_task_work_add(struct io_kiocb *req, unsigned flags); |
ea910678 | 92 | void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags); |
9046c641 | 93 | void io_req_task_queue(struct io_kiocb *req); |
bcf8a029 | 94 | void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw); |
9046c641 | 95 | void io_req_task_queue_fail(struct io_kiocb *req, int ret); |
bcf8a029 | 96 | void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw); |
af5d68f8 JA |
97 | struct llist_node *io_handle_tw_list(struct llist_node *node, unsigned int *count, unsigned int max_entries); |
98 | struct llist_node *tctx_task_work_run(struct io_uring_task *tctx, unsigned int max_entries, unsigned int *count); | |
9046c641 PB |
99 | void tctx_task_work(struct callback_head *cb); |
100 | __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd); | |
9046c641 | 101 | |
6e76ac59 JT |
102 | int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file, |
103 | int start, int end); | |
6746ee4c | 104 | void io_req_queue_iowq(struct io_kiocb *req); |
6e76ac59 | 105 | |
bcf8a029 | 106 | int io_poll_issue(struct io_kiocb *req, io_tw_token_t tw); |
9046c641 PB |
107 | int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr); |
108 | int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin); | |
ec26c225 | 109 | void __io_submit_flush_completions(struct io_ring_ctx *ctx); |
9046c641 PB |
110 | |
111 | struct io_wq_work *io_wq_free_work(struct io_wq_work *work); | |
112 | void io_wq_submit_work(struct io_wq_work *work); | |
113 | ||
114 | void io_free_req(struct io_kiocb *req); | |
115 | void io_queue_next(struct io_kiocb *req); | |
63809137 | 116 | void io_task_refs_refill(struct io_uring_task *tctx); |
bd1a3783 | 117 | bool __io_alloc_req_refill(struct io_ring_ctx *ctx); |
9046c641 | 118 | |
f03baece | 119 | bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx, |
9046c641 PB |
120 | bool cancel_all); |
121 | ||
c4320315 JA |
122 | void io_activate_pollwq(struct io_ring_ctx *ctx); |
123 | ||
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
        lockdep_assert(in_task());

        if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
                lockdep_assert_held(&ctx->uring_lock);

        if (ctx->flags & IORING_SETUP_IOPOLL) {
                lockdep_assert_held(&ctx->uring_lock);
        } else if (!ctx->task_complete) {
                lockdep_assert_held(&ctx->completion_lock);
        } else if (ctx->submitter_task) {
                /*
                 * ->submitter_task may be NULL and we can still post a CQE,
                 * if the ring has been set up with IORING_SETUP_R_DISABLED.
                 * Not from an SQE, as those cannot be submitted, but via
                 * updating tagged resources.
                 */
                if (!percpu_ref_is_dying(&ctx->refs))
                        lockdep_assert(current == ctx->submitter_task);
        }
#endif
}

static inline bool io_is_compat(struct io_ring_ctx *ctx)
{
        return IS_ENABLED(CONFIG_COMPAT) && unlikely(ctx->compat);
}

static inline void io_req_task_work_add(struct io_kiocb *req)
{
        __io_req_task_work_add(req, 0);
}

static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
        if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
            ctx->submit_state.cq_flush)
                __io_submit_flush_completions(ctx);
}

#define io_for_each_link(pos, head) \
        for (pos = (head); pos; pos = pos->link)

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
                                       struct io_uring_cqe **ret,
                                       bool overflow)
{
        io_lockdep_assert_cq_locked(ctx);

        if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
                if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
                        return false;
        }
        *ret = ctx->cqe_cached;
        ctx->cached_cq_tail++;
        ctx->cqe_cached++;
        if (ctx->flags & IORING_SETUP_CQE32)
                ctx->cqe_cached++;
        return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
        return io_get_cqe_overflow(ctx, ret, false);
}

static inline bool io_defer_get_uncommited_cqe(struct io_ring_ctx *ctx,
                                               struct io_uring_cqe **cqe_ret)
{
        io_lockdep_assert_cq_locked(ctx);

        ctx->submit_state.cq_flush = true;
        return io_get_cqe(ctx, cqe_ret);
}

static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
                                            struct io_kiocb *req)
{
        struct io_uring_cqe *cqe;

        /*
         * If we can't get a cq entry, userspace overflowed the
         * submission (by quite a lot). Increment the overflow count in
         * the ring.
         */
        if (unlikely(!io_get_cqe(ctx, &cqe)))
                return false;

        memcpy(cqe, &req->cqe, sizeof(*cqe));
        if (ctx->flags & IORING_SETUP_CQE32) {
                memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
                memset(&req->big_cqe, 0, sizeof(req->big_cqe));
        }

        if (trace_io_uring_complete_enabled())
                trace_io_uring_complete(req->ctx, req, cqe);
        return true;
}
225 | ||
531113bb JA |
226 | static inline void req_set_fail(struct io_kiocb *req) |
227 | { | |
228 | req->flags |= REQ_F_FAIL; | |
229 | if (req->flags & REQ_F_CQE_SKIP) { | |
230 | req->flags &= ~REQ_F_CQE_SKIP; | |
231 | req->flags |= REQ_F_SKIP_LINK_CQES; | |
232 | } | |
233 | } | |
234 | ||
de23077e JA |
235 | static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags) |
236 | { | |
237 | req->cqe.res = res; | |
238 | req->cqe.flags = cflags; | |
239 | } | |
240 | ||
49f7a309 | 241 | static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache, |
fa359552 | 242 | struct io_kiocb *req) |
49f7a309 | 243 | { |
ff74954e JA |
244 | if (cache) { |
245 | req->async_data = io_cache_alloc(cache, GFP_KERNEL); | |
246 | } else { | |
247 | const struct io_issue_def *def = &io_issue_defs[req->opcode]; | |
49f7a309 | 248 | |
ff74954e JA |
249 | WARN_ON_ONCE(!def->async_size); |
250 | req->async_data = kmalloc(def->async_size, GFP_KERNEL); | |
251 | } | |
ef623a64 GKB |
252 | if (req->async_data) |
253 | req->flags |= REQ_F_ASYNC_DATA; | |
254 | return req->async_data; | |
255 | } | |
256 | ||
99f15d8d JA |
257 | static inline bool req_has_async_data(struct io_kiocb *req) |
258 | { | |
259 | return req->flags & REQ_F_ASYNC_DATA; | |
260 | } | |
261 | ||
17bc2837 | 262 | static inline void io_put_file(struct io_kiocb *req) |
531113bb | 263 | { |
17bc2837 JA |
264 | if (!(req->flags & REQ_F_FIXED_FILE) && req->file) |
265 | fput(req->file); | |
531113bb JA |
266 | } |
267 | ||
static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
                                         unsigned issue_flags)
{
        lockdep_assert_held(&ctx->uring_lock);
        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
                                       unsigned issue_flags)
{
        /*
         * "Normal" inline submissions always hold the uring_lock, since we
         * grab it from the system call. The same is true for the SQPOLL
         * offload. The only exception is when the request has been detached
         * and is issued from an async worker thread; grab the lock for that
         * case.
         */
        if (unlikely(issue_flags & IO_URING_F_UNLOCKED))
                mutex_lock(&ctx->uring_lock);
        lockdep_assert_held(&ctx->uring_lock);
}
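
/*
 * Illustrative sketch only (hypothetical helper, not upstream code): the
 * usual pattern is to bracket any ->uring_lock protected update with the
 * pair above, forwarding the caller's issue_flags unchanged so the mutex is
 * only taken when running unlocked (e.g. from an io-wq worker).
 */
static inline void io_example_locked_update(struct io_ring_ctx *ctx,
                                            unsigned issue_flags)
{
        io_ring_submit_lock(ctx, issue_flags);
        /* ... modify state protected by ->uring_lock here ... */
        io_ring_submit_unlock(ctx, issue_flags);
}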
289 | ||
f9ead18c JA |
290 | static inline void io_commit_cqring(struct io_ring_ctx *ctx) |
291 | { | |
292 | /* order cqe stores with ring update */ | |
293 | smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail); | |
294 | } | |
295 | ||
7b235dd8 PB |
296 | static inline void io_poll_wq_wake(struct io_ring_ctx *ctx) |
297 | { | |
bca39f39 | 298 | if (wq_has_sleeper(&ctx->poll_wq)) |
7b235dd8 PB |
299 | __wake_up(&ctx->poll_wq, TASK_NORMAL, 0, |
300 | poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); | |
301 | } | |
302 | ||
6e7248ad | 303 | static inline void io_cqring_wake(struct io_ring_ctx *ctx) |
f3b44f92 JA |
304 | { |
305 | /* | |
44648532 JA |
306 | * Trigger waitqueue handler on all waiters on our waitqueue. This |
307 | * won't necessarily wake up all the tasks, io_should_wake() will make | |
308 | * that decision. | |
309 | * | |
310 | * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter | |
311 | * set in the mask so that if we recurse back into our own poll | |
312 | * waitqueue handlers, we know we have a dependency between eventfd or | |
313 | * epoll and should terminate multishot poll at that point. | |
f3b44f92 | 314 | */ |
6e7248ad | 315 | if (wq_has_sleeper(&ctx->cq_wait)) |
44648532 JA |
316 | __wake_up(&ctx->cq_wait, TASK_NORMAL, 0, |
317 | poll_to_key(EPOLL_URING_WAKE | EPOLLIN)); | |
f3b44f92 JA |
318 | } |
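
/*
 * Illustrative sketch only (hypothetical helper, not upstream code): the
 * typical completion-posting sequence built from the helpers above. Copy the
 * CQE into the ring, publish the new tail with a release store, then wake
 * anyone sleeping in cqring wait. The real path in io_uring.c additionally
 * handles CQ locking, eventfd signalling and overflow flushing.
 */
static inline void io_example_post_and_wake(struct io_ring_ctx *ctx,
                                            struct io_kiocb *req)
{
        if (io_fill_cqe_req(ctx, req))
                io_commit_cqring(ctx);
        io_cqring_wake(ctx);
}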
319 | ||
17437f31 JA |
320 | static inline bool io_sqring_full(struct io_ring_ctx *ctx) |
321 | { | |
322 | struct io_rings *r = ctx->rings; | |
323 | ||
28aabffa JA |
324 | /* |
325 | * SQPOLL must use the actual sqring head, as using the cached_sq_head | |
326 | * is race prone if the SQPOLL thread has grabbed entries but not yet | |
327 | * committed them to the ring. For !SQPOLL, this doesn't matter, but | |
328 | * since this helper is just used for SQPOLL sqring waits (or POLLOUT), | |
329 | * just read the actual sqring head unconditionally. | |
330 | */ | |
331 | return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries; | |
17437f31 JA |
332 | } |
333 | ||
334 | static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) | |
335 | { | |
336 | struct io_rings *rings = ctx->rings; | |
e3ef728f | 337 | unsigned int entries; |
17437f31 JA |
338 | |
339 | /* make sure SQ entry isn't read before tail */ | |
e3ef728f JA |
340 | entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head; |
341 | return min(entries, ctx->sq_entries); | |
17437f31 JA |
342 | } |
343 | ||
c0e0d6ba | 344 | static inline int io_run_task_work(void) |
17437f31 | 345 | { |
af5d68f8 JA |
346 | bool ret = false; |
347 | ||
7cfe7a09 JA |
348 | /* |
349 | * Always check-and-clear the task_work notification signal. With how | |
350 | * signaling works for task_work, we can find it set with nothing to | |
351 | * run. We need to clear it for that case, like get_signal() does. | |
352 | */ | |
353 | if (test_thread_flag(TIF_NOTIFY_SIGNAL)) | |
354 | clear_notify_signal(); | |
b5d3ae20 JA |
355 | /* |
356 | * PF_IO_WORKER never returns to userspace, so check here if we have | |
357 | * notify work that needs processing. | |
358 | */ | |
af5d68f8 JA |
359 | if (current->flags & PF_IO_WORKER) { |
360 | if (test_thread_flag(TIF_NOTIFY_RESUME)) { | |
361 | __set_current_state(TASK_RUNNING); | |
362 | resume_user_mode_work(NULL); | |
363 | } | |
364 | if (current->io_uring) { | |
365 | unsigned int count = 0; | |
366 | ||
8f7033aa | 367 | __set_current_state(TASK_RUNNING); |
af5d68f8 JA |
368 | tctx_task_work_run(current->io_uring, UINT_MAX, &count); |
369 | if (count) | |
370 | ret = true; | |
371 | } | |
2f2bb1ff | 372 | } |
46a525e1 | 373 | if (task_work_pending(current)) { |
17437f31 | 374 | __set_current_state(TASK_RUNNING); |
46a525e1 | 375 | task_work_run(); |
af5d68f8 | 376 | ret = true; |
17437f31 JA |
377 | } |
378 | ||
af5d68f8 | 379 | return ret; |
c0e0d6ba DY |
380 | } |
381 | ||
40cfe553 DW |
382 | static inline bool io_local_work_pending(struct io_ring_ctx *ctx) |
383 | { | |
f46b9cdb | 384 | return !llist_empty(&ctx->work_llist) || !llist_empty(&ctx->retry_llist); |
40cfe553 DW |
385 | } |
386 | ||
dac6a0ea JA |
387 | static inline bool io_task_work_pending(struct io_ring_ctx *ctx) |
388 | { | |
40cfe553 | 389 | return task_work_pending(current) || io_local_work_pending(ctx); |
dac6a0ea JA |
390 | } |
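
/*
 * Illustrative sketch only (hypothetical helper, not upstream code): a wait
 * loop would typically drain task_work until a pass runs nothing, then use
 * io_task_work_pending() to decide whether it is safe to go to sleep.
 */
static inline bool io_example_drain_task_work(struct io_ring_ctx *ctx)
{
        while (io_run_task_work())
                ;
        return io_task_work_pending(ctx);
}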
391 | ||
bcf8a029 | 392 | static inline void io_tw_lock(struct io_ring_ctx *ctx, io_tw_token_t tw) |
aa1e90f6 | 393 | { |
8e5b3b89 | 394 | lockdep_assert_held(&ctx->uring_lock); |
aa1e90f6 PB |
395 | } |
396 | ||
9da070b1 PB |
397 | /* |
398 | * Don't complete immediately but use deferred completion infrastructure. | |
399 | * Protected by ->uring_lock and can only be used either with | |
400 | * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex. | |
401 | */ | |
402 | static inline void io_req_complete_defer(struct io_kiocb *req) | |
403 | __must_hold(&req->ctx->uring_lock) | |
aa1e90f6 PB |
404 | { |
405 | struct io_submit_state *state = &req->ctx->submit_state; | |
406 | ||
9da070b1 PB |
407 | lockdep_assert_held(&req->ctx->uring_lock); |
408 | ||
aa1e90f6 PB |
409 | wq_list_add_tail(&req->comp_list, &state->compl_reqs); |
410 | } | |
411 | ||
46929b08 PB |
412 | static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx) |
413 | { | |
8fb7aee0 | 414 | if (unlikely(ctx->off_timeout_used || |
bca39f39 | 415 | ctx->has_evfd || ctx->poll_activated)) |
46929b08 PB |
416 | __io_commit_cqring_flush(ctx); |
417 | } | |
418 | ||
63809137 PB |
419 | static inline void io_get_task_refs(int nr) |
420 | { | |
421 | struct io_uring_task *tctx = current->io_uring; | |
422 | ||
423 | tctx->cached_refs -= nr; | |
424 | if (unlikely(tctx->cached_refs < 0)) | |
425 | io_task_refs_refill(tctx); | |
426 | } | |
427 | ||
bd1a3783 PB |
428 | static inline bool io_req_cache_empty(struct io_ring_ctx *ctx) |
429 | { | |
430 | return !ctx->submit_state.free_list.next; | |
431 | } | |
432 | ||
c1755c25 BL |
433 | extern struct kmem_cache *req_cachep; |
434 | ||
c8576f3e | 435 | static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx) |
bd1a3783 | 436 | { |
c1755c25 | 437 | struct io_kiocb *req; |
bd1a3783 | 438 | |
c1755c25 | 439 | req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list); |
c1755c25 BL |
440 | wq_stack_extract(&ctx->submit_state.free_list); |
441 | return req; | |
bd1a3783 PB |
442 | } |
443 | ||
c8576f3e PB |
444 | static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req) |
445 | { | |
446 | if (unlikely(io_req_cache_empty(ctx))) { | |
447 | if (!__io_alloc_req_refill(ctx)) | |
448 | return false; | |
449 | } | |
450 | *req = io_extract_req(ctx); | |
451 | return true; | |
452 | } | |
453 | ||
140102ae PB |
454 | static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx) |
455 | { | |
456 | return likely(ctx->submitter_task == current); | |
457 | } | |
458 | ||
76de6749 PB |
459 | static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx) |
460 | { | |
6567506b PB |
461 | return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) || |
462 | ctx->submitter_task == current); | |
76de6749 PB |
463 | } |
464 | ||
/*
 * Terminate the request if either of these conditions is true:
 *
 * 1) It's being executed by the original task, but that task is marked
 *    with PF_EXITING as it's exiting.
 * 2) PF_KTHREAD is set, in which case the invoker of the task_work is
 *    our fallback task_work.
 */
static inline bool io_should_terminate_tw(void)
{
        return current->flags & (PF_KTHREAD | PF_EXITING);
}
477 | ||
833b5dff PB |
478 | static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res) |
479 | { | |
480 | io_req_set_res(req, res, 0); | |
481 | req->io_task_work.func = io_req_task_complete; | |
482 | io_req_task_work_add(req); | |
483 | } | |
484 | ||
96c7d4f8 BL |
485 | /* |
486 | * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each | |
487 | * slot. | |
488 | */ | |
489 | static inline size_t uring_sqe_size(struct io_ring_ctx *ctx) | |
490 | { | |
491 | if (ctx->flags & IORING_SETUP_SQE128) | |
492 | return 2 * sizeof(struct io_uring_sqe); | |
493 | return sizeof(struct io_uring_sqe); | |
494 | } | |
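
/*
 * Illustrative sketch only (hypothetical helper, not upstream code): because
 * IORING_SETUP_SQE128 doubles the per-slot size, indexing into the SQE array
 * has to scale by uring_sqe_size() rather than sizeof(struct io_uring_sqe).
 */
static inline struct io_uring_sqe *io_example_sqe_at(struct io_ring_ctx *ctx,
                                                     unsigned int index)
{
        return (struct io_uring_sqe *)((char *)ctx->sq_sqes +
                                       (size_t)index * uring_sqe_size(ctx));
}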

static inline bool io_file_can_poll(struct io_kiocb *req)
{
        if (req->flags & REQ_F_CAN_POLL)
                return true;
        if (req->file && file_can_poll(req->file)) {
                req->flags |= REQ_F_CAN_POLL;
                return true;
        }
        return false;
}

static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
{
        if (ctx->clockid == CLOCK_MONOTONIC)
                return ktime_get();

        return ktime_get_with_offset(ctx->clock_offset);
}

enum {
        IO_CHECK_CQ_OVERFLOW_BIT,
        IO_CHECK_CQ_DROPPED_BIT,
};

static inline bool io_has_work(struct io_ring_ctx *ctx)
{
        return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
                io_local_work_pending(ctx);
}
#endif