io_uring: improve io_get_sqe
[linux-block.git] / io_uring / io_uring.h

#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed to indicate
	 * to the poll runner that multishot should be removed and the
	 * result is set in req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

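/*
 * Note (a sketch of usage, not part of this header): an opcode's
 * ->issue() handler typically sets its result via io_req_set_res() and
 * returns IOU_OK so the core posts the CQE, or returns
 * IOU_ISSUE_SKIP_COMPLETE when the completion is posted elsewhere,
 * e.g. from task_work or an io-wq worker.
 */
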
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
		bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

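/* true if the request's file was installed from the fixed file table */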
static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

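/*
 * Assert that CQ posting is properly serialized for this ring: IOPOLL
 * rings complete under ->uring_lock, rings without ->task_complete use
 * ->completion_lock, and DEFER_TASKRUN-style rings may only post CQEs
 * from the submitter task, or from fallback work once the submitter
 * task is exiting.
 */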
#define io_lockdep_assert_cq_locked(ctx)				\
	do {								\
		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
			lockdep_assert_held(&ctx->uring_lock);		\
		} else if (!ctx->task_complete) {			\
			lockdep_assert_held(&ctx->completion_lock);	\
		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
			lockdep_assert(current_work());			\
		} else {						\
			lockdep_assert(current == ctx->submitter_task);	\
		}							\
	} while (0)

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, true);
}

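/* walk a request and every request linked behind it */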
#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

void io_cq_unlock_post(struct io_ring_ctx *ctx);

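/*
 * Grab the next free CQE slot. The fast path hands out entries from
 * the cached range [cqe_cached, cqe_sentinel); note that a CQE32 entry
 * occupies two slots. Once the cache is drained, fall back to
 * __io_get_cqe().
 */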
static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
						       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx, overflow);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	return io_get_cqe_overflow(ctx, false);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Return false and let the caller
	 * account the overflow.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return false;

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

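/* post the request's CQE, spilling to the overflow list if the CQ ring is full */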
static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
				   struct io_kiocb *req)
{
	if (likely(__io_fill_cqe_req(ctx, req)))
		return true;
	return io_req_cqe_overflow(req);
}

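/*
 * A failed request always posts its CQE, so if it was marked
 * REQ_F_CQE_SKIP, clear that and skip the linked CQEs instead.
 */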
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock in that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

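/*
 * Sketch (hypothetical caller, not part of this header): code that
 * touches ->uring_lock protected state from an issue path brackets the
 * access with the pair above, which is a no-op when the lock is
 * already held:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	...update locked ctx state...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */
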
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

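/* wake tasks polling on the ring fd itself */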
static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

/* requires smp_mb() prior, see wq_has_sleeper() */
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (waitqueue_active(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

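/* as above, but supplies the smp_mb() that __io_cqring_wake() requires */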
static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	smp_mb();
	__io_cqring_wake(ctx);
}

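/* true when a full ring's worth of SQ entries is pending consumption */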
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

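/* number of SQ entries queued by userspace that we haven't consumed yet */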
static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

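/* run pending task_work for the current task; returns 1 if any ran */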
static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME))
		resume_user_mode_work(NULL);
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
}

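/*
 * For task_work handlers: take ->uring_lock if this invocation doesn't
 * hold it yet, and record that in *locked so the caller can unlock.
 */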
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

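/* only take the slow flush path if any rarely used ring feature is active */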
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

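/*
 * Take nr references on the current task from the locally cached
 * batch, refilling the cache when it dips below zero.
 */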
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

extern struct kmem_cache *req_cachep;

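/*
 * Pop a request off the free cache; the caller must have ensured the
 * cache isn't empty, e.g. via io_alloc_req_refill(). Cached entries
 * are KASAN-poisoned, so unpoison one before handing it out.
 */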
static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	kasan_unpoison_object_data(req_cachep, req);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

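/*
 * For IORING_SETUP_DEFER_TASKRUN rings, task_work may only be run by
 * the task that set up the ring.
 */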
static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

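/* set the result and queue the request for completion via task_work */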
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

#endif