io_uring: don't inline __io_get_cqe()
[linux-block.git] / io_uring / io_uring.h
#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include "io_uring_types.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
};

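/* Out-of-line slow path used by io_get_cqe() below once the cached CQE range is exhausted. */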
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
			      u32 cflags, u64 extra1, u64 extra2);

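/*
 * Fast path for grabbing the next free CQE: hand out entries from the cached
 * cqe_cached..cqe_sentinel range (doubling the offset for CQE32 rings, whose
 * entries are twice the size), falling back to __io_get_cqe() when the cached
 * range runs out.
 */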
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		if (ctx->flags & IORING_SETUP_CQE32) {
			unsigned int off = ctx->cqe_cached - ctx->rings->cqes;

			cqe += off;
		}

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx);
}

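/*
 * Copy the request's prepared CQE into the CQ ring, including the
 * extra1/extra2 fields on CQE32 rings. If no CQE can be obtained, the
 * completion is recorded via io_cqring_event_overflow() instead.
 */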
static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	if (!(ctx->flags & IORING_SETUP_CQE32)) {
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags, 0, 0);

		/*
		 * If we can't get a cq entry, userspace overflowed the
		 * submission (by quite a lot). Increment the overflow count in
		 * the ring.
		 */
		cqe = io_get_cqe(ctx);
		if (likely(cqe)) {
			memcpy(cqe, &req->cqe, sizeof(*cqe));
			return true;
		}

		return io_cqring_event_overflow(ctx, req->cqe.user_data,
						req->cqe.res, req->cqe.flags,
						0, 0);
	} else {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags, extra1, extra2);

		/*
		 * If we can't get a cq entry, userspace overflowed the
		 * submission (by quite a lot). Increment the overflow count in
		 * the ring.
		 */
		cqe = io_get_cqe(ctx);
		if (likely(cqe)) {
			memcpy(cqe, &req->cqe, sizeof(struct io_uring_cqe));
			WRITE_ONCE(cqe->big_cqe[0], extra1);
			WRITE_ONCE(cqe->big_cqe[1], extra2);
			return true;
		}

		return io_cqring_event_overflow(ctx, req->cqe.user_data,
						req->cqe.res, req->cqe.flags,
						extra1, extra2);
	}
}

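/*
 * Mark a request as failed. A request flagged to skip its CQE still posts one
 * on failure; the skip is applied to the rest of the link instead.
 */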
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

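/*
 * Run any task_work pending for the current task, clearing the notification
 * state; returns true if there was anything to process.
 */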
static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

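/*
 * Take the ring lock for a task_work callback if it isn't already held;
 * *locked tracks the lock state for the caller.
 */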
static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

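/*
 * Queue a request on the current submit state's completion batch; unless the
 * request's CQE is skipped, flush_cqes is set so the batched flush knows
 * completions need posting.
 */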
static inline void io_req_add_compl_list(struct io_kiocb *req)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	if (!(req->flags & REQ_F_CQE_SKIP))
		state->flush_cqes = true;
	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

int io_run_task_work_sig(void);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_task_prio_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

#endif