#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include "io_uring_types.h"

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
};

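/*
 * Editorial sketch, not part of the original header: a typical issue
 * handler sets its result and returns IOU_OK so the core posts the CQE,
 * or returns IOU_ISSUE_SKIP_COMPLETE when the completion will be posted
 * elsewhere (e.g. via task_work). The handler shape below is
 * illustrative, not a specific opcode:
 *
 *	io_req_set_res(req, ret, 0);
 *	return IOU_OK;
 */
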
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

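/*
 * Editorial sketch, not part of the original header: an opcode handler
 * brackets uring_lock-protected ctx state with the pair above, so the
 * mutex is only actually taken when issuing from an unlocked
 * async-worker context. The body below is illustrative only:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	...access ctx state guarded by uring_lock...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */
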
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

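/*
 * Editorial note: the release store of cq.tail above pairs with an
 * acquire load on the application side, which must read the tail before
 * reading new CQEs. A liburing-style consumer sketch (the names below
 * are assumed, not defined in this header):
 *
 *	tail = smp_load_acquire(cq_ring.ktail);
 *	while (head != tail)
 *		handle_cqe(&cq_ring.cqes[head++ & cq_ring_mask]);
 *	smp_store_release(cq_ring.khead, head);
 */
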
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

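/*
 * Editorial note: the acquire load of sq.tail above pairs with the
 * application's release store after it fills in new SQEs, so the kernel
 * never observes a tail advance before the SQE contents themselves are
 * visible.
 */
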
static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

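/*
 * Editorial sketch, not part of the original header: callers typically
 * poll this helper in wait loops so deferred task_work makes forward
 * progress before sleeping. The loop shape is illustrative only:
 *
 *	while (!done) {
 *		if (io_run_task_work())
 *			continue;
 *		...sleep or poll for events...
 *	}
 */
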
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
		     u32 cflags);
void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
unsigned int io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

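/*
 * Editorial sketch, not part of the original header: a read/recv style
 * handler selects a provided buffer only when one isn't already
 * attached to the request (error handling abbreviated):
 *
 *	if (io_do_buffer_select(req)) {
 *		buf = io_buffer_select(req, &len, issue_flags);
 *		if (!buf)
 *			return -ENOBUFS;
 *	}
 */
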
void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);
static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;
	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
	    (req->flags & REQ_F_PARTIAL_IO))
		return;

	/*
	 * READV uses fields in `struct io_rw` (len/addr) to stash the selected
	 * buffer data. However, if that buffer is recycled, the original request
	 * data stored in addr is lost. Therefore forbid recycling for now.
	 */
	if (req->opcode == IORING_OP_READV)
		return;

	__io_kbuf_recycle(req, issue_flags);
}

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);
int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
			struct file *file, unsigned int file_slot);
int io_install_fixed_file(struct io_kiocb *req, struct file *file,
			  unsigned int issue_flags, u32 slot_index);

int io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc);
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

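/*
 * Editorial sketch, not part of the original header: walking a
 * submission link chain with the macro above, e.g. to fail every
 * request queued behind a head request:
 *
 *	struct io_kiocb *cur;
 *
 *	io_for_each_link(cur, req)
 *		req_set_fail(cur);
 */
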
#endif