#include <linux/errno.h>
#include <linux/lockdep.h>
#include "io_uring_types.h"

enum {
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,
};

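/*
 * Mark a request as failed. If the submitter asked to suppress this
 * request's CQE (REQ_F_CQE_SKIP), the failure must still be reported,
 * so that flag is cleared and the skip is shifted onto the CQEs of the
 * links cancelled behind it (REQ_F_SKIP_LINK_CQES).
 */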
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

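/*
 * Typical usage (illustrative sketch, not taken from this file): opcode
 * handlers bracket accesses to per-ring state with these helpers and pass
 * issue_flags through, so the mutex is only taken when running from an
 * unlocked context such as io-wq:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	...access per-ring state (buffer lists, file tables, ...)...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */
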
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

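/*
 * The release store above pairs with the acquire-style read of cq.tail
 * that userspace performs before consuming CQEs, so CQE contents are
 * visible by the time the new tail value is observed.
 */
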
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

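/*
 * Both SQ helpers rely on free-running unsigned ring indices: the
 * subtraction wraps correctly, and the acquire load of sq.tail pairs with
 * the release store userspace issues after filling in new SQEs.
 */
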
static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
		     u32 cflags);
void io_cqring_ev_posted(struct io_ring_ctx *ctx);
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
unsigned int io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;
	/*
	 * For legacy provided buffer mode, don't recycle if we already did
	 * IO to this buffer. For ring-mapped provided buffer mode, we should
	 * increment ring->head to explicitly monopolize the buffer to avoid
	 * multiple use.
	 */
	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
	    (req->flags & REQ_F_PARTIAL_IO))
		return;

	/*
	 * READV uses fields in `struct io_rw` (len/addr) to stash the selected
	 * buffer data. However, if that buffer is recycled, the original
	 * request data stored in addr is lost. Therefore forbid recycling
	 * for now.
	 */
	if (req->opcode == IORING_OP_READV)
		return;

	__io_kbuf_recycle(req, issue_flags);
}

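/*
 * io_kbuf_recycle() is intended for paths that will retry a request later
 * (for example after -EAGAIN or when arming poll), so a provided buffer
 * that was selected but not consumed can be handed back to its group
 * instead of being leaked.
 */
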
struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);
int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
			struct file *file, unsigned int file_slot);
int io_install_fixed_file(struct io_kiocb *req, struct file *file,
			  unsigned int issue_flags, u32 slot_index);

int io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc);
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)
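
/*
 * Example usage (illustrative): walking a submission link chain from its
 * head request, e.g. when matching requests for cancellation:
 *
 *	struct io_kiocb *cur;
 *
 *	io_for_each_link(cur, head) {
 *		if (cur->flags & REQ_F_INFLIGHT)
 *			return true;
 *	}
 */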