// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"
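
/*
 * Free callback for entries in ctx->cmd_cache: drops any cached iovec and
 * frees the io_async_cmd itself.
 */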
void io_cmd_cache_free(const void *entry)
{
	struct io_async_cmd *ac = (struct io_async_cmd *)entry;

	io_vec_free(&ac->vec);
	kfree(ac);
}
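
/*
 * Release the async command state of a request. Driver-private op_data is
 * always freed; with the ring lock held (no IO_URING_F_UNLOCKED), the
 * io_async_cmd is recycled into ctx->cmd_cache, dropping an oversized
 * cached iovec first.
 */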
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_async_cmd *ac = req->async_data;
	struct io_uring_cmd_data *cache = &ac->data;

	if (cache->op_data) {
		kfree(cache->op_data);
		cache->op_data = NULL;
	}

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;

	io_alloc_cache_vec_kasan(&ac->vec);
	if (ac->vec.nr > IO_VEC_CACHE_SOFT_CAP)
		io_vec_free(&ac->vec);

	if (io_alloc_cache_put(&req->ctx->cmd_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
	}
}
void io_uring_cmd_cleanup(struct io_kiocb *req)
{
	io_req_uring_cleanup(req, 0);
}
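
/*
 * Cancel commands that were marked cancelable: walk ctx->cancelable_uring_cmd
 * and re-issue each matching command with IO_URING_F_CANCEL set. Returns true
 * if at least one cancelation was attempted.
 */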
bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct io_uring_task *tctx, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd, hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->tctx != tctx)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}
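
/* Drop a command from the cancelable list again, if it was marked. */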
static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}
/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring canceling and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
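
/*
 * Illustrative, hypothetical ->uring_cmd() handler showing the intended use
 * of io_uring_cmd_mark_cancelable() (the foo_* names below are made up):
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			foo_abort_hw_cmd(cmd);
 *			io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
 *			return 0;
 *		}
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		foo_queue_hw_cmd(cmd);
 *		return -EIOCBQUEUED;
 *	}
 *
 * The driver must still cope with the race between this cancel path and the
 * normal completion of the command.
 */

/*
 * Task-work handler: invoke the driver's callback with deferred completion
 * allowed; if io_should_terminate_tw() says the normal completion path is
 * gone, tell the driver via IO_URING_F_TASK_DEAD.
 */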
static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned int flags = IO_URING_F_COMPLETE_DEFER;

	if (io_should_terminate_tw())
		flags |= IO_URING_F_TASK_DEAD;

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, flags);
}
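
/*
 * Schedule @task_work_cb to run from the task context of the original
 * submitter, via task_work.
 */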
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}
/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
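
/*
 * Allocate (or recycle from ctx->cmd_cache) the io_async_cmd for this
 * request and stash a stable copy of the SQE in it.
 */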
static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_async_cmd *ac;

	/* see io_uring_cmd_get_async_data() */
	BUILD_BUG_ON(offsetof(struct io_async_cmd, data) != 0);

	ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
	if (!ac)
		return -ENOMEM;
	ac->data.op_data = NULL;

	/*
	 * Unconditionally cache the SQE for now - this is only needed for
	 * requests that go async, but prep handlers must ensure that any
	 * sqe data is stable beyond prep. Since uring_cmd is special in
	 * that it doesn't read in per-op data, play it safe and ensure that
	 * any SQE data is stable beyond prep. This can later get relaxed.
	 */
	memcpy(ac->sqes, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = ac->sqes;
	return 0;
}
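
/* Prep handler: validate uring_cmd_flags and latch SQE fields for issue. */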
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;
	if (ioucmd->flags & IORING_URING_CMD_FIXED)
		req->buf_index = READ_ONCE(sqe->buf_index);
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}
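
/*
 * Issue handler: map the ring setup flags onto IO_URING_F_* issue flags and
 * call into ->uring_cmd(). -EIOCBQUEUED and -EAGAIN are passed back to the
 * core for async handling; any other return value completes the request here.
 */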
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (io_is_compat(ctx))
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
		return ret;
	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}
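
/* Import a registered (fixed) buffer into @iter for a uring_cmd. */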
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
		return -EINVAL;

	return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
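
/*
 * Import a user iovec that targets registered buffers into @iter, caching
 * the kernel-side iovec in the command's async data.
 */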
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
	struct io_async_cmd *ac = req->async_data;
	int ret;

	if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
		return -EINVAL;

	ret = io_prep_reg_iovec(req, &ac->vec, uvec, uvec_segs);
	if (ret)
		return ret;

	return io_import_reg_vec(ddir, iter, req, &ac->vec, uvec_segs, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed_vec);
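
/* Punt the command to io-wq so that the issue side may block. */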
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}