// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "alloc_cache.h"
#include "msg_ring.h"

/* All valid masks for MSG_RING */
#define IORING_MSG_RING_MASK		(IORING_MSG_RING_CQE_SKIP | \
					 IORING_MSG_RING_FLAGS_PASS)

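/*
 * Per-request state for IORING_OP_MSG_RING, decoded from the SQE in
 * __io_msg_ring_prep() and carried across any task_work handoff.
 */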
struct io_msg {
	struct file			*file;
	struct file			*src_file;
	struct callback_head		tw;
	u64 user_data;
	u32 len;
	u32 cmd;
	u32 src_fd;
	union {
		u32 dst_fd;
		u32 cqe_flags;
	};
	u32 flags;
};

static void io_double_unlock_ctx(struct io_ring_ctx *octx)
{
	mutex_unlock(&octx->uring_lock);
}

static int io_lock_external_ctx(struct io_ring_ctx *octx,
				unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}
	mutex_lock(&octx->uring_lock);
	return 0;
}

void io_msg_ring_cleanup(struct io_kiocb *req)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (WARN_ON_ONCE(!msg->src_file))
		return;

	fput(msg->src_file);
	msg->src_file = NULL;
}

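/*
 * If the target ring only allows its submitter task to post completions
 * (ctx->task_complete), we cannot post the CQE from this context and must
 * instead hand the message off to the target via task_work.
 */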
static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
{
	return target_ctx->task_complete;
}

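/*
 * Runs as task_work on the target ring: post the carried CQE, then try to
 * recycle the carrier request into the target's msg_cache.
 */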
static void io_msg_tw_complete(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
	if (spin_trylock(&ctx->msg_lock)) {
		if (io_alloc_cache_put(&ctx->msg_cache, req))
			req = NULL;
		spin_unlock(&ctx->msg_lock);
	}
	if (req)
		kmem_cache_free(req_cachep, req);
	percpu_ref_put(&ctx->refs);
}

static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
			      int res, u32 cflags, u64 user_data)
{
	if (!READ_ONCE(ctx->submitter_task)) {
		kmem_cache_free(req_cachep, req);
		return -EOWNERDEAD;
	}
	req->opcode = IORING_OP_NOP;
	req->cqe.user_data = user_data;
	io_req_set_res(req, res, cflags);
	percpu_ref_get(&ctx->refs);
	req->ctx = ctx;
	req->tctx = NULL;
	req->io_task_work.func = io_msg_tw_complete;
	io_req_task_work_add_remote(req, IOU_F_TWQ_LAZY_WAKE);
	return 0;
}

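/*
 * Prefer recycling a request from the target's msg_cache; fall back to a
 * fresh zeroed allocation if the cache lock is contended or the cache is
 * empty.
 */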
static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req = NULL;

	if (spin_trylock(&ctx->msg_lock)) {
		req = io_alloc_cache_get(&ctx->msg_cache);
		spin_unlock(&ctx->msg_lock);
		if (req)
			return req;
	}
	return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
}

static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
			      struct io_msg *msg)
{
	struct io_kiocb *target;
	u32 flags = 0;

	target = io_msg_get_kiocb(target_ctx);
	if (unlikely(!target))
		return -ENOMEM;

	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
		flags = msg->cqe_flags;

	return io_msg_remote_post(target_ctx, target, msg->len, flags,
				  msg->user_data);
}

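/*
 * Post a CQE carrying msg->user_data and msg->len (and, with
 * IORING_MSG_RING_FLAGS_PASS, msg->cqe_flags) to the target ring, either
 * directly or remotely via task_work when the target requires it.
 */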
static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
			      struct io_msg *msg, unsigned int issue_flags)
{
	u32 flags = 0;
	int ret;

	if (msg->src_fd || msg->flags & ~IORING_MSG_RING_FLAGS_PASS)
		return -EINVAL;
	if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;

	if (io_msg_need_remote(target_ctx))
		return io_msg_data_remote(target_ctx, msg);

	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
		flags = msg->cqe_flags;

	ret = -EOVERFLOW;
	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
		if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
			return -EAGAIN;
	}
	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
		ret = 0;
	if (target_ctx->flags & IORING_SETUP_IOPOLL)
		io_double_unlock_ctx(target_ctx);
	return ret;
}

static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	return __io_msg_ring_data(target_ctx, msg, issue_flags);
}

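/*
 * Look up the source fixed file in the sender's file table and pin it;
 * the reference is dropped in io_msg_ring_cleanup() if it never gets
 * installed into the target ring.
 */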
static int io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *node;
	int ret = -EBADF;

	io_ring_submit_lock(ctx, issue_flags);
	node = io_rsrc_node_lookup(&ctx->file_table.data, msg->src_fd);
	if (node) {
		msg->src_file = io_slot_file(node);
		if (msg->src_file)
			get_file(msg->src_file);
		req->flags |= REQ_F_NEED_CLEANUP;
		ret = 0;
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

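/*
 * Install the grabbed file into the target ring's fixed file table and,
 * unless IORING_MSG_RING_CQE_SKIP is set, notify the target with a CQE.
 */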
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct file *src_file = msg->src_file;
	int ret;

	if (unlikely(io_lock_external_ctx(target_ctx, issue_flags)))
		return -EAGAIN;

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0)
		goto out_unlock;

	msg->src_file = NULL;
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;
	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(target_ctx);
	return ret;
}

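/* Runs in the target submitter's task context via task_work. */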
static void io_msg_tw_fd_complete(struct callback_head *head)
{
	struct io_msg *msg = container_of(head, struct io_msg, tw);
	struct io_kiocb *req = cmd_to_io_kiocb(msg);
	int ret = -EOWNERDEAD;

	if (!(current->flags & PF_EXITING))
		ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED);
	if (ret < 0)
		req_set_fail(req);
	io_req_queue_tw_complete(req, ret);
}

static int io_msg_fd_remote(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct task_struct *task = READ_ONCE(ctx->submitter_task);

	if (unlikely(!task))
		return -EOWNERDEAD;

	init_task_work(&msg->tw, io_msg_tw_fd_complete);
	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
		return -EOWNERDEAD;

	return IOU_ISSUE_SKIP_COMPLETE;
}

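/*
 * IORING_MSG_SEND_FD: pass a fixed file from the sender's file table into
 * the target ring's fixed file table, remotely if the target requires it.
 */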
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;

	if (msg->len)
		return -EINVAL;
	if (target_ctx == ctx)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;
	if (!msg->src_file) {
		int ret = io_msg_grab_file(req, issue_flags);

		if (unlikely(ret))
			return ret;
	}

	if (io_msg_need_remote(target_ctx))
		return io_msg_fd_remote(req);
	return io_msg_install_complete(req, issue_flags);
}

static int __io_msg_ring_prep(struct io_msg *msg, const struct io_uring_sqe *sqe)
{
	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->src_file = NULL;
	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_MASK)
		return -EINVAL;

	return 0;
}

int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_msg_ring_prep(io_kiocb_to_cmd(req, struct io_msg), sqe);
}

int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req, issue_flags);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0) {
		if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE)
			return ret;
		req_set_fail(req);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

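/*
 * Synchronous variant used without a submitting ring: decode the message
 * straight from the SQE and post it to the ring identified by sqe->fd.
 */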
int io_uring_sync_msg_ring(struct io_uring_sqe *sqe)
{
	struct io_msg io_msg = { };
	int ret;

	ret = __io_msg_ring_prep(&io_msg, sqe);
	if (unlikely(ret))
		return ret;

	/*
	 * Only data sending supported, not IORING_MSG_SEND_FD as that one
	 * doesn't make sense without a source ring to send files from.
	 */
	if (io_msg.cmd != IORING_MSG_DATA)
		return -EINVAL;

	CLASS(fd, f)(sqe->fd);
	if (fd_empty(f))
		return -EBADF;
	if (!io_is_uring_fops(fd_file(f)))
		return -EBADFD;
	return __io_msg_ring_data(fd_file(f)->private_data,
				  &io_msg, IO_URING_F_UNLOCKED);
}