// SPDX-License-Identifier: GPL-2.0
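/*
 * io_uring/msg_ring.c: IORING_OP_MSG_RING lets one io_uring send a message
 * to another: either plain data (IORING_MSG_DATA), delivered as a CQE on
 * the target ring, or a fixed file descriptor (IORING_MSG_SEND_FD).
 */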
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "msg_ring.h"

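/* Request state for IORING_OP_MSG_RING, filled from the SQE at prep time. */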
struct io_msg {
	struct file *file;
	u64 user_data;
	u32 len;
	u32 cmd;
	u32 src_fd;
	u32 dst_fd;
	u32 flags;
};

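/*
 * IORING_MSG_DATA: post a CQE to the target ring, with ->user_data and
 * ->len taken from this request. Fails with -EOVERFLOW if the CQE could
 * not be posted.
 */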
static int io_msg_ring_data(struct io_kiocb *req)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (msg->src_fd || msg->dst_fd || msg->flags)
		return -EINVAL;

	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
		return 0;

	return -EOVERFLOW;
}

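/*
 * Undo io_double_lock_ctx(): drop the source ring lock only if we took it
 * ourselves (i.e. the issue path was unlocked), then drop the target's.
 */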
static void io_double_unlock_ctx(struct io_ring_ctx *ctx,
				 struct io_ring_ctx *octx,
				 unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
	mutex_unlock(&octx->uring_lock);
}

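/*
 * Take both ring locks. If the source lock is already held, only a trylock
 * on the target is safe; otherwise lock both in address order so that two
 * rings messaging each other cannot deadlock (ABBA).
 */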
static int io_double_lock_ctx(struct io_ring_ctx *ctx,
			      struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}

	/* Always grab smallest value ctx first. We know ctx != octx. */
	if (ctx < octx) {
		mutex_lock(&ctx->uring_lock);
		mutex_lock(&octx->uring_lock);
	} else {
		mutex_lock(&octx->uring_lock);
		mutex_lock(&ctx->uring_lock);
	}

	return 0;
}

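/*
 * IORING_MSG_SEND_FD: take the fixed file at ->src_fd in the source ring's
 * file table and install it into the target ring's fixed file table at
 * ->dst_fd, then (unless IORING_MSG_RING_CQE_SKIP is set) notify the
 * target with a CQE.
 */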
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long file_ptr;
	struct file *src_file;
	int ret;

	if (target_ctx == ctx)
		return -EINVAL;

	ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
	if (unlikely(ret))
		return ret;

	ret = -EBADF;
	if (unlikely(msg->src_fd >= ctx->nr_user_files))
		goto out_unlock;

	msg->src_fd = array_index_nospec(msg->src_fd, ctx->nr_user_files);
	file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr;
	if (!file_ptr)
		goto out_unlock;

	src_file = (struct file *) (file_ptr & FFS_MASK);
	get_file(src_file);

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0) {
		fput(src_file);
		goto out_unlock;
	}

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;

	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(ctx, target_ctx, issue_flags);
	return ret;
}

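/*
 * Decode the SQE into struct io_msg. buf_index and personality are not
 * supported for this opcode, and only IORING_MSG_RING_CQE_SKIP is a valid
 * flag.
 */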
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
		return -EINVAL;

	return 0;
}

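/*
 * Issue handler: dispatch on the msg_ring sub-command. -EAGAIN is passed
 * back untouched so the core can retry the request from io-wq context,
 * where blocking on the target ring's lock is allowed.
 */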
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret == -EAGAIN)
		return -EAGAIN;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
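
/*
 * Userspace usage sketch (not part of this file; assumes liburing >= 2.2,
 * which provides io_uring_prep_msg_ring() for IORING_MSG_DATA):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&sender_ring);
 *
 *	// posts a CQE with user_data 0xcafe and res 42 to the ring whose
 *	// fd is target_ring_fd; the final 0 is the msg_ring flags field
 *	// (i.e. IORING_MSG_RING_CQE_SKIP not set)
 *	io_uring_prep_msg_ring(sqe, target_ring_fd, 42, 0xcafe, 0);
 *	io_uring_submit(&sender_ring);
 */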