Commit | Line | Data |
---|---|---|
36404b09 JA |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | #include <linux/kernel.h> | |
3 | #include <linux/errno.h> | |
4 | #include <linux/file.h> | |
5 | #include <linux/slab.h> | |
e6130eba | 6 | #include <linux/nospec.h> |
36404b09 JA |
7 | #include <linux/io_uring.h> |
8 | ||
9 | #include <uapi/linux/io_uring.h> | |
10 | ||
36404b09 | 11 | #include "io_uring.h" |
e6130eba JA |
12 | #include "rsrc.h" |
13 | #include "filetable.h" | |
36404b09 JA |
14 | #include "msg_ring.h" |
15 | ||
/*
 * Per-request state for IORING_OP_MSG_RING. The target ring is reached
 * through req->file->private_data; the fields below carry the payload
 * parsed from the SQE in io_msg_ring_prep().
 */
struct io_msg {
	struct file *file;
	/* file grabbed from the source ring's fixed table for MSG_SEND_FD;
	 * held (with REQ_F_NEED_CLEANUP) until installed or cleaned up */
	struct file *src_file;
	/* task_work node used to run completion on the target's submitter task */
	struct callback_head tw;
	u64 user_data;	/* CQE user_data to post to the target ring */
	u32 len;	/* CQE res value to post to the target ring */
	u32 cmd;	/* IORING_MSG_DATA or IORING_MSG_SEND_FD */
	u32 src_fd;	/* fixed-file slot in the source ring (SEND_FD) */
	u32 dst_fd;	/* destination slot in the target ring (SEND_FD) */
	u32 flags;	/* msg_ring_flags; only IORING_MSG_RING_CQE_SKIP allowed */
};
27 | ||
/* Drop the target ring's uring_lock taken via io_double_lock_ctx(). */
static void io_double_unlock_ctx(struct io_ring_ctx *octx)
{
	mutex_unlock(&octx->uring_lock);
}
32 | ||
33 | static int io_double_lock_ctx(struct io_ring_ctx *octx, | |
34 | unsigned int issue_flags) | |
35 | { | |
36 | /* | |
37 | * To ensure proper ordering between the two ctxs, we can only | |
38 | * attempt a trylock on the target. If that fails and we already have | |
39 | * the source ctx lock, punt to io-wq. | |
40 | */ | |
41 | if (!(issue_flags & IO_URING_F_UNLOCKED)) { | |
42 | if (!mutex_trylock(&octx->uring_lock)) | |
43 | return -EAGAIN; | |
44 | return 0; | |
45 | } | |
46 | mutex_lock(&octx->uring_lock); | |
47 | return 0; | |
48 | } | |
49 | ||
11373026 PB |
50 | void io_msg_ring_cleanup(struct io_kiocb *req) |
51 | { | |
52 | struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); | |
53 | ||
54 | if (WARN_ON_ONCE(!msg->src_file)) | |
55 | return; | |
56 | ||
57 | fput(msg->src_file); | |
58 | msg->src_file = NULL; | |
59 | } | |
60 | ||
/*
 * Task-work callback for IORING_MSG_DATA when the target ring requires
 * completions from its submitter task: runs on that task, posts the CQE
 * to the target ring, then completes the MSG_RING request itself.
 */
static void io_msg_tw_complete(struct callback_head *head)
{
	struct io_msg *msg = container_of(head, struct io_msg, tw);
	struct io_kiocb *req = cmd_to_io_kiocb(msg);
	struct io_ring_ctx *target_ctx = req->file->private_data;
	int ret = 0;

	if (current->flags & PF_EXITING) {
		/* submitter task is exiting; posting to its ring is unsafe */
		ret = -EOWNERDEAD;
	} else {
		/*
		 * If the target ring is using IOPOLL mode, then we need to be
		 * holding the uring_lock for posting completions. Other ring
		 * types rely on the regular completion locking, which is
		 * handled while posting.
		 */
		if (target_ctx->flags & IORING_SETUP_IOPOLL)
			mutex_lock(&target_ctx->uring_lock);
		if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
			ret = -EOVERFLOW;
		if (target_ctx->flags & IORING_SETUP_IOPOLL)
			mutex_unlock(&target_ctx->uring_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_queue_tw_complete(req, ret);
}
89 | ||
/*
 * IORING_MSG_DATA: post a CQE carrying msg->user_data/msg->len to the
 * target ring. If the target ring restricts completions to its
 * submitter task and we are not that task, punt the posting to
 * task_work on that task; otherwise post inline, taking the target's
 * uring_lock first when it is an IOPOLL ring.
 *
 * Returns 0 on success, -EOVERFLOW if the CQE could not be posted,
 * -EAGAIN to retry from io-wq, IOU_ISSUE_SKIP_COMPLETE when punted.
 */
static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	/* fd/flags fields are only meaningful for IORING_MSG_SEND_FD */
	if (msg->src_fd || msg->dst_fd || msg->flags)
		return -EINVAL;

	if (target_ctx->task_complete && current != target_ctx->submitter_task) {
		init_task_work(&msg->tw, io_msg_tw_complete);
		if (task_work_add(target_ctx->submitter_task, &msg->tw,
				  TWA_SIGNAL_NO_IPI))
			return -EOWNERDEAD;

		/*
		 * TWA_SIGNAL_NO_IPI sends no IPI; flag the target's SQ ring
		 * so the submitter notices pending task_work.
		 */
		atomic_or(IORING_SQ_TASKRUN, &target_ctx->rings->sq_flags);
		/* completion happens from io_msg_tw_complete(), not here */
		return IOU_ISSUE_SKIP_COMPLETE;
	}

	ret = -EOVERFLOW;
	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
		/* IOPOLL completions require the target's uring_lock */
		if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
			return -EAGAIN;
		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
			ret = 0;
		io_double_unlock_ctx(target_ctx);
	} else {
		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
			ret = 0;
	}
	return ret;
}
122 | ||
11373026 PB |
123 | static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) |
124 | { | |
125 | struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); | |
126 | struct io_ring_ctx *ctx = req->ctx; | |
127 | struct file *file = NULL; | |
128 | unsigned long file_ptr; | |
129 | int idx = msg->src_fd; | |
130 | ||
131 | io_ring_submit_lock(ctx, issue_flags); | |
132 | if (likely(idx < ctx->nr_user_files)) { | |
133 | idx = array_index_nospec(idx, ctx->nr_user_files); | |
134 | file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr; | |
135 | file = (struct file *) (file_ptr & FFS_MASK); | |
136 | if (file) | |
137 | get_file(file); | |
e6130eba | 138 | } |
11373026 PB |
139 | io_ring_submit_unlock(ctx, issue_flags); |
140 | return file; | |
e6130eba JA |
141 | } |
142 | ||
/*
 * Install msg->src_file into the target ring's fixed file table at
 * msg->dst_fd and, unless IORING_MSG_RING_CQE_SKIP is set, post a CQE
 * to the target ring announcing it. On success the file reference is
 * owned by the target table, so the request's cleanup is disarmed.
 */
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct file *src_file = msg->src_file;
	int ret;

	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
		return -EAGAIN;

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0)
		goto out_unlock;

	/* reference transferred to the target's table; don't fput it later */
	msg->src_file = NULL;
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;
	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(target_ctx);
	return ret;
}
174 | ||
6d043ee1 PB |
175 | static void io_msg_tw_fd_complete(struct callback_head *head) |
176 | { | |
177 | struct io_msg *msg = container_of(head, struct io_msg, tw); | |
178 | struct io_kiocb *req = cmd_to_io_kiocb(msg); | |
179 | int ret = -EOWNERDEAD; | |
180 | ||
181 | if (!(current->flags & PF_EXITING)) | |
182 | ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED); | |
183 | if (ret < 0) | |
184 | req_set_fail(req); | |
185 | io_req_queue_tw_complete(req, ret); | |
186 | } | |
187 | ||
/*
 * IORING_MSG_SEND_FD: hand a fixed file from this ring's table to the
 * target ring's table. The source file reference is grabbed once and
 * cached in msg->src_file (protected by REQ_F_NEED_CLEANUP) so that a
 * retry or task_work completion does not repeat the lookup. When the
 * target requires completions from its submitter task, the install is
 * punted to task_work on that task.
 */
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *src_file = msg->src_file;

	/* sending a file to our own ring is pointless; reject it */
	if (target_ctx == ctx)
		return -EINVAL;
	if (!src_file) {
		src_file = io_msg_grab_file(req, issue_flags);
		if (!src_file)
			return -EBADF;
		msg->src_file = src_file;
		req->flags |= REQ_F_NEED_CLEANUP;
	}

	if (target_ctx->task_complete && current != target_ctx->submitter_task) {
		init_task_work(&msg->tw, io_msg_tw_fd_complete);
		if (task_work_add(target_ctx->submitter_task, &msg->tw,
				  TWA_SIGNAL))
			return -EOWNERDEAD;

		/* completion happens from io_msg_tw_fd_complete(), not here */
		return IOU_ISSUE_SKIP_COMPLETE;
	}
	return io_msg_install_complete(req, issue_flags);
}
215 | ||
/*
 * Parse and validate the MSG_RING SQE. Rejects SQE fields the opcode
 * does not use and any msg_ring_flags other than
 * IORING_MSG_RING_CQE_SKIP.
 */
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	/* fields not used by this opcode must be zero */
	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->src_file = NULL;
	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
		return -EINVAL;

	return 0;
}
235 | ||
/*
 * Issue handler for IORING_OP_MSG_RING: dispatch on msg->cmd. -EAGAIN
 * (retry via io-wq) and IOU_ISSUE_SKIP_COMPLETE (completion punted to
 * task_work) are returned to the core untouched; everything else
 * completes the request here.
 */
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	/* the target fd must be an io_uring instance */
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req, issue_flags);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0) {
		/* these two must not post a completion from here */
		if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE)
			return ret;
		req_set_fail(req);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}