1// SPDX-License-Identifier: GPL-2.0
2#include <linux/kernel.h>
3#include <linux/errno.h>
4#include <linux/file.h>
5#include <linux/slab.h>
6#include <linux/net.h>
7#include <linux/compat.h>
8#include <net/compat.h>
9#include <linux/io_uring.h>
10
11#include <uapi/linux/io_uring.h>
12
f9ead18c 13#include "io_uring.h"
3b77495a 14#include "kbuf.h"
43e0bbbd 15#include "alloc_cache.h"
f9ead18c 16#include "net.h"
06a5464b 17#include "notif.h"
10c7d33e 18#include "rsrc.h"
19
20#if defined(CONFIG_NET)
21struct io_shutdown {
22 struct file *file;
23 int how;
24};
25
26struct io_accept {
27 struct file *file;
28 struct sockaddr __user *addr;
29 int __user *addr_len;
30 int flags;
31 u32 file_slot;
32 unsigned long nofile;
33};
34
35struct io_socket {
36 struct file *file;
37 int domain;
38 int type;
39 int protocol;
40 int flags;
41 u32 file_slot;
42 unsigned long nofile;
43};
44
45struct io_connect {
46 struct file *file;
47 struct sockaddr __user *addr;
48 int addr_len;
3fb1bd68 49 bool in_progress;
74e2e17e 50 bool seen_econnaborted;
51};
52
53struct io_sr_msg {
54 struct file *file;
55 union {
56 struct compat_msghdr __user *umsg_compat;
57 struct user_msghdr __user *umsg;
58 void __user *buf;
59 };
60 unsigned len;
61 unsigned done_io;
293402e5 62 unsigned msg_flags;
76b367a2 63 unsigned nr_multishot_loops;
0b048557 64 u16 flags;
516e82f0 65 /* initialised and used only by !msg send variants */
0b048557 66 u16 addr_len;
b00c51ef 67 u16 buf_group;
092aeedb 68 void __user *addr;
cac9e441 69 void __user *msg_control;
516e82f0 70 /* used only for send zerocopy */
b48c312b 71 struct io_kiocb *notif;
72};
73
74/*
75 * Number of times we'll try to do receives if there's more data. If we
76 * exceed this limit, then add us to the back of the queue and retry from
77 * there. This helps ensure fairness between clients that flood us with data.
78 */
79#define MULTISHOT_MAX_RETRY 32
80
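/*
 * IORING_OP_SHUTDOWN: only sqe->len (the shutdown "how" argument) is used.
 * The request is forced to io-wq since the issue side only runs blocking.
 */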
81int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
82{
f2ccb5ae 83 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
84
85 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
86 sqe->buf_index || sqe->splice_fd_in))
87 return -EINVAL;
88
89 shutdown->how = READ_ONCE(sqe->len);
aebb224f 90 req->flags |= REQ_F_FORCE_ASYNC;
91 return 0;
92}
93
94int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
95{
f2ccb5ae 96 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
97 struct socket *sock;
98 int ret;
99
aebb224f 100 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
101
102 sock = sock_from_file(req->file);
103 if (unlikely(!sock))
104 return -ENOTSOCK;
105
106 ret = __sys_shutdown_sock(sock, shutdown->how);
107 io_req_set_res(req, ret, 0);
108 return IOU_OK;
109}
110
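/*
 * Whether a short send/receive is worth retrying: only for MSG_WAITALL on
 * stream or seqpacket sockets, where the caller asked for the full amount.
 */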
111static bool io_net_retry(struct socket *sock, int flags)
112{
113 if (!(flags & MSG_WAITALL))
114 return false;
115 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
116}
117
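/*
 * Async msghdr state is kept in a per-ring cache (ctx->netmsg_cache) so
 * repeated send/recv requests don't have to reallocate it. Recycling only
 * happens when the ring lock is held, i.e. not for io-wq (UNLOCKED) issue.
 */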
118static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
119{
120 struct io_async_msghdr *hdr = req->async_data;
121
06360426 122 if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
123 return;
124
125 /* Let normal cleanup path reap it if we fail adding to the cache */
126 if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
127 req->async_data = NULL;
128 req->flags &= ~REQ_F_ASYNC_DATA;
129 }
130}
131
132static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
133 unsigned int issue_flags)
134{
135 struct io_ring_ctx *ctx = req->ctx;
136 struct io_cache_entry *entry;
4c17a496 137 struct io_async_msghdr *hdr;
43e0bbbd 138
139 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
140 entry = io_alloc_cache_get(&ctx->netmsg_cache);
141 if (entry) {
142 hdr = container_of(entry, struct io_async_msghdr, cache);
143 hdr->free_iov = NULL;
144 req->flags |= REQ_F_ASYNC_DATA;
145 req->async_data = hdr;
146 return hdr;
147 }
148 }
149
150 if (!io_alloc_async_data(req)) {
151 hdr = req->async_data;
152 hdr->free_iov = NULL;
153 return hdr;
154 }
155 return NULL;
156}
157
158static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
159{
160 /* ->prep_async is always called from the submission context */
161 return io_msg_alloc_async(req, 0);
162}
163
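/*
 * Stash the (possibly on-stack) msghdr state in allocated async data so the
 * request can be retried later; returns -EAGAIN on success so the caller
 * re-queues the request, or -ENOMEM if the allocation fails.
 */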
f9ead18c 164static int io_setup_async_msg(struct io_kiocb *req,
165 struct io_async_msghdr *kmsg,
166 unsigned int issue_flags)
f9ead18c 167{
3f743e9b 168 struct io_async_msghdr *async_msg;
f9ead18c 169
3f743e9b 170 if (req_has_async_data(req))
f9ead18c 171 return -EAGAIN;
858c293e 172 async_msg = io_msg_alloc_async(req, issue_flags);
43e0bbbd 173 if (!async_msg) {
174 kfree(kmsg->free_iov);
175 return -ENOMEM;
176 }
177 req->flags |= REQ_F_NEED_CLEANUP;
178 memcpy(async_msg, kmsg, sizeof(*kmsg));
179 if (async_msg->msg.msg_name)
180 async_msg->msg.msg_name = &async_msg->addr;
181
182 if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
183 return -EAGAIN;
184
f9ead18c 185 /* if we're using fast_iov, set it to the new one */
4b61152e 186 if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
187 size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
188 async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
3e4cb6eb 189 }
190
191 return -EAGAIN;
192}
193
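/*
 * Copy the user msghdr (compat and native variants below) and import its
 * iovec. With provided buffers (REQ_F_BUFFER_SELECT) only zero or one iovec
 * is accepted, and just the length is recorded; the actual buffer is
 * selected at issue time.
 */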
52307ac4 194#ifdef CONFIG_COMPAT
195static int io_compat_msg_copy_hdr(struct io_kiocb *req,
196 struct io_async_msghdr *iomsg,
197 struct compat_msghdr *msg, int ddir)
198{
199 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
200 struct compat_iovec __user *uiov;
201 int ret;
202
c5597802 203 if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
204 return -EFAULT;
205
c5597802 206 uiov = compat_ptr(msg->msg_iov);
207 if (req->flags & REQ_F_BUFFER_SELECT) {
208 compat_ssize_t clen;
209
210 iomsg->free_iov = NULL;
c5597802 211 if (msg->msg_iovlen == 0) {
52307ac4 212 sr->len = 0;
c5597802 213 } else if (msg->msg_iovlen > 1) {
214 return -EINVAL;
215 } else {
216 if (!access_ok(uiov, sizeof(*uiov)))
217 return -EFAULT;
218 if (__get_user(clen, &uiov->iov_len))
219 return -EFAULT;
220 if (clen < 0)
221 return -EINVAL;
222 sr->len = clen;
223 }
224
225 return 0;
226 }
227
228 iomsg->free_iov = iomsg->fast_iov;
c5597802 229 ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
230 UIO_FASTIOV, &iomsg->free_iov,
231 &iomsg->msg.msg_iter, true);
232 if (unlikely(ret < 0))
233 return ret;
234
235 return 0;
236}
237#endif
238
239static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
240 struct user_msghdr *msg, int ddir)
241{
242 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
243 int ret;
244
792060de 245 if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
246 return -EFAULT;
247
248 ret = -EFAULT;
249 unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end);
250 unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end);
251 unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end);
252 unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end);
253 unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end);
254 unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end);
255 msg->msg_flags = 0;
256
52307ac4 257 if (req->flags & REQ_F_BUFFER_SELECT) {
c5597802 258 if (msg->msg_iovlen == 0) {
259 sr->len = iomsg->fast_iov[0].iov_len = 0;
260 iomsg->fast_iov[0].iov_base = NULL;
261 iomsg->free_iov = NULL;
c5597802 262 } else if (msg->msg_iovlen > 1) {
263 ret = -EINVAL;
264 goto ua_end;
52307ac4 265 } else {
266 /* we only need the length for provided buffers */
267 if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
268 goto ua_end;
269 unsafe_get_user(iomsg->fast_iov[0].iov_len,
270 &msg->msg_iov[0].iov_len, ua_end);
271 sr->len = iomsg->fast_iov[0].iov_len;
272 iomsg->free_iov = NULL;
273 }
274 ret = 0;
275ua_end:
276 user_access_end();
277 return ret;
278 }
279
792060de 280 user_access_end();
52307ac4 281 iomsg->free_iov = iomsg->fast_iov;
c5597802 282 ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
283 &iomsg->free_iov, &iomsg->msg.msg_iter, false);
284 if (unlikely(ret < 0))
285 return ret;
286
287 return 0;
288}
289
290static int io_sendmsg_copy_hdr(struct io_kiocb *req,
291 struct io_async_msghdr *iomsg)
292{
f2ccb5ae 293 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
c5597802 294 struct user_msghdr msg;
cac9e441 295 int ret;
296
297 iomsg->msg.msg_name = &iomsg->addr;
298 iomsg->msg.msg_iter.nr_segs = 0;
299
300#ifdef CONFIG_COMPAT
301 if (unlikely(req->ctx->compat)) {
302 struct compat_msghdr cmsg;
52307ac4 303
304 ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
305 if (unlikely(ret))
306 return ret;
52307ac4 307
308 return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
309 }
310#endif
f9ead18c 311
312 ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
313 if (unlikely(ret))
314 return ret;
315
316 ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
317
cac9e441 318 /* save msg_control as sys_sendmsg() overwrites it */
26fed836 319 sr->msg_control = iomsg->msg.msg_control_user;
cac9e441 320 return ret;
321}
322
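/*
 * The non-msghdr send/send-zc variants may carry a destination address in
 * the SQE. It is copied into async data (here at prep time, or via
 * io_setup_async_addr() when going async) so retries use a stable kernel
 * copy rather than the on-stack sockaddr_storage.
 */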
516e82f0 323int io_send_prep_async(struct io_kiocb *req)
581711c4 324{
ac9e5784 325 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
326 struct io_async_msghdr *io;
327 int ret;
328
329 if (!zc->addr || req_has_async_data(req))
330 return 0;
331 io = io_msg_alloc_async_prep(req);
332 if (!io)
581711c4 333 return -ENOMEM;
334 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
335 return ret;
336}
337
338static int io_setup_async_addr(struct io_kiocb *req,
6ae61b7a 339 struct sockaddr_storage *addr_storage,
340 unsigned int issue_flags)
341{
6ae61b7a 342 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
343 struct io_async_msghdr *io;
344
6ae61b7a 345 if (!sr->addr || req_has_async_data(req))
581711c4 346 return -EAGAIN;
347 io = io_msg_alloc_async(req, issue_flags);
348 if (!io)
581711c4 349 return -ENOMEM;
6ae61b7a 350 memcpy(&io->addr, addr_storage, sizeof(io->addr));
351 return -EAGAIN;
352}
353
354int io_sendmsg_prep_async(struct io_kiocb *req)
355{
356 int ret;
357
358 if (!io_msg_alloc_async_prep(req))
359 return -ENOMEM;
360 ret = io_sendmsg_copy_hdr(req, req->async_data);
361 if (!ret)
362 req->flags |= REQ_F_NEED_CLEANUP;
363 return ret;
364}
365
366void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
367{
368 struct io_async_msghdr *io = req->async_data;
369
370 kfree(io->free_iov);
371}
372
373int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
374{
f2ccb5ae 375 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
f9ead18c 376
377 sr->done_io = 0;
378
379 if (req->opcode == IORING_OP_SEND) {
380 if (READ_ONCE(sqe->__pad3[0]))
381 return -EINVAL;
382 sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
383 sr->addr_len = READ_ONCE(sqe->addr_len);
384 } else if (sqe->addr2 || sqe->file_index) {
f9ead18c 385 return -EINVAL;
516e82f0 386 }
387
388 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
389 sr->len = READ_ONCE(sqe->len);
390 sr->flags = READ_ONCE(sqe->ioprio);
391 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
392 return -EINVAL;
393 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
394 if (sr->msg_flags & MSG_DONTWAIT)
395 req->flags |= REQ_F_NOWAIT;
396
397#ifdef CONFIG_COMPAT
398 if (req->ctx->compat)
399 sr->msg_flags |= MSG_CMSG_COMPAT;
400#endif
401 return 0;
402}
403
404static void io_req_msg_cleanup(struct io_kiocb *req,
405 struct io_async_msghdr *kmsg,
406 unsigned int issue_flags)
407{
408 req->flags &= ~REQ_F_NEED_CLEANUP;
409 /* fast path, check for non-NULL to avoid function call */
410 if (kmsg->free_iov)
411 kfree(kmsg->free_iov);
412 io_netmsg_recycle(req, issue_flags);
413}
414
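/*
 * Issue side of IORING_OP_SENDMSG. Partial progress is accumulated in
 * sr->done_io and the request is retried via async data when io_net_retry()
 * allows it, so the final CQE carries the total number of bytes sent.
 */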
415int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
416{
f2ccb5ae 417 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
418 struct io_async_msghdr iomsg, *kmsg;
419 struct socket *sock;
420 unsigned flags;
421 int min_ret = 0;
422 int ret;
423
424 sock = sock_from_file(req->file);
425 if (unlikely(!sock))
426 return -ENOTSOCK;
427
428 if (req_has_async_data(req)) {
429 kmsg = req->async_data;
26fed836 430 kmsg->msg.msg_control_user = sr->msg_control;
431 } else {
432 ret = io_sendmsg_copy_hdr(req, &iomsg);
433 if (ret)
434 return ret;
435 kmsg = &iomsg;
436 }
437
438 if (!(req->flags & REQ_F_POLLED) &&
439 (sr->flags & IORING_RECVSEND_POLL_FIRST))
43e0bbbd 440 return io_setup_async_msg(req, kmsg, issue_flags);
441
442 flags = sr->msg_flags;
443 if (issue_flags & IO_URING_F_NONBLOCK)
444 flags |= MSG_DONTWAIT;
445 if (flags & MSG_WAITALL)
446 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
447
448 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
449
450 if (ret < min_ret) {
451 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
43e0bbbd 452 return io_setup_async_msg(req, kmsg, issue_flags);
f9ead18c 453 if (ret > 0 && io_net_retry(sock, flags)) {
454 kmsg->msg.msg_controllen = 0;
455 kmsg->msg.msg_control = NULL;
f9ead18c 456 sr->done_io += ret;
186daf23 457 req->flags |= REQ_F_BL_NO_RECYCLE;
43e0bbbd 458 return io_setup_async_msg(req, kmsg, issue_flags);
f9ead18c 459 }
460 if (ret == -ERESTARTSYS)
461 ret = -EINTR;
462 req_set_fail(req);
463 }
d9b44188 464 io_req_msg_cleanup(req, kmsg, issue_flags);
465 if (ret >= 0)
466 ret += sr->done_io;
467 else if (sr->done_io)
468 ret = sr->done_io;
469 io_req_set_res(req, ret, 0);
470 return IOU_OK;
471}
472
473int io_send(struct io_kiocb *req, unsigned int issue_flags)
474{
516e82f0 475 struct sockaddr_storage __address;
f2ccb5ae 476 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
f9ead18c 477 struct msghdr msg;
478 struct socket *sock;
479 unsigned flags;
480 int min_ret = 0;
481 int ret;
482
483 msg.msg_name = NULL;
484 msg.msg_control = NULL;
485 msg.msg_controllen = 0;
486 msg.msg_namelen = 0;
487 msg.msg_ubuf = NULL;
488
489 if (sr->addr) {
490 if (req_has_async_data(req)) {
491 struct io_async_msghdr *io = req->async_data;
492
493 msg.msg_name = &io->addr;
494 } else {
495 ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
496 if (unlikely(ret < 0))
497 return ret;
498 msg.msg_name = (struct sockaddr *)&__address;
499 }
500 msg.msg_namelen = sr->addr_len;
501 }
502
503 if (!(req->flags & REQ_F_POLLED) &&
504 (sr->flags & IORING_RECVSEND_POLL_FIRST))
516e82f0 505 return io_setup_async_addr(req, &__address, issue_flags);
506
507 sock = sock_from_file(req->file);
508 if (unlikely(!sock))
509 return -ENOTSOCK;
510
4b61152e 511 ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
512 if (unlikely(ret))
513 return ret;
514
515 flags = sr->msg_flags;
516 if (issue_flags & IO_URING_F_NONBLOCK)
517 flags |= MSG_DONTWAIT;
518 if (flags & MSG_WAITALL)
519 min_ret = iov_iter_count(&msg.msg_iter);
520
b841b901 521 flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
522 msg.msg_flags = flags;
523 ret = sock_sendmsg(sock, &msg);
524 if (ret < min_ret) {
525 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
526 return io_setup_async_addr(req, &__address, issue_flags);
527
528 if (ret > 0 && io_net_retry(sock, flags)) {
529 sr->len -= ret;
530 sr->buf += ret;
531 sr->done_io += ret;
186daf23 532 req->flags |= REQ_F_BL_NO_RECYCLE;
516e82f0 533 return io_setup_async_addr(req, &__address, issue_flags);
f9ead18c 534 }
535 if (ret == -ERESTARTSYS)
536 ret = -EINTR;
537 req_set_fail(req);
538 }
539 if (ret >= 0)
540 ret += sr->done_io;
541 else if (sr->done_io)
542 ret = sr->done_io;
543 io_req_set_res(req, ret, 0);
544 return IOU_OK;
545}
546
547static int io_recvmsg_mshot_prep(struct io_kiocb *req,
548 struct io_async_msghdr *iomsg,
86bcacc9 549 int namelen, size_t controllen)
9bb66906 550{
551 if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
552 (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
553 int hdr;
554
555 if (unlikely(namelen < 0))
556 return -EOVERFLOW;
8ede3db5 557 if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
558 namelen, &hdr))
559 return -EOVERFLOW;
8ede3db5 560 if (check_add_overflow(hdr, controllen, &hdr))
561 return -EOVERFLOW;
562
563 iomsg->namelen = namelen;
564 iomsg->controllen = controllen;
565 return 0;
566 }
9bb66906 567
c5597802 568 return 0;
569}
570
571static int io_recvmsg_copy_hdr(struct io_kiocb *req,
572 struct io_async_msghdr *iomsg)
f9ead18c 573{
7fa875b8 574 struct user_msghdr msg;
575 int ret;
576
577 iomsg->msg.msg_name = &iomsg->addr;
578 iomsg->msg.msg_iter.nr_segs = 0;
f9ead18c
JA
579
580#ifdef CONFIG_COMPAT
581 if (unlikely(req->ctx->compat)) {
582 struct compat_msghdr cmsg;
72c531f8 583
584 ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
585 if (unlikely(ret))
586 return ret;
9bb66906 587
588 ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
589 if (unlikely(ret))
f9ead18c 590 return ret;
f9ead18c 591
592 return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
593 cmsg.msg_controllen);
594 }
595#endif
596
597 ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
598 if (unlikely(ret))
599 return ret;
f9ead18c 600
601 ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
602 if (unlikely(ret))
603 return ret;
f9ead18c 604
605 return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
606 msg.msg_controllen);
607}
608
609int io_recvmsg_prep_async(struct io_kiocb *req)
610{
52307ac4 611 struct io_async_msghdr *iomsg;
612 int ret;
613
614 if (!io_msg_alloc_async_prep(req))
615 return -ENOMEM;
616 iomsg = req->async_data;
617 ret = io_recvmsg_copy_hdr(req, iomsg);
618 if (!ret)
619 req->flags |= REQ_F_NEED_CLEANUP;
620 return ret;
621}
622
623#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
624
625int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
626{
f2ccb5ae 627 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
f9ead18c 628
629 sr->done_io = 0;
630
631 if (unlikely(sqe->file_index || sqe->addr2))
632 return -EINVAL;
633
634 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
635 sr->len = READ_ONCE(sqe->len);
636 sr->flags = READ_ONCE(sqe->ioprio);
b3fdea6e 637 if (sr->flags & ~(RECVMSG_FLAGS))
f9ead18c 638 return -EINVAL;
7605c43d 639 sr->msg_flags = READ_ONCE(sqe->msg_flags);
640 if (sr->msg_flags & MSG_DONTWAIT)
641 req->flags |= REQ_F_NOWAIT;
642 if (sr->msg_flags & MSG_ERRQUEUE)
643 req->flags |= REQ_F_CLEAR_POLLIN;
644 if (sr->flags & IORING_RECV_MULTISHOT) {
645 if (!(req->flags & REQ_F_BUFFER_SELECT))
646 return -EINVAL;
647 if (sr->msg_flags & MSG_WAITALL)
648 return -EINVAL;
649 if (req->opcode == IORING_OP_RECV && sr->len)
650 return -EINVAL;
651 req->flags |= REQ_F_APOLL_MULTISHOT;
652 /*
653 * Store the buffer group for this multishot receive separately,
654 * as if we end up doing an io-wq based issue that selects a
655 * buffer, it has to be committed immediately and that will
656 * clear ->buf_list. This means we lose the link to the buffer
657 * list, and the eventual buffer put on completion then cannot
658 * restore it.
659 */
660 sr->buf_group = req->buf_index;
b3fdea6e 661 }
662
663#ifdef CONFIG_COMPAT
664 if (req->ctx->compat)
665 sr->msg_flags |= MSG_CMSG_COMPAT;
666#endif
76b367a2 667 sr->nr_multishot_loops = 0;
668 return 0;
669}
670
671static inline void io_recv_prep_retry(struct io_kiocb *req)
672{
f2ccb5ae 673 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
b3fdea6e 674
b5311dbc 675 req->flags &= ~REQ_F_BL_EMPTY;
676 sr->done_io = 0;
677 sr->len = 0; /* get from the provided buffer */
b00c51ef 678 req->buf_index = sr->buf_group;
679}
680
681/*
9bb66906 682 * Finishes io_recv and io_recvmsg.
683 *
684 * Returns true if it is actually finished, or false if it should run
685 * again (for multishot).
686 */
9bb66906 687static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
7d41bcb7 688 struct msghdr *msg, bool mshot_finished,
100d6b17 689 unsigned issue_flags)
b3fdea6e 690{
691 unsigned int cflags;
692
693 cflags = io_put_kbuf(req, issue_flags);
fb6328bc 694 if (msg->msg_inq > 0)
695 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
696
697 /*
698 * Fill CQE for this receive and see if we should keep trying to
699 * receive from this socket.
700 */
701 if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
702 io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
91e5d765 703 *ret, cflags | IORING_CQE_F_MORE)) {
704 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
705 int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
706
707 io_recv_prep_retry(req);
708 /* Known not-empty or unknown state, retry */
fb6328bc 709 if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq < 0) {
710 if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
711 return false;
712 /* mshot retries exceeded, force a requeue */
713 sr->nr_multishot_loops = 0;
714 mshot_retry_ret = IOU_REQUEUE;
715 }
91e5d765 716 if (issue_flags & IO_URING_F_MULTISHOT)
76b367a2 717 *ret = mshot_retry_ret;
718 else
719 *ret = -EAGAIN;
720 return true;
721 }
722
723 /* Finish the request / stop multishot. */
724 io_req_set_res(req, *ret, cflags);
725
100d6b17 726 if (issue_flags & IO_URING_F_MULTISHOT)
b3fdea6e 727 *ret = IOU_STOP_MULTISHOT;
728 else
729 *ret = IOU_OK;
730 return true;
731}
732
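/*
 * For multishot recvmsg the selected provided buffer is carved up: a struct
 * io_uring_recvmsg_out header plus space for the source address and control
 * data sits at the front, and the payload follows it.
 */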
733static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
734 struct io_sr_msg *sr, void __user **buf,
735 size_t *len)
736{
737 unsigned long ubuf = (unsigned long) *buf;
738 unsigned long hdr;
739
740 hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
741 kmsg->controllen;
742 if (*len < hdr)
743 return -EFAULT;
744
745 if (kmsg->controllen) {
746 unsigned long control = ubuf + hdr - kmsg->controllen;
747
d1f6222c 748 kmsg->msg.msg_control_user = (void __user *) control;
749 kmsg->msg.msg_controllen = kmsg->controllen;
750 }
751
752 sr->buf = *buf; /* stash for later copy */
d1f6222c 753 *buf = (void __user *) (ubuf + hdr);
754 kmsg->payloadlen = *len = *len - hdr;
755 return 0;
756}
757
758struct io_recvmsg_multishot_hdr {
759 struct io_uring_recvmsg_out msg;
760 struct sockaddr_storage addr;
761};
762
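/*
 * One round of multishot recvmsg: receive the payload, then copy the
 * filled-in io_uring_recvmsg_out header (and any truncated source address)
 * to the start of the selected buffer.
 */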
763static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
764 struct io_async_msghdr *kmsg,
765 unsigned int flags, bool *finished)
766{
767 int err;
768 int copy_len;
769 struct io_recvmsg_multishot_hdr hdr;
770
771 if (kmsg->namelen)
772 kmsg->msg.msg_name = &hdr.addr;
773 kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
774 kmsg->msg.msg_namelen = 0;
775
776 if (sock->file->f_flags & O_NONBLOCK)
777 flags |= MSG_DONTWAIT;
778
779 err = sock_recvmsg(sock, &kmsg->msg, flags);
780 *finished = err <= 0;
781 if (err < 0)
782 return err;
783
784 hdr.msg = (struct io_uring_recvmsg_out) {
785 .controllen = kmsg->controllen - kmsg->msg.msg_controllen,
786 .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
787 };
788
789 hdr.msg.payloadlen = err;
790 if (err > kmsg->payloadlen)
791 err = kmsg->payloadlen;
792
793 copy_len = sizeof(struct io_uring_recvmsg_out);
794 if (kmsg->msg.msg_namelen > kmsg->namelen)
795 copy_len += kmsg->namelen;
796 else
797 copy_len += kmsg->msg.msg_namelen;
798
799 /*
800 * "fromlen shall refer to the value before truncation.."
801 * 1003.1g
802 */
803 hdr.msg.namelen = kmsg->msg.msg_namelen;
804
805 /* ensure that there is no gap between hdr and sockaddr_storage */
806 BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
807 sizeof(struct io_uring_recvmsg_out));
808 if (copy_to_user(io->buf, &hdr, copy_len)) {
809 *finished = true;
810 return -EFAULT;
811 }
812
813 return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
814 kmsg->controllen + err;
815}
816
817int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
818{
f2ccb5ae 819 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
820 struct io_async_msghdr iomsg, *kmsg;
821 struct socket *sock;
822 unsigned flags;
823 int ret, min_ret = 0;
824 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
9bb66906 825 bool mshot_finished = true;
826
827 sock = sock_from_file(req->file);
828 if (unlikely(!sock))
829 return -ENOTSOCK;
830
831 if (req_has_async_data(req)) {
832 kmsg = req->async_data;
833 } else {
834 ret = io_recvmsg_copy_hdr(req, &iomsg);
835 if (ret)
836 return ret;
837 kmsg = &iomsg;
838 }
839
840 if (!(req->flags & REQ_F_POLLED) &&
841 (sr->flags & IORING_RECVSEND_POLL_FIRST))
43e0bbbd 842 return io_setup_async_msg(req, kmsg, issue_flags);
f9ead18c 843
844 flags = sr->msg_flags;
845 if (force_nonblock)
846 flags |= MSG_DONTWAIT;
17add5ce 847
9bb66906 848retry_multishot:
849 if (io_do_buffer_select(req)) {
850 void __user *buf;
9bb66906 851 size_t len = sr->len;
f9ead18c 852
9bb66906 853 buf = io_buffer_select(req, &len, issue_flags);
854 if (!buf)
855 return -ENOBUFS;
856
857 if (req->flags & REQ_F_APOLL_MULTISHOT) {
858 ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
859 if (ret) {
860 io_kbuf_recycle(req, issue_flags);
861 return ret;
862 }
863 }
864
4b61152e 865 iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
866 }
867
f9ead18c 868 kmsg->msg.msg_get_inq = 1;
b65db921 869 kmsg->msg.msg_inq = -1;
78d0d206 870 if (req->flags & REQ_F_APOLL_MULTISHOT) {
871 ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
872 &mshot_finished);
873 } else {
874 /* disable partial retry for recvmsg with cmsg attached */
875 if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
876 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
877
878 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
879 kmsg->uaddr, flags);
78d0d206 880 }
9bb66906 881
f9ead18c 882 if (ret < min_ret) {
883 if (ret == -EAGAIN && force_nonblock) {
884 ret = io_setup_async_msg(req, kmsg, issue_flags);
100d6b17 885 if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
886 io_kbuf_recycle(req, issue_flags);
887 return IOU_ISSUE_SKIP_COMPLETE;
888 }
889 return ret;
890 }
891 if (ret > 0 && io_net_retry(sock, flags)) {
892 sr->done_io += ret;
186daf23 893 req->flags |= REQ_F_BL_NO_RECYCLE;
43e0bbbd 894 return io_setup_async_msg(req, kmsg, issue_flags);
f9ead18c 895 }
896 if (ret == -ERESTARTSYS)
897 ret = -EINTR;
898 req_set_fail(req);
899 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
900 req_set_fail(req);
901 }
902
d4e097da 903 if (ret > 0)
904 ret += sr->done_io;
905 else if (sr->done_io)
906 ret = sr->done_io;
907 else
908 io_kbuf_recycle(req, issue_flags);
909
7d41bcb7 910 if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
911 goto retry_multishot;
912
913 if (mshot_finished)
914 io_req_msg_cleanup(req, kmsg, issue_flags);
915 else if (ret == -EAGAIN)
deaef31b 916 return io_setup_async_msg(req, kmsg, issue_flags);
917
918 return ret;
919}
920
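/*
 * Issue side of IORING_OP_RECV. The multishot variant posts one CQE per
 * receive and loops back to retry_multishot for as long as io_recv_finish()
 * decides another completion can be generated.
 */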
921int io_recv(struct io_kiocb *req, unsigned int issue_flags)
922{
f2ccb5ae 923 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
924 struct msghdr msg;
925 struct socket *sock;
926 unsigned flags;
927 int ret, min_ret = 0;
928 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
b3fdea6e 929 size_t len = sr->len;
930
931 if (!(req->flags & REQ_F_POLLED) &&
932 (sr->flags & IORING_RECVSEND_POLL_FIRST))
933 return -EAGAIN;
934
935 sock = sock_from_file(req->file);
936 if (unlikely(!sock))
937 return -ENOTSOCK;
938
939 msg.msg_name = NULL;
940 msg.msg_namelen = 0;
941 msg.msg_control = NULL;
942 msg.msg_get_inq = 1;
943 msg.msg_controllen = 0;
944 msg.msg_iocb = NULL;
945 msg.msg_ubuf = NULL;
946
947 flags = sr->msg_flags;
948 if (force_nonblock)
949 flags |= MSG_DONTWAIT;
950
b3fdea6e 951retry_multishot:
952 if (io_do_buffer_select(req)) {
953 void __user *buf;
954
b3fdea6e 955 buf = io_buffer_select(req, &len, issue_flags);
956 if (!buf)
957 return -ENOBUFS;
958 sr->buf = buf;
72bd8025 959 sr->len = len;
960 }
961
4b61152e 962 ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
963 if (unlikely(ret))
964 goto out_free;
965
b65db921 966 msg.msg_inq = -1;
f9ead18c 967 msg.msg_flags = 0;
f9ead18c 968
969 if (flags & MSG_WAITALL)
970 min_ret = iov_iter_count(&msg.msg_iter);
971
972 ret = sock_recvmsg(sock, &msg, flags);
973 if (ret < min_ret) {
b3fdea6e 974 if (ret == -EAGAIN && force_nonblock) {
100d6b17 975 if (issue_flags & IO_URING_F_MULTISHOT) {
976 io_kbuf_recycle(req, issue_flags);
977 return IOU_ISSUE_SKIP_COMPLETE;
978 }
979
f9ead18c 980 return -EAGAIN;
b3fdea6e 981 }
982 if (ret > 0 && io_net_retry(sock, flags)) {
983 sr->len -= ret;
984 sr->buf += ret;
985 sr->done_io += ret;
186daf23 986 req->flags |= REQ_F_BL_NO_RECYCLE;
987 return -EAGAIN;
988 }
989 if (ret == -ERESTARTSYS)
990 ret = -EINTR;
991 req_set_fail(req);
992 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
993out_free:
994 req_set_fail(req);
995 }
996
d4e097da 997 if (ret > 0)
998 ret += sr->done_io;
999 else if (sr->done_io)
1000 ret = sr->done_io;
1001 else
1002 io_kbuf_recycle(req, issue_flags);
1003
7d41bcb7 1004 if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
1005 goto retry_multishot;
1006
1007 return ret;
1008}
1009
b0e9b551 1010void io_send_zc_cleanup(struct io_kiocb *req)
b48c312b 1011{
ac9e5784 1012 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
493108d9 1013 struct io_async_msghdr *io;
b48c312b 1014
1015 if (req_has_async_data(req)) {
1016 io = req->async_data;
1017 /* might be ->fast_iov if *msg_copy_hdr failed */
1018 if (io->free_iov != io->fast_iov)
1019 kfree(io->free_iov);
493108d9 1020 }
a75155fa 1021 if (zc->notif) {
1022 io_notif_flush(zc->notif);
1023 zc->notif = NULL;
1024 }
1025}
1026
1027#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
1028#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
1029
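/*
 * Zero-copy send prep: a separate notification request is allocated, whose
 * CQE (flagged IORING_CQE_F_NOTIF) tells the application when the kernel is
 * done with the buffer, in addition to the normal send completion.
 */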
b0e9b551 1030int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
06a5464b 1031{
ac9e5784 1032 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
10c7d33e 1033 struct io_ring_ctx *ctx = req->ctx;
b48c312b 1034 struct io_kiocb *notif;
06a5464b 1035
1036 zc->done_io = 0;
1037
493108d9 1038 if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1039 return -EINVAL;
1040 /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
1041 if (req->flags & REQ_F_CQE_SKIP)
1042 return -EINVAL;
1043
1044 notif = zc->notif = io_alloc_notif(ctx);
1045 if (!notif)
1046 return -ENOMEM;
1047 notif->cqe.user_data = req->cqe.user_data;
1048 notif->cqe.res = 0;
1049 notif->cqe.flags = IORING_CQE_F_NOTIF;
1050 req->flags |= REQ_F_NEED_CLEANUP;
1051
1052 zc->flags = READ_ONCE(sqe->ioprio);
1053 if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
1054 if (zc->flags & ~IO_ZC_FLAGS_VALID)
1055 return -EINVAL;
1056 if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1057 io_notif_set_extended(notif);
1058 io_notif_to_data(notif)->zc_report = true;
1059 }
1060 }
1061
1062 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1063 unsigned idx = READ_ONCE(sqe->buf_index);
1064
1065 if (unlikely(idx >= ctx->nr_user_bufs))
1066 return -EFAULT;
1067 idx = array_index_nospec(idx, ctx->nr_user_bufs);
1068 req->imu = READ_ONCE(ctx->user_bufs[idx]);
e3366e02 1069 io_req_set_rsrc_node(notif, ctx, 0);
10c7d33e 1070 }
06a5464b 1071
1072 if (req->opcode == IORING_OP_SEND_ZC) {
1073 if (READ_ONCE(sqe->__pad3[0]))
1074 return -EINVAL;
1075 zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1076 zc->addr_len = READ_ONCE(sqe->addr_len);
1077 } else {
1078 if (unlikely(sqe->addr2 || sqe->file_index))
1079 return -EINVAL;
1080 if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1081 return -EINVAL;
1082 }
1083
1084 zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1085 zc->len = READ_ONCE(sqe->len);
1086 zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1087 if (zc->msg_flags & MSG_DONTWAIT)
1088 req->flags |= REQ_F_NOWAIT;
092aeedb 1089
1090#ifdef CONFIG_COMPAT
1091 if (req->ctx->compat)
1092 zc->msg_flags |= MSG_CMSG_COMPAT;
1093#endif
1094 return 0;
1095}
1096
1097static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1098 struct iov_iter *from, size_t length)
1099{
1100 skb_zcopy_downgrade_managed(skb);
1101 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1102}
1103
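/*
 * Fill skb frags straight from a bvec iterator (registered buffers whose
 * pages are already pinned), marking the skb as using managed frag
 * references instead of grabbing a page reference per frag.
 */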
1104static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1105 struct iov_iter *from, size_t length)
1106{
1107 struct skb_shared_info *shinfo = skb_shinfo(skb);
1108 int frag = shinfo->nr_frags;
1109 int ret = 0;
1110 struct bvec_iter bi;
1111 ssize_t copied = 0;
1112 unsigned long truesize = 0;
1113
cd9021e8 1114 if (!frag)
3ff1a0d3 1115 shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
cd9021e8 1116 else if (unlikely(!skb_zcopy_managed(skb)))
3ff1a0d3 1117 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1118
1119 bi.bi_size = min(from->count, length);
1120 bi.bi_bvec_done = from->iov_offset;
1121 bi.bi_idx = 0;
1122
1123 while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1124 struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1125
1126 copied += v.bv_len;
1127 truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1128 __skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1129 v.bv_offset, v.bv_len);
1130 bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1131 }
1132 if (bi.bi_size)
1133 ret = -EMSGSIZE;
1134
1135 shinfo->nr_frags = frag;
1136 from->bvec += bi.bi_idx;
1137 from->nr_segs -= bi.bi_idx;
dfb58b17 1138 from->count -= copied;
1139 from->iov_offset = bi.bi_bvec_done;
1140
1141 skb->data_len += copied;
1142 skb->len += copied;
1143 skb->truesize += truesize;
1144
1145 if (sk && sk->sk_type == SOCK_STREAM) {
1146 sk_wmem_queued_add(sk, truesize);
1147 if (!skb_zcopy_pure(skb))
1148 sk_mem_charge(sk, truesize);
1149 } else {
1150 refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1151 }
1152 return ret;
1153}
1154
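/*
 * Issue side of IORING_OP_SEND_ZC. The send completion is posted with
 * IORING_CQE_F_MORE set; the notification CQE follows once the network
 * stack has stopped referencing the user pages.
 */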
b0e9b551 1155int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
06a5464b 1156{
6ae61b7a 1157 struct sockaddr_storage __address;
ac9e5784 1158 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
06a5464b 1159 struct msghdr msg;
06a5464b 1160 struct socket *sock;
6ae91ac9 1161 unsigned msg_flags;
1162 int ret, min_ret = 0;
1163
1164 sock = sock_from_file(req->file);
1165 if (unlikely(!sock))
1166 return -ENOTSOCK;
1167 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1168 return -EOPNOTSUPP;
06a5464b 1169
1170 msg.msg_name = NULL;
1171 msg.msg_control = NULL;
1172 msg.msg_controllen = 0;
1173 msg.msg_namelen = 0;
1174
86dc8f23 1175 if (zc->addr) {
1176 if (req_has_async_data(req)) {
1177 struct io_async_msghdr *io = req->async_data;
1178
6ae61b7a 1179 msg.msg_name = &io->addr;
1180 } else {
1181 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1182 if (unlikely(ret < 0))
1183 return ret;
1184 msg.msg_name = (struct sockaddr *)&__address;
581711c4 1185 }
1186 msg.msg_namelen = zc->addr_len;
1187 }
1188
1189 if (!(req->flags & REQ_F_POLLED) &&
1190 (zc->flags & IORING_RECVSEND_POLL_FIRST))
6ae61b7a 1191 return io_setup_async_addr(req, &__address, issue_flags);
3c840053 1192
10c7d33e 1193 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
de4eda9d 1194 ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1195 (u64)(uintptr_t)zc->buf, zc->len);
1196 if (unlikely(ret))
986e263d 1197 return ret;
cd9021e8 1198 msg.sg_from_iter = io_sg_from_iter;
10c7d33e 1199 } else {
42385b02 1200 io_notif_set_extended(zc->notif);
4b61152e 1201 ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
1202 if (unlikely(ret))
1203 return ret;
b48c312b 1204 ret = io_notif_account_mem(zc->notif, zc->len);
1205 if (unlikely(ret))
1206 return ret;
cd9021e8 1207 msg.sg_from_iter = io_sg_from_iter_iovec;
10c7d33e 1208 }
1209
1210 msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1211 if (issue_flags & IO_URING_F_NONBLOCK)
1212 msg_flags |= MSG_DONTWAIT;
1213 if (msg_flags & MSG_WAITALL)
1214 min_ret = iov_iter_count(&msg.msg_iter);
b841b901 1215 msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1216
1217 msg.msg_flags = msg_flags;
b48c312b 1218 msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1219 ret = sock_sendmsg(sock, &msg);
1220
1221 if (unlikely(ret < min_ret)) {
1222 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
6ae61b7a 1223 return io_setup_async_addr(req, &__address, issue_flags);
581711c4 1224
1225 if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1226 zc->len -= ret;
1227 zc->buf += ret;
1228 zc->done_io += ret;
186daf23 1229 req->flags |= REQ_F_BL_NO_RECYCLE;
6ae61b7a 1230 return io_setup_async_addr(req, &__address, issue_flags);
1231 }
1232 if (ret == -ERESTARTSYS)
1233 ret = -EINTR;
5a848b7c 1234 req_set_fail(req);
1235 }
1236
1237 if (ret >= 0)
1238 ret += zc->done_io;
1239 else if (zc->done_io)
1240 ret = zc->done_io;
b48c312b 1241
1242 /*
1243 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1244 * flushing notif to io_send_zc_cleanup()
1245 */
1246 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1247 io_notif_flush(zc->notif);
1248 req->flags &= ~REQ_F_NEED_CLEANUP;
1249 }
6ae91ac9 1250 io_req_set_res(req, ret, IORING_CQE_F_MORE);
1251 return IOU_OK;
1252}
1253
1254int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1255{
1256 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1257 struct io_async_msghdr iomsg, *kmsg;
1258 struct socket *sock;
6ae91ac9 1259 unsigned flags;
1260 int ret, min_ret = 0;
1261
1262 io_notif_set_extended(sr->notif);
1263
1264 sock = sock_from_file(req->file);
1265 if (unlikely(!sock))
1266 return -ENOTSOCK;
1267 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1268 return -EOPNOTSUPP;
1269
1270 if (req_has_async_data(req)) {
1271 kmsg = req->async_data;
1272 } else {
1273 ret = io_sendmsg_copy_hdr(req, &iomsg);
1274 if (ret)
1275 return ret;
1276 kmsg = &iomsg;
1277 }
1278
1279 if (!(req->flags & REQ_F_POLLED) &&
1280 (sr->flags & IORING_RECVSEND_POLL_FIRST))
1281 return io_setup_async_msg(req, kmsg, issue_flags);
1282
1283 flags = sr->msg_flags | MSG_ZEROCOPY;
1284 if (issue_flags & IO_URING_F_NONBLOCK)
1285 flags |= MSG_DONTWAIT;
1286 if (flags & MSG_WAITALL)
1287 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1288
1289 kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1290 kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1291 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1292
1293 if (unlikely(ret < min_ret)) {
1294 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1295 return io_setup_async_msg(req, kmsg, issue_flags);
1296
1297 if (ret > 0 && io_net_retry(sock, flags)) {
1298 sr->done_io += ret;
186daf23 1299 req->flags |= REQ_F_BL_NO_RECYCLE;
1300 return io_setup_async_msg(req, kmsg, issue_flags);
1301 }
1302 if (ret == -ERESTARTSYS)
1303 ret = -EINTR;
1304 req_set_fail(req);
1305 }
1306 /* fast path, check for non-NULL to avoid function call */
108893dd 1307 if (kmsg->free_iov) {
493108d9 1308 kfree(kmsg->free_iov);
1309 kmsg->free_iov = NULL;
1310 }
1311
1312 io_netmsg_recycle(req, issue_flags);
1313 if (ret >= 0)
1314 ret += sr->done_io;
1315 else if (sr->done_io)
1316 ret = sr->done_io;
1317
1318 /*
1319 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1320 * flushing notif to io_send_zc_cleanup()
1321 */
1322 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1323 io_notif_flush(sr->notif);
1324 req->flags &= ~REQ_F_NEED_CLEANUP;
1325 }
6ae91ac9 1326 io_req_set_res(req, ret, IORING_CQE_F_MORE);
1327 return IOU_OK;
1328}
1329
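/*
 * Failure hook for the send/recv opcodes: report partial progress instead
 * of the error if some data was transferred, and keep IORING_CQE_F_MORE for
 * zero-copy sends that still owe the application a notification CQE.
 */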
1330void io_sendrecv_fail(struct io_kiocb *req)
1331{
1332 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
7e6b638e 1333
9817ad85 1334 if (sr->done_io)
1335 req->cqe.res = sr->done_io;
1336
c4c0009e 1337 if ((req->flags & REQ_F_NEED_CLEANUP) &&
1338 (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1339 req->cqe.flags |= IORING_CQE_F_MORE;
1340}
1341
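/*
 * IORING_OP_ACCEPT, including the multishot variant which keeps accepting
 * connections and posting a CQE per accepted socket until it sees an error
 * or would need to block.
 */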
1342int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1343{
f2ccb5ae 1344 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1345 unsigned flags;
1346
1347 if (sqe->len || sqe->buf_index)
1348 return -EINVAL;
1349
1350 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1351 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1352 accept->flags = READ_ONCE(sqe->accept_flags);
1353 accept->nofile = rlimit(RLIMIT_NOFILE);
1354 flags = READ_ONCE(sqe->ioprio);
1355 if (flags & ~IORING_ACCEPT_MULTISHOT)
1356 return -EINVAL;
1357
1358 accept->file_slot = READ_ONCE(sqe->file_index);
1359 if (accept->file_slot) {
1360 if (accept->flags & SOCK_CLOEXEC)
1361 return -EINVAL;
1362 if (flags & IORING_ACCEPT_MULTISHOT &&
1363 accept->file_slot != IORING_FILE_INDEX_ALLOC)
1364 return -EINVAL;
1365 }
1366 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1367 return -EINVAL;
1368 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1369 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1370 if (flags & IORING_ACCEPT_MULTISHOT)
1371 req->flags |= REQ_F_APOLL_MULTISHOT;
1372 return 0;
1373}
1374
1375int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1376{
f2ccb5ae 1377 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1378 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1379 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1380 bool fixed = !!accept->file_slot;
1381 struct file *file;
1382 int ret, fd;
1383
1384retry:
1385 if (!fixed) {
1386 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1387 if (unlikely(fd < 0))
1388 return fd;
1389 }
1390 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1391 accept->flags);
1392 if (IS_ERR(file)) {
1393 if (!fixed)
1394 put_unused_fd(fd);
1395 ret = PTR_ERR(file);
1396 if (ret == -EAGAIN && force_nonblock) {
1397 /*
1398 * if it's multishot and polled, we don't need to
1399 * return EAGAIN to arm the poll infra since it
1400 * has already been done
1401 */
91482864 1402 if (issue_flags & IO_URING_F_MULTISHOT)
a37ee9e1 1403 return IOU_ISSUE_SKIP_COMPLETE;
1404 return ret;
1405 }
1406 if (ret == -ERESTARTSYS)
1407 ret = -EINTR;
1408 req_set_fail(req);
1409 } else if (!fixed) {
1410 fd_install(fd, file);
1411 ret = fd;
1412 } else {
1413 ret = io_fixed_fd_install(req, issue_flags, file,
1414 accept->file_slot);
1415 }
1416
1417 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1418 io_req_set_res(req, ret, 0);
1419 return IOU_OK;
1420 }
f9ead18c 1421
1422 if (ret < 0)
1423 return ret;
1424 if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
1425 ret, IORING_CQE_F_MORE))
d245bca6 1426 goto retry;
cbd25748 1427
1428 io_req_set_res(req, ret, 0);
1429 return IOU_STOP_MULTISHOT;
1430}
1431
1432int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1433{
f2ccb5ae 1434 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1435
1436 if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1437 return -EINVAL;
1438
1439 sock->domain = READ_ONCE(sqe->fd);
1440 sock->type = READ_ONCE(sqe->off);
1441 sock->protocol = READ_ONCE(sqe->len);
1442 sock->file_slot = READ_ONCE(sqe->file_index);
1443 sock->nofile = rlimit(RLIMIT_NOFILE);
1444
1445 sock->flags = sock->type & ~SOCK_TYPE_MASK;
1446 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1447 return -EINVAL;
1448 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1449 return -EINVAL;
1450 return 0;
1451}
1452
1453int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1454{
f2ccb5ae 1455 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1456 bool fixed = !!sock->file_slot;
1457 struct file *file;
1458 int ret, fd;
1459
1460 if (!fixed) {
1461 fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1462 if (unlikely(fd < 0))
1463 return fd;
1464 }
1465 file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1466 if (IS_ERR(file)) {
1467 if (!fixed)
1468 put_unused_fd(fd);
1469 ret = PTR_ERR(file);
1470 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1471 return -EAGAIN;
1472 if (ret == -ERESTARTSYS)
1473 ret = -EINTR;
1474 req_set_fail(req);
1475 } else if (!fixed) {
1476 fd_install(fd, file);
1477 ret = fd;
1478 } else {
1479 ret = io_fixed_fd_install(req, issue_flags, file,
1480 sock->file_slot);
1481 }
1482 io_req_set_res(req, ret, 0);
1483 return IOU_OK;
1484}
1485
1486int io_connect_prep_async(struct io_kiocb *req)
1487{
1488 struct io_async_connect *io = req->async_data;
f2ccb5ae 1489 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1490
1491 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1492}
1493
1494int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1495{
f2ccb5ae 1496 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1497
1498 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1499 return -EINVAL;
1500
1501 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1502 conn->addr_len = READ_ONCE(sqe->addr2);
74e2e17e 1503 conn->in_progress = conn->seen_econnaborted = false;
1504 return 0;
1505}
1506
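/*
 * IORING_OP_CONNECT. -EINPROGRESS is remembered so a later retry can pick
 * up the final result via sock_error(), and a single -ECONNABORTED is
 * retried before the error is surfaced.
 */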
1507int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1508{
f2ccb5ae 1509 struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1510 struct io_async_connect __io, *io;
1511 unsigned file_flags;
1512 int ret;
1513 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1514
1515 if (req_has_async_data(req)) {
1516 io = req->async_data;
1517 } else {
1518 ret = move_addr_to_kernel(connect->addr,
1519 connect->addr_len,
1520 &__io.address);
1521 if (ret)
1522 goto out;
1523 io = &__io;
1524 }
1525
1526 file_flags = force_nonblock ? O_NONBLOCK : 0;
1527
1528 ret = __sys_connect_file(req->file, &io->address,
1529 connect->addr_len, file_flags);
1530 if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1531 && force_nonblock) {
1532 if (ret == -EINPROGRESS) {
1533 connect->in_progress = true;
f8f9ab2d 1534 } else if (ret == -ECONNABORTED) {
74e2e17e 1535 if (connect->seen_econnaborted)
3fb1bd68 1536 goto out;
1537 connect->seen_econnaborted = true;
1538 }
1539 if (req_has_async_data(req))
1540 return -EAGAIN;
1541 if (io_alloc_async_data(req)) {
1542 ret = -ENOMEM;
1543 goto out;
f9ead18c 1544 }
74e2e17e 1545 memcpy(req->async_data, &__io, sizeof(__io));
1546 return -EAGAIN;
1547 }
1548 if (connect->in_progress) {
1549 /*
1550 * At least bluetooth will return -EBADFD on a re-connect
1551 * attempt, and it's (supposedly) also valid to get -EISCONN
1552 * which means the previous result is good. For both of these,
1553 * grab the sock_error() and use that for the completion.
1554 */
1555 if (ret == -EBADFD || ret == -EISCONN)
1556 ret = sock_error(sock_from_file(req->file)->sk);
1557 }
1558 if (ret == -ERESTARTSYS)
1559 ret = -EINTR;
1560out:
1561 if (ret < 0)
1562 req_set_fail(req);
1563 io_req_set_res(req, ret, 0);
1564 return IOU_OK;
1565}
1566
1567void io_netmsg_cache_free(struct io_cache_entry *entry)
1568{
1569 kfree(container_of(entry, struct io_async_msghdr, cache));
1570}
f9ead18c 1571#endif