io_uring: fix CQE reordering
io_uring/net.c (linux-block.git)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	void __user			*addr;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

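/*
 * Retry a partial transfer only if MSG_WAITALL was asked for and the socket
 * is connection-oriented (stream or seqpacket); elsewhere a short result
 * is final.
 */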
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

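/*
 * Get async msghdr storage for the request: reuse an entry from the
 * per-ring netmsg_cache when the ring lock is held (!IO_URING_F_UNLOCKED),
 * else fall back to a regular async_data allocation.
 */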
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		struct io_async_msghdr *hdr;

		hdr = container_of(entry, struct io_async_msghdr, cache);
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req))
		return req->async_data;

	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

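/*
 * Snapshot the current msghdr state into req->async_data so the request
 * can be safely retried later; returns -EAGAIN to punt the request to
 * async context, or -ENOMEM if no storage could be allocated.
 */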
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				   &iomsg->free_iov);
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	io->free_iov = NULL;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

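/*
 * Address-only counterpart of io_setup_async_msg() for the send variants
 * that carry a destination address: preserve the already-copied sockaddr
 * across the retry instead of re-reading it from userspace.
 */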
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	io->free_iov = NULL;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

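/*
 * IORING_OP_SEND reuses sqe->addr2/addr_len as an optional destination
 * address (sendto-style); for IORING_OP_SENDMSG those fields must be zero.
 */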
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	/* init the msghdr before the addr block below may set ->msg_name */
	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

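/* Reset state so the next multishot iteration picks a fresh provided buffer. */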
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore.
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

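/*
 * Resulting layout of the selected buffer for multishot recvmsg:
 *
 *	struct io_uring_recvmsg_out | name (namelen) | control (controllen) | payload
 */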
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 * 1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

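/*
 * Single-shot recvmsg hands the msghdr to __sys_recvmsg_sock(); the
 * multishot path loops via retry_multishot, picking a new provided buffer
 * and posting one CQE per received message, and stops on error, an empty
 * socket, or failure to post a completion.
 */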
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
			      len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

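/*
 * Buffer-based counterpart of io_recvmsg(); multishot recv re-runs via
 * retry_multishot with a freshly selected buffer for every message.
 */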
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		kfree(io->free_iov);
	}
	if (zc->notif) {
		zc->notif->flags |= REQ_F_CQE_SKIP;
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

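/*
 * Userspace sketch (liburing-style, illustrative only; the helpers live in
 * liburing, not in this file):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
 *	// expect two CQEs: the send result (IORING_CQE_F_MORE set),
 *	// then a notification CQE carrying IORING_CQE_F_NOTIF.
 */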
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF))
		return -EINVAL;
	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

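/*
 * Append pages from a bvec-backed iterator straight into the skb frag
 * array without taking page references; the pages stay pinned through the
 * zerocopy notification, so the skb is marked as having managed frags.
 */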
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

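/*
 * Zerocopy send: the payload pages are handed to the network stack via
 * msg_ubuf, the request completes with IORING_CQE_F_MORE on success, and
 * the matching IORING_CQE_F_NOTIF CQE fires once the kernel is done with
 * the pages.
 */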
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags, cflags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret < 0 && !zc->done_io)
			zc->notif->flags |= REQ_F_CQE_SKIP;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	io_notif_flush(zc->notif);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
	io_req_set_res(req, ret, cflags);
	return IOU_OK;
}

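/*
 * msghdr-based zerocopy send; same retry/partial-IO handling as
 * io_sendmsg(), with completion split into result and notification CQEs
 * as in io_send_zc().
 */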
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags, cflags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret < 0 && !sr->done_io)
			sr->notif->flags |= REQ_F_CQE_SKIP;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	io_notif_flush(sr->notif);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
	io_req_set_res(req, ret, cflags);
	return IOU_OK;
}

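/*
 * ->fail handler for send/recv requests: report the partial transfer
 * count if any, and for zerocopy variants flush the pending notification,
 * suppressing its CQE when the request failed outright.
 */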
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int res = req->cqe.res;

	if (req->flags & REQ_F_PARTIAL_IO)
		res = sr->done_io;
	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC)) {
		/* preserve notification for partial I/O */
		if (res < 0)
			sr->notif->flags |= REQ_F_CQE_SKIP;
		io_notif_flush(sr->notif);
		sr->notif = NULL;
	}
	io_req_set_res(req, res, req->cqe.flags);
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif