// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
	bool seen_econnaborted;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	unsigned len;
	unsigned done_io;
	unsigned msg_flags;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 addr_len;
	u16 buf_group;
	void __user *addr;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow to post CQEs from the original
	 * task context. Usual request completions will be handled in other
	 * generic paths but multipoll may decide to post extra cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
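
/*
 * Only retry a short transfer in the kernel when MSG_WAITALL was asked
 * for and the socket type delivers an ordered byte or record stream
 * (stream or seqpacket).
 */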
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
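
/*
 * Stash the request's async msghdr in the per-ring netmsg_cache so the
 * next network request can reuse it. Only possible when the ring lock is
 * held, i.e. not when issued with IO_URING_F_UNLOCKED.
 */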
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
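
/*
 * Allocate async msghdr state for this request, preferring a cached
 * entry from the ring's netmsg_cache when the ring is locked, and
 * falling back to a regular io_alloc_async_data() allocation.
 */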
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}
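
/*
 * Move the on-stack msghdr into persistent async_data so the request can
 * be retried later. Returns -EAGAIN so the issue path defers the request,
 * or -ENOMEM if no async state could be allocated.
 */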
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				   &iomsg->free_iov);
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}
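
/*
 * Like io_setup_async_msg(), but for the plain (non-msghdr) send
 * variants: only the destination address needs preserving across a
 * retry.
 */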
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}
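
/*
 * Prepare a send request. For IORING_OP_SEND, addr2/addr_len may carry an
 * optional destination address; for IORING_OP_SENDMSG those SQE fields
 * must be unused.
 */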
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
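
/*
 * Issue a plain IORING_OP_SEND. The destination address, if any, either
 * lives in async_data after an earlier deferral, or is copied in from
 * userspace onto the stack here.
 */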
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
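
/*
 * Check whether the multishot recvmsg header (io_uring_recvmsg_out plus
 * name and control data) would overflow an int when summed up.
 */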
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct msghdr *msg, bool mshot_finished,
				  unsigned issue_flags)
{
	unsigned int cflags;

	cflags = io_put_kbuf(req, issue_flags);
	if (msg->msg_inq && msg->msg_inq != -1U)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
			io_recv_prep_retry(req);
			/* Known not-empty or unknown state, retry */
			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
			    msg->msg_inq == -1U)
				return false;
			if (issue_flags & IO_URING_F_MULTISHOT)
				*ret = IOU_ISSUE_SKIP_COMPLETE;
			else
				*ret = -EAGAIN;
			return true;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}
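
/*
 * Carve the selected buffer up for multishot recvmsg: the front holds a
 * struct io_uring_recvmsg_out plus room for the name and control data,
 * and the remainder receives the payload.
 */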
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};
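
/*
 * Run one multishot recvmsg round: receive into the payload area of the
 * selected buffer, then copy the header (and any possibly-truncated name
 * and control metadata) back to the front of that buffer.
 */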
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 * 1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1U;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_inq = -1U;
	msg.msg_flags = 0;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}
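
/*
 * Zerocopy sends accept the flags below. Prep allocates a companion notif
 * request whose CQE (IORING_CQE_F_NOTIF) signals when the kernel is done
 * with the user buffer; REPORT_USAGE additionally asks that notification
 * to report whether the data ended up being copied rather than sent
 * zerocopy.
 */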
#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}
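
/*
 * skb filling callbacks for zerocopy sends. The iovec variant drops the
 * managed-frags state and lets the generic __zerocopy_sg_from_iter() pin
 * the user pages; the bvec variant below maps already-pinned
 * registered-buffer pages straight into skb frags without taking extra
 * page references.
 */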
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}
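
/*
 * Issue a zerocopy send. Completion is split in two: the request CQE
 * carries the send result with IORING_CQE_F_MORE set, and the notif
 * posts a second CQE (IORING_CQE_F_NOTIF) once the network stack no
 * longer references the buffer.
 */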
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}
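
/*
 * Common failure path for send/recv requests: report any partial
 * progress as the result, and for zerocopy sends keep IORING_CQE_F_MORE
 * set when a notif CQE is still outstanding.
 */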
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}
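
/*
 * Accept one connection, or, for multishot, keep accepting and posting an
 * aux CQE with IORING_CQE_F_MORE per connection until posting fails, at
 * which point the request terminates with -ECANCELED.
 */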
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}
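
/*
 * Nonblocking connect quirks handled here: on -EINPROGRESS the request is
 * marked in_progress and retried, with the final status read back via
 * sock_error() on re-issue; a first -ECONNABORTED is retried once before
 * being reported.
 */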
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
			return -EAGAIN;
		}
		if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif