io_uring: move zc reporting from the hot path
io_uring/net.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
};

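/*
 * Backing data for the send/recv style requests (SEND[MSG], RECV[MSG] and
 * the zerocopy send variants); the union reflects that only one of the user
 * msghdr pointers or the raw buffer is used per opcode.
 */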
struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	unsigned len;
	unsigned done_io;
	unsigned msg_flags;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 addr_len;
	void __user *addr;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

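/*
 * Async msghdr state is kept in a per-ring cache so the send/recv paths can
 * avoid repeated allocations; entries are only recycled or reused when the
 * ring lock is held (i.e. not IO_URING_F_UNLOCKED).
 */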
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		hdr = container_of(entry, struct io_async_msghdr, cache);
		hdr->free_iov = NULL;
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

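/*
 * Stash the msghdr into async data so the request can be retried later.
 * Always returns -EAGAIN (or -ENOMEM on allocation failure) so callers can
 * hand the result straight back to the core.
 */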
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				   &iomsg->free_iov);
}

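/*
 * The non-msghdr send variants can carry a destination address. The user's
 * sockaddr may not remain stable across a deferred retry, so it's copied
 * into kernel memory up front (at prep time, or when going async).
 */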
int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

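/*
 * Issue path for IORING_OP_SENDMSG: copy the msghdr (or reuse previously
 * stashed async data), honour POLL_FIRST, and on a short or -EAGAIN result
 * set things up so the retry continues where this attempt left off.
 */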
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

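/*
 * Issue path for IORING_OP_SEND: like io_sendmsg() but builds the msghdr
 * inline from the single user buffer, with an optional destination address.
 */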
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

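/*
 * For multishot recvmsg the name and control data are copied into the
 * provided buffer alongside struct io_uring_recvmsg_out, so make sure the
 * combined header size can't overflow an int.
 */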
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

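/* SQE flags accepted by recv/recvmsg via sqe->ioprio */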
#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished,
				  unsigned issue_flags)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

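/*
 * For multishot recvmsg, carve the selected buffer into a header region
 * (struct io_uring_recvmsg_out, name, control) and a payload region that
 * the actual receive lands in.
 */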
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

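/*
 * One multishot receive: do the actual sock_recvmsg(), then copy the
 * io_uring_recvmsg_out header (plus any address) to the start of the
 * selected buffer. Returns the total number of bytes written to the buffer.
 */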
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 * 1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

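/*
 * Issue path for IORING_OP_RECVMSG; in multishot mode the retry_multishot
 * loop picks a fresh provided buffer and issues another receive after each
 * successful completion.
 */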
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
			      len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

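/*
 * Issue path for IORING_OP_RECV: single-buffer variant of the above, with
 * the same multishot retry handling.
 */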
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

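/*
 * Cleanup for zerocopy sends that never completed normally: free any copied
 * iovec and flush the notification so the NOTIF CQE is still posted.
 */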
void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

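/*
 * Prep for SEND_ZC/SENDMSG_ZC: allocate the notification request that will
 * carry the IORING_CQE_F_NOTIF completion, validate the zc flag mask (the
 * rarely used IORING_SEND_ZC_REPORT_USAGE bit is only checked once the
 * common fast-path mask doesn't match), and resolve a fixed buffer if one
 * was requested.
 */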
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

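/*
 * ->sg_from_iter callbacks for zerocopy sends. The bvec variant
 * (io_sg_from_iter) attaches pages from the registered buffer without
 * taking page references (SKBFL_MANAGED_FRAG_REFS); the iovec variant
 * downgrades the skb and uses the generic __zerocopy_sg_from_iter().
 */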
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

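/*
 * Issue path for IORING_OP_SEND_ZC. On completion the request posts its CQE
 * with IORING_CQE_F_MORE set; the separate notification CQE follows once
 * the network stack has released all references to the buffer.
 */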
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

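/* msghdr-based zerocopy send, sharing the notif handling with io_send_zc() */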
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

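/*
 * Failure hook for the send/recv opcodes: report partial progress instead
 * of the error, and keep IORING_CQE_F_MORE set for zerocopy sends that
 * still owe a notification CQE.
 */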
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

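/*
 * Issue path for IORING_OP_ACCEPT. In multishot mode each accepted
 * connection posts an aux CQE with IORING_CQE_F_MORE and the request loops
 * back to accept again.
 */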
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	return (issue_flags & IO_URING_F_MULTISHOT) ? IOU_STOP_MULTISHOT : IOU_OK;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = false;
	return 0;
}

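/*
 * Issue path for IORING_OP_CONNECT. A nonblocking connect returning
 * -EINPROGRESS flags the request as in_progress and retries via poll;
 * on re-issue the final result is read with sock_error().
 */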
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else {
			if (req_has_async_data(req))
				return -EAGAIN;
			if (io_alloc_async_data(req)) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(req->async_data, &__io, sizeof(__io));
		}
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif