// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow to post CQEs from the original
	 * task context. Usual request completions will be handled in other
	 * generic paths but multipoll may decide to post extra cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

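/*
 * A transfer that returned fewer bytes than requested under MSG_WAITALL
 * can only be retried on stream-like sockets, where resuming doesn't
 * discard the rest of the message.
 */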
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

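/*
 * On completion, try to stash the async msghdr back into the ctx cache
 * for reuse instead of freeing it. Only done when the ring is locked,
 * as the cache isn't safe to touch from unlocked (io-wq) context.
 */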
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

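/*
 * Allocate async msghdr state for a request, preferring a cached entry
 * when the ring is locked and falling back to a fresh allocation.
 */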
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

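/*
 * Move an on-stack msghdr into persistent async data so the request can
 * be retried later: internal pointers (msg_name, the fast_iov iterator)
 * are re-pointed at the copy. Returns -EAGAIN so the caller requeues.
 */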
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				   &iomsg->free_iov);
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

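/*
 * Like io_setup_async_msg(), but for the addr-carrying send variants
 * that don't use a full msghdr: preserve the destination address across
 * the async retry.
 */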
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

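/*
 * Verify that the io_uring_recvmsg_out header plus the user-supplied
 * name and control lengths fit in an int without overflowing.
 */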
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished,
				  unsigned issue_flags)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
			io_recv_prep_retry(req);
			return false;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

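/*
 * Carve up the selected buffer for a multishot recvmsg: reserve room at
 * the front for the io_uring_recvmsg_out header, name and control data,
 * and advance *buf and *len to cover just the payload area.
 */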
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

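/*
 * Perform one receive for a multishot recvmsg and lay the result out in
 * the selected buffer: struct io_uring_recvmsg_out first, then the
 * source address, then control data, with the payload after them.
 */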
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 * 1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

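/*
 * recvmsg issue path, covering both single-shot and multishot modes; in
 * multishot mode each received message posts its own CQE and the
 * request re-arms until buffers run out or the shot is terminated.
 */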
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
			      len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

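/*
 * Prep for zerocopy sends: allocate the notification request that will
 * later post the IORING_CQE_F_NOTIF completion once the network stack
 * is done with the user pages.
 */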
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

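/*
 * iovec-backed zerocopy sends can't assume the caller keeps the pages
 * alive, so downgrade the skb from managed frags and fall back to
 * __zerocopy_sg_from_iter(), which takes its own page references.
 */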
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

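/*
 * Fixed-buffer zerocopy send: map the bvec iterator straight into skb
 * frags without taking extra page references; the pages stay pinned by
 * the registered buffer for the lifetime of the request.
 */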
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

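/*
 * Failure hook for send/recv requests: report partial progress as the
 * result, and make sure zerocopy sends that still owe a notification
 * keep IORING_CQE_F_MORE set so userspace waits for the NOTIF CQE.
 */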
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = false;
	return 0;
}

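/*
 * A connect that returned -EINPROGRESS is waited on via poll; when the
 * request is re-issued, the final status is fetched with sock_error().
 */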
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else {
			if (req_has_async_data(req))
				return -EAGAIN;
			if (io_alloc_async_data(req)) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(req->async_data, &__io, sizeof(__io));
		}
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif