io_uring/net: support non-zerocopy sendto
[linux-block.git] / io_uring/net.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	unsigned len;
	unsigned done_io;
	unsigned msg_flags;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 addr_len;
	void __user *addr;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

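/*
 * Retrying a short transfer only makes sense for MSG_WAITALL on stream
 * or seqpacket sockets, where the remainder continues the same message;
 * other socket types are not retried.
 */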
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

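/*
 * Return the async msghdr to the per-ring cache instead of freeing it.
 * Only done when the ring is locked; with IO_URING_F_UNLOCKED the normal
 * cleanup path frees it instead.
 */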
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		struct io_async_msghdr *hdr;

		hdr = container_of(entry, struct io_async_msghdr, cache);
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req))
		return req->async_data;

	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

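/*
 * Stash the on-stack msghdr copy into req->async_data so the request can
 * safely be retried later, then return -EAGAIN to trigger that retry.
 */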
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				   &iomsg->free_iov);
}

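/*
 * For send requests carrying a destination address (sendto semantics),
 * copy the sockaddr in from userspace at prep time if the request is
 * going to be issued async.
 */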
int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

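/*
 * Like io_setup_async_msg(), but for the addr-only send variants:
 * preserve the already-copied destination address across a punt to async
 * context.
 */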
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

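/*
 * IORING_OP_SEND optionally takes a destination address in ->addr2 and
 * ->addr_len, giving it sendto(2) semantics; IORING_OP_SENDMSG carries
 * any address inside the user msghdr instead. A rough userspace sketch
 * of the sendto form (raw SQE fields, without liburing helpers):
 *
 *	sqe->opcode   = IORING_OP_SEND;
 *	sqe->fd       = sockfd;
 *	sqe->addr     = (__u64)(uintptr_t)buf;
 *	sqe->len      = buf_len;
 *	sqe->addr2    = (__u64)(uintptr_t)&dest_sockaddr;
 *	sqe->addr_len = dest_addrlen;
 */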
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

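/*
 * Handles both IORING_OP_SEND proper and the sendto-style variant where
 * a destination address was supplied in the SQE.
 */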
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	/*
	 * Initialise the msghdr before the optional destination address is
	 * filled in; doing it afterwards would wipe ->msg_name again.
	 */
	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

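/*
 * Check whether the space needed in front of the payload for the
 * io_uring_recvmsg_out header plus name and control data would overflow
 * an int.
 */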
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore.
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

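/*
 * Carve the multishot recvmsg layout out of the selected buffer: an
 * io_uring_recvmsg_out header first, with room for name and control data,
 * then the payload; *buf and *len are adjusted to the payload area.
 */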
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

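/*
 * Perform one multishot recvmsg and copy the result header (and any
 * received name, truncated to the reserved space) in front of the payload
 * in the selected buffer. Returns the total number of buffer bytes
 * consumed, header included.
 */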
struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 * 1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
			      len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;

	return ret;
}

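/*
 * Tear down an unused zerocopy notification: mark its CQE as skipped so
 * userspace never sees it, then flush it.
 */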
void io_sendzc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);

	zc->notif->flags |= REQ_F_CQE_SKIP;
	io_notif_flush(zc->notif);
	zc->notif = NULL;
}

int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3) ||
	    READ_ONCE(sqe->__pad3[0]))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF))
		return -EINVAL;
	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	zc->addr_len = READ_ONCE(sqe->addr_len);
	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

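/*
 * Fill the skb frags straight from the bvec without taking page
 * references; the pages are pinned by the registered buffer, so the
 * frags are marked as managed rather than individually referenced.
 */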
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

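/*
 * Issue a zerocopy send. The buffer pages stay in flight for the duration
 * of the transfer, and buffer ownership is handed back to userspace via a
 * separate IORING_CQE_F_NOTIF completion on zc->notif.
 */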
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags, cflags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret < 0 && !zc->done_io)
			zc->notif->flags |= REQ_F_CQE_SKIP;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	io_notif_flush(zc->notif);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
	io_req_set_res(req, ret, cflags);
	return IOU_OK;
}

void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int res = req->cqe.res;

	if (req->flags & REQ_F_PARTIAL_IO)
		res = sr->done_io;
	io_req_set_res(req, res, req->cqe.flags);
}

void io_send_zc_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int res = req->cqe.res;

	if (req->flags & REQ_F_PARTIAL_IO) {
		if (req->flags & REQ_F_NEED_CLEANUP) {
			io_notif_flush(sr->notif);
			sr->notif = NULL;
			req->flags &= ~REQ_F_NEED_CLEANUP;
		}
		res = sr->done_io;
	}
	io_req_set_res(req, res, req->cqe.flags);
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif