io_uring: restore bgid in io_put_kbuf
io_uring/net.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "net.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	int msg_flags;
	size_t len;
	size_t done_io;
	unsigned int flags;
};

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	if (io_alloc_async_data(req)) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	async_msg = req->async_data;
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				   &iomsg->free_iov);
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg);
		}
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct iovec __user *uiov;
	size_t iov_len;
	int ret;

	ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
				      &iomsg->uaddr, &uiov, &iov_len);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (iov_len > 1)
			return -EINVAL;
		if (copy_from_user(iomsg->fast_iov, uiov, sizeof(*uiov)))
			return -EFAULT;
		sr->len = iomsg->fast_iov[0].iov_len;
		iomsg->free_iov = NULL;
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct compat_iovec __user *uiov;
	compat_uptr_t ptr;
	compat_size_t len;
	int ret;

	ret = __get_compat_msghdr(&iomsg->msg, sr->umsg_compat, &iomsg->uaddr,
				  &ptr, &len);
	if (ret)
		return ret;

	uiov = compat_ptr(ptr);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (len > 1)
			return -EINVAL;
		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;
		if (__get_user(clen, &uiov->iov_len))
			return -EFAULT;
		if (clen < 0)
			return -EINVAL;
		sr->len = clen;
		iomsg->free_iov = NULL;
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg);

	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &sr->len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = sr->len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
			      sr->len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg, kmsg->uaddr, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock)
			return io_setup_async_msg(req, kmsg);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg);
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
	io_req_set_res(req, ret, cflags);
	return IOU_OK;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &sr->len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock)
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
	io_req_set_res(req, ret, cflags);
	return IOU_OK;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE))
		goto retry;
	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
#endif
769#endif