// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"
#if defined(CONFIG_NET)

        struct sockaddr __user *addr;
        struct sockaddr __user *addr;
        struct compat_msghdr __user *umsg_compat;
        struct user_msghdr __user *umsg;
        /* initialised and used only by !msg send variants */
        /* used only for send zerocopy */
        struct io_kiocb *notif;

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
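/*
 * IORING_OP_SHUTDOWN: prep rejects the SQE fields the opcode does not use
 * and reads the 'how' argument from sqe->len; the issue side resolves the
 * socket from req->file and calls __sys_shutdown_sock().
 */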
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

        if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
                     sqe->buf_index || sqe->splice_fd_in))

        shutdown->how = READ_ONCE(sqe->len);

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
        struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

        if (issue_flags & IO_URING_F_NONBLOCK)

        sock = sock_from_file(req->file);
        ret = __sys_shutdown_sock(sock, shutdown->how);
        io_req_set_res(req, ret, 0);
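/*
 * A short transfer on a stream or seqpacket socket can be resumed, so a
 * MSG_WAITALL request that has already moved some data is retried rather
 * than completed short.
 */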
static bool io_net_retry(struct socket *sock, int flags)
        if (!(flags & MSG_WAITALL))
        return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
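/*
 * Return the async msghdr to the per-ring netmsg_cache so the next
 * send/recv can reuse it. Recycling is only done when the ring lock is
 * held (not IO_URING_F_UNLOCKED); otherwise normal cleanup frees it.
 */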
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
        struct io_async_msghdr *hdr = req->async_data;

        if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)

        /* Let normal cleanup path reap it if we fail adding to the cache */
        if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
                req->async_data = NULL;
                req->flags &= ~REQ_F_ASYNC_DATA;
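/*
 * Get an io_async_msghdr, preferably from the netmsg_cache (only safe to
 * touch with the ring lock held), falling back to a plain async data
 * allocation.
 */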
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
                                                  unsigned int issue_flags)
        struct io_ring_ctx *ctx = req->ctx;
        struct io_cache_entry *entry;

        if (!(issue_flags & IO_URING_F_UNLOCKED) &&
            (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
                struct io_async_msghdr *hdr;

                hdr = container_of(entry, struct io_async_msghdr, cache);
                req->flags |= REQ_F_ASYNC_DATA;
                req->async_data = hdr;

        if (!io_alloc_async_data(req))
                return req->async_data;

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
        /* ->prep_async is always called from the submission context */
        return io_msg_alloc_async(req, 0);
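/*
 * Stash the on-stack msghdr state into async data so the request can be
 * retried later (e.g. from io-wq) without re-importing it from userspace.
 */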
static int io_setup_async_msg(struct io_kiocb *req,
                              struct io_async_msghdr *kmsg,
                              unsigned int issue_flags)
        struct io_async_msghdr *async_msg;

        if (req_has_async_data(req))
        async_msg = io_msg_alloc_async(req, issue_flags);
                kfree(kmsg->free_iov);
        req->flags |= REQ_F_NEED_CLEANUP;
        memcpy(async_msg, kmsg, sizeof(*kmsg));
        async_msg->msg.msg_name = &async_msg->addr;
        /* if we're using fast_iov, set it to the new one */
        if (!async_msg->free_iov)
                async_msg->msg.msg_iter.iov = async_msg->fast_iov;
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
                               struct io_async_msghdr *iomsg)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

        iomsg->msg.msg_name = &iomsg->addr;
        iomsg->free_iov = iomsg->fast_iov;
        return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,

int io_send_prep_async(struct io_kiocb *req)
        struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *io;

        if (!zc->addr || req_has_async_data(req))
        io = io_msg_alloc_async_prep(req);
        ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
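/*
 * Preserve the already-imported destination address of a plain send or
 * zerocopy send across a retry by copying it into the request's async
 * data.
 */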
static int io_setup_async_addr(struct io_kiocb *req,
                               struct sockaddr_storage *addr_storage,
                               unsigned int issue_flags)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *io;

        if (!sr->addr || req_has_async_data(req))
        io = io_msg_alloc_async(req, issue_flags);
        memcpy(&io->addr, addr_storage, sizeof(io->addr));

int io_sendmsg_prep_async(struct io_kiocb *req)
        if (!io_msg_alloc_async_prep(req))
        ret = io_sendmsg_copy_hdr(req, req->async_data);
                req->flags |= REQ_F_NEED_CLEANUP;

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
        struct io_async_msghdr *io = req->async_data;

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

        if (req->opcode == IORING_OP_SEND) {
                if (READ_ONCE(sqe->__pad3[0]))
                sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
                sr->addr_len = READ_ONCE(sqe->addr_len);
        } else if (sqe->addr2 || sqe->file_index) {

        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
        sr->flags = READ_ONCE(sqe->ioprio);
        if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
        sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        if (sr->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;

        if (req->ctx->compat)
                sr->msg_flags |= MSG_CMSG_COMPAT;
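/*
 * Issue side of IORING_OP_SENDMSG: use (or build) the async msghdr,
 * honour IORING_RECVSEND_POLL_FIRST, and fall back to async retry on
 * -EAGAIN or a partial MSG_WAITALL transfer.
 */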
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr iomsg, *kmsg;

        sock = sock_from_file(req->file);

        if (req_has_async_data(req)) {
                kmsg = req->async_data;
                ret = io_sendmsg_copy_hdr(req, &iomsg);

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_msg(req, kmsg, issue_flags);

        flags = sr->msg_flags;
        if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);

        ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

        if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                return io_setup_async_msg(req, kmsg, issue_flags);
        if (ret > 0 && io_net_retry(sock, flags)) {
                req->flags |= REQ_F_PARTIAL_IO;
                return io_setup_async_msg(req, kmsg, issue_flags);
        if (ret == -ERESTARTSYS)

        /* fast path, check for non-NULL to avoid function call */
                kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        io_netmsg_recycle(req, issue_flags);
        else if (sr->done_io)
        io_req_set_res(req, ret, 0);

int io_send(struct io_kiocb *req, unsigned int issue_flags)
        struct sockaddr_storage __address;
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

        if (req_has_async_data(req)) {
                struct io_async_msghdr *io = req->async_data;

                msg.msg_name = &io->addr;
                ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
                if (unlikely(ret < 0))
                msg.msg_name = (struct sockaddr *)&__address;
        msg.msg_namelen = sr->addr_len;

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_addr(req, &__address, issue_flags);

        sock = sock_from_file(req->file);

        ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);

        msg.msg_control = NULL;
        msg.msg_controllen = 0;

        flags = sr->msg_flags;
        if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);

        msg.msg_flags = flags;
        ret = sock_sendmsg(sock, &msg);

        if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                return io_setup_async_addr(req, &__address, issue_flags);

        if (ret > 0 && io_net_retry(sock, flags)) {
                req->flags |= REQ_F_PARTIAL_IO;
                return io_setup_async_addr(req, &__address, issue_flags);
        if (ret == -ERESTARTSYS)

        else if (sr->done_io)
        io_req_set_res(req, ret, 0);
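/*
 * For multishot recvmsg the name and control data are copied into the
 * provided buffer ahead of the payload; reject msghdr values whose
 * combined header size would overflow an int.
 */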
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
        if (iomsg->namelen < 0)
        if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
                               iomsg->namelen, &hdr))
        if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
                                 struct io_async_msghdr *iomsg)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct user_msghdr msg;

        if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))

        ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);

        if (req->flags & REQ_F_BUFFER_SELECT) {
                if (msg.msg_iovlen == 0) {
                        sr->len = iomsg->fast_iov[0].iov_len = 0;
                        iomsg->fast_iov[0].iov_base = NULL;
                        iomsg->free_iov = NULL;
                } else if (msg.msg_iovlen > 1) {
                        if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
                        sr->len = iomsg->fast_iov[0].iov_len;
                        iomsg->free_iov = NULL;

                if (req->flags & REQ_F_APOLL_MULTISHOT) {
                        iomsg->namelen = msg.msg_namelen;
                        iomsg->controllen = msg.msg_controllen;
                        if (io_recvmsg_multishot_overflow(iomsg))

                iomsg->free_iov = iomsg->fast_iov;
                ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
                                     &iomsg->free_iov, &iomsg->msg.msg_iter,

static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
                                        struct io_async_msghdr *iomsg)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct compat_msghdr msg;
        struct compat_iovec __user *uiov;

        if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))

        ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);

        uiov = compat_ptr(msg.msg_iov);
        if (req->flags & REQ_F_BUFFER_SELECT) {
                if (msg.msg_iovlen == 0) {
                        iomsg->free_iov = NULL;
                } else if (msg.msg_iovlen > 1) {
                        if (!access_ok(uiov, sizeof(*uiov)))
                        if (__get_user(clen, &uiov->iov_len))
                        iomsg->free_iov = NULL;

                if (req->flags & REQ_F_APOLL_MULTISHOT) {
                        iomsg->namelen = msg.msg_namelen;
                        iomsg->controllen = msg.msg_controllen;
                        if (io_recvmsg_multishot_overflow(iomsg))

                iomsg->free_iov = iomsg->fast_iov;
                ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
                                     UIO_FASTIOV, &iomsg->free_iov,
                                     &iomsg->msg.msg_iter, true);
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
                               struct io_async_msghdr *iomsg)
        iomsg->msg.msg_name = &iomsg->addr;

        if (req->ctx->compat)
                return __io_compat_recvmsg_copy_hdr(req, iomsg);

        return __io_recvmsg_copy_hdr(req, iomsg);

int io_recvmsg_prep_async(struct io_kiocb *req)
        if (!io_msg_alloc_async_prep(req))
        ret = io_recvmsg_copy_hdr(req, req->async_data);
                req->flags |= REQ_F_NEED_CLEANUP;

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

        if (unlikely(sqe->file_index || sqe->addr2))

        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
        sr->flags = READ_ONCE(sqe->ioprio);
        if (sr->flags & ~(RECVMSG_FLAGS))
        sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        if (sr->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
        if (sr->msg_flags & MSG_ERRQUEUE)
                req->flags |= REQ_F_CLEAR_POLLIN;
        if (sr->flags & IORING_RECV_MULTISHOT) {
                if (!(req->flags & REQ_F_BUFFER_SELECT))
                if (sr->msg_flags & MSG_WAITALL)
                if (req->opcode == IORING_OP_RECV && sr->len)
                req->flags |= REQ_F_APOLL_MULTISHOT;

        if (req->ctx->compat)
                sr->msg_flags |= MSG_CMSG_COMPAT;

static inline void io_recv_prep_retry(struct io_kiocb *req)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

        sr->len = 0; /* get from the provided buffer */

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
                                  unsigned int cflags, bool mshot_finished)
        if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                io_req_set_res(req, *ret, cflags);

        if (!mshot_finished) {
                if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
                                    cflags | IORING_CQE_F_MORE, false)) {
                        io_recv_prep_retry(req);
                /*
                 * Otherwise stop multishot but use the current result.
                 * Probably will end up going into overflow, but this means
                 * we cannot trust the ordering anymore
                 */

        io_req_set_res(req, *ret, cflags);

        if (req->flags & REQ_F_POLLED)
                *ret = IOU_STOP_MULTISHOT;
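/*
 * Lay out a provided buffer for multishot recvmsg: reserve room for the
 * leading io_uring_recvmsg_out plus name and control data, and adjust
 * *buf and *len so the payload lands after that header area.
 */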
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
                                     struct io_sr_msg *sr, void __user **buf,
        unsigned long ubuf = (unsigned long) *buf;

        hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +

        if (kmsg->controllen) {
                unsigned long control = ubuf + hdr - kmsg->controllen;

                kmsg->msg.msg_control_user = (void __user *) control;
                kmsg->msg.msg_controllen = kmsg->controllen;

        sr->buf = *buf; /* stash for later copy */
        *buf = (void __user *) (ubuf + hdr);
        kmsg->payloadlen = *len = *len - hdr;
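/*
 * io_recvmsg_multishot() receives into the kernel-side hdr below, then
 * copies io_uring_recvmsg_out plus the (possibly truncated) source
 * address to the start of the selected user buffer.
 */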
struct io_recvmsg_multishot_hdr {
        struct io_uring_recvmsg_out msg;
        struct sockaddr_storage addr;

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
                                struct io_async_msghdr *kmsg,
                                unsigned int flags, bool *finished)
        struct io_recvmsg_multishot_hdr hdr;

        kmsg->msg.msg_name = &hdr.addr;
        kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
        kmsg->msg.msg_namelen = 0;

        if (sock->file->f_flags & O_NONBLOCK)
                flags |= MSG_DONTWAIT;

        err = sock_recvmsg(sock, &kmsg->msg, flags);
        *finished = err <= 0;

        hdr.msg = (struct io_uring_recvmsg_out) {
                .controllen = kmsg->controllen - kmsg->msg.msg_controllen,
                .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT

        hdr.msg.payloadlen = err;
        if (err > kmsg->payloadlen)
                err = kmsg->payloadlen;

        copy_len = sizeof(struct io_uring_recvmsg_out);
        if (kmsg->msg.msg_namelen > kmsg->namelen)
                copy_len += kmsg->namelen;
                copy_len += kmsg->msg.msg_namelen;
        /* "fromlen shall refer to the value before truncation.." */
        hdr.msg.namelen = kmsg->msg.msg_namelen;

        /* ensure that there is no gap between hdr and sockaddr_storage */
        BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
                     sizeof(struct io_uring_recvmsg_out));
        if (copy_to_user(io->buf, &hdr, copy_len)) {

        return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
               kmsg->controllen + err;

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr iomsg, *kmsg;
        int ret, min_ret = 0;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        bool mshot_finished = true;

        sock = sock_from_file(req->file);

        if (req_has_async_data(req)) {
                kmsg = req->async_data;
                ret = io_recvmsg_copy_hdr(req, &iomsg);

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_msg(req, kmsg, issue_flags);

        if (io_do_buffer_select(req)) {
                size_t len = sr->len;

                buf = io_buffer_select(req, &len, issue_flags);

                if (req->flags & REQ_F_APOLL_MULTISHOT) {
                        ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
                                io_kbuf_recycle(req, issue_flags);

                kmsg->fast_iov[0].iov_base = buf;
                kmsg->fast_iov[0].iov_len = len;
                iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,

        flags = sr->msg_flags;
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);

        kmsg->msg.msg_get_inq = 1;
        if (req->flags & REQ_F_APOLL_MULTISHOT)
                ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
                ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,

        if (ret == -EAGAIN && force_nonblock) {
                ret = io_setup_async_msg(req, kmsg, issue_flags);
                if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
                    IO_APOLL_MULTI_POLLED) {
                        io_kbuf_recycle(req, issue_flags);
                        return IOU_ISSUE_SKIP_COMPLETE;
        if (ret > 0 && io_net_retry(sock, flags)) {
                req->flags |= REQ_F_PARTIAL_IO;
                return io_setup_async_msg(req, kmsg, issue_flags);
        if (ret == -ERESTARTSYS)
        } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {

        else if (sr->done_io)

        io_kbuf_recycle(req, issue_flags);

        cflags = io_put_kbuf(req, issue_flags);
        if (kmsg->msg.msg_inq)
                cflags |= IORING_CQE_F_SOCK_NONEMPTY;

        if (!io_recv_finish(req, &ret, cflags, mshot_finished))
                goto retry_multishot;

        if (mshot_finished) {
                io_netmsg_recycle(req, issue_flags);
                /* fast path, check for non-NULL to avoid function call */
                        kfree(kmsg->free_iov);
                req->flags &= ~REQ_F_NEED_CLEANUP;
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        int ret, min_ret = 0;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        size_t len = sr->len;

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))

        sock = sock_from_file(req->file);

        if (io_do_buffer_select(req)) {
                buf = io_buffer_select(req, &len, issue_flags);

        ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);

        msg.msg_control = NULL;
        msg.msg_controllen = 0;

        flags = sr->msg_flags;
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);

        ret = sock_recvmsg(sock, &msg, flags);

        if (ret == -EAGAIN && force_nonblock) {
                if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
                        io_kbuf_recycle(req, issue_flags);
                        return IOU_ISSUE_SKIP_COMPLETE;
        if (ret > 0 && io_net_retry(sock, flags)) {
                req->flags |= REQ_F_PARTIAL_IO;
        if (ret == -ERESTARTSYS)
        } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {

        else if (sr->done_io)

        io_kbuf_recycle(req, issue_flags);

        cflags = io_put_kbuf(req, issue_flags);
                cflags |= IORING_CQE_F_SOCK_NONEMPTY;

        if (!io_recv_finish(req, &ret, cflags, ret <= 0))
                goto retry_multishot;

void io_send_zc_cleanup(struct io_kiocb *req)
        struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *io;

        if (req_has_async_data(req)) {
                io = req->async_data;
                zc->notif->flags |= REQ_F_CQE_SKIP;
                io_notif_flush(zc->notif);

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *notif;

        if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
        /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
        if (req->flags & REQ_F_CQE_SKIP)

        zc->flags = READ_ONCE(sqe->ioprio);
        if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
                          IORING_RECVSEND_FIXED_BUF))
        notif = zc->notif = io_alloc_notif(ctx);
        notif->cqe.user_data = req->cqe.user_data;
        notif->cqe.flags = IORING_CQE_F_NOTIF;
        req->flags |= REQ_F_NEED_CLEANUP;
        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
                unsigned idx = READ_ONCE(sqe->buf_index);

                if (unlikely(idx >= ctx->nr_user_bufs))
                idx = array_index_nospec(idx, ctx->nr_user_bufs);
                req->imu = READ_ONCE(ctx->user_bufs[idx]);
                io_req_set_rsrc_node(notif, ctx, 0);

        if (req->opcode == IORING_OP_SEND_ZC) {
                if (READ_ONCE(sqe->__pad3[0]))
                zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
                zc->addr_len = READ_ONCE(sqe->addr_len);
                if (unlikely(sqe->addr2 || sqe->file_index))
                if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))

        zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
        zc->len = READ_ONCE(sqe->len);
        zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        if (zc->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;

        if (req->ctx->compat)
                zc->msg_flags |= MSG_CMSG_COMPAT;
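/*
 * ->sg_from_iter callbacks for zerocopy sends: the iovec variant
 * downgrades the skb and goes through __zerocopy_sg_from_iter(), while
 * io_sg_from_iter() maps bvec pages straight into skb frags without
 * taking page references.
 */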
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
                                 struct iov_iter *from, size_t length)
        skb_zcopy_downgrade_managed(skb);
        return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
                           struct iov_iter *from, size_t length)
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int frag = shinfo->nr_frags;
        unsigned long truesize = 0;

                shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
        else if (unlikely(!skb_zcopy_managed(skb)))
                return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

        bi.bi_size = min(from->count, length);
        bi.bi_bvec_done = from->iov_offset;

        while (bi.bi_size && frag < MAX_SKB_FRAGS) {
                struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

                truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
                __skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
                                           v.bv_offset, v.bv_len);
                bvec_iter_advance_single(from->bvec, &bi, v.bv_len);

        shinfo->nr_frags = frag;
        from->bvec += bi.bi_idx;
        from->nr_segs -= bi.bi_idx;
        from->count -= copied;
        from->iov_offset = bi.bi_bvec_done;

        skb->data_len += copied;
        skb->truesize += truesize;

        if (sk && sk->sk_type == SOCK_STREAM) {
                sk_wmem_queued_add(sk, truesize);
                if (!skb_zcopy_pure(skb))
                        sk_mem_charge(sk, truesize);
                refcount_add(truesize, &skb->sk->sk_wmem_alloc);
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
        struct sockaddr_storage __address;
        struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct socket *sock;
        unsigned msg_flags, cflags;
        int ret, min_ret = 0;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))

        msg.msg_name = NULL;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;

        if (req_has_async_data(req)) {
                struct io_async_msghdr *io = req->async_data;

                msg.msg_name = &io->addr;
                ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
                if (unlikely(ret < 0))
                msg.msg_name = (struct sockaddr *)&__address;
        msg.msg_namelen = zc->addr_len;

        if (!(req->flags & REQ_F_POLLED) &&
            (zc->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_addr(req, &__address, issue_flags);

        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
                ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
                                      (u64)(uintptr_t)zc->buf, zc->len);
                msg.sg_from_iter = io_sg_from_iter;
                ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
                ret = io_notif_account_mem(zc->notif, zc->len);
                msg.sg_from_iter = io_sg_from_iter_iovec;

        msg_flags = zc->msg_flags | MSG_ZEROCOPY;
        if (issue_flags & IO_URING_F_NONBLOCK)
                msg_flags |= MSG_DONTWAIT;
        if (msg_flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);

        msg.msg_flags = msg_flags;
        msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
        ret = sock_sendmsg(sock, &msg);

        if (unlikely(ret < min_ret)) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return io_setup_async_addr(req, &__address, issue_flags);

                if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_addr(req, &__address, issue_flags);
                if (ret < 0 && !zc->done_io)
                        zc->notif->flags |= REQ_F_CQE_SKIP;
                if (ret == -ERESTARTSYS)

        else if (zc->done_io)

        io_notif_flush(zc->notif);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
        io_req_set_res(req, ret, cflags);
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr iomsg, *kmsg;
        struct socket *sock;
        unsigned flags, cflags;
        int ret, min_ret = 0;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))

        if (req_has_async_data(req)) {
                kmsg = req->async_data;
                ret = io_sendmsg_copy_hdr(req, &iomsg);

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_msg(req, kmsg, issue_flags);

        flags = sr->msg_flags | MSG_ZEROCOPY;
        if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);

        kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
        kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
        ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

        if (unlikely(ret < min_ret)) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return io_setup_async_msg(req, kmsg, issue_flags);

                if (ret > 0 && io_net_retry(sock, flags)) {
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_msg(req, kmsg, issue_flags);
                if (ret < 0 && !sr->done_io)
                        sr->notif->flags |= REQ_F_CQE_SKIP;
                if (ret == -ERESTARTSYS)

        /* fast path, check for non-NULL to avoid function call */
                kfree(kmsg->free_iov);

        io_netmsg_recycle(req, issue_flags);
        else if (sr->done_io)

        io_notif_flush(sr->notif);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
        io_req_set_res(req, ret, cflags);
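/*
 * Failure completion for send/recv: report partial progress instead of
 * the error where some data already moved, and flush (or skip) the
 * zerocopy notification CQE accordingly.
 */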
void io_sendrecv_fail(struct io_kiocb *req)
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *io;
        int res = req->cqe.res;

        if (req->flags & REQ_F_PARTIAL_IO)
        if ((req->flags & REQ_F_NEED_CLEANUP) &&
            (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC)) {
                /* preserve notification for partial I/O */
                sr->notif->flags |= REQ_F_CQE_SKIP;
                io_notif_flush(sr->notif);

        if (req_has_async_data(req)) {
                io = req->async_data;
                kfree(io->free_iov);
                io->free_iov = NULL;

        req->flags &= ~REQ_F_NEED_CLEANUP;
        io_req_set_res(req, res, req->cqe.flags);
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);

        if (sqe->len || sqe->buf_index)

        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
        accept->nofile = rlimit(RLIMIT_NOFILE);
        flags = READ_ONCE(sqe->ioprio);
        if (flags & ~IORING_ACCEPT_MULTISHOT)

        accept->file_slot = READ_ONCE(sqe->file_index);
        if (accept->file_slot) {
                if (accept->flags & SOCK_CLOEXEC)
                if (flags & IORING_ACCEPT_MULTISHOT &&
                    accept->file_slot != IORING_FILE_INDEX_ALLOC)

        if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
        if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
                accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
        if (flags & IORING_ACCEPT_MULTISHOT)
                req->flags |= REQ_F_APOLL_MULTISHOT;

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
        struct io_ring_ctx *ctx = req->ctx;
        struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
        bool fixed = !!accept->file_slot;

        fd = __get_unused_fd_flags(accept->flags, accept->nofile);
        if (unlikely(fd < 0))

        file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,

        ret = PTR_ERR(file);
        if (ret == -EAGAIN && force_nonblock) {
                /*
                 * if it's multishot and polled, we don't need to
                 * return EAGAIN to arm the poll infra since it
                 * has already been done
                 */
                if ((req->flags & IO_APOLL_MULTI_POLLED) ==
                    IO_APOLL_MULTI_POLLED)
                        ret = IOU_ISSUE_SKIP_COMPLETE;
        if (ret == -ERESTARTSYS)
        } else if (!fixed) {
                fd_install(fd, file);
                ret = io_fixed_fd_install(req, issue_flags, file,

        if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                io_req_set_res(req, ret, 0);

            io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))

        io_req_set_res(req, ret, 0);
        if (req->flags & REQ_F_POLLED)
                return IOU_STOP_MULTISHOT;
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

        if (sqe->addr || sqe->rw_flags || sqe->buf_index)

        sock->domain = READ_ONCE(sqe->fd);
        sock->type = READ_ONCE(sqe->off);
        sock->protocol = READ_ONCE(sqe->len);
        sock->file_slot = READ_ONCE(sqe->file_index);
        sock->nofile = rlimit(RLIMIT_NOFILE);

        sock->flags = sock->type & ~SOCK_TYPE_MASK;
        if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
        if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
        struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
        bool fixed = !!sock->file_slot;

        fd = __get_unused_fd_flags(sock->flags, sock->nofile);
        if (unlikely(fd < 0))

        file = __sys_socket_file(sock->domain, sock->type, sock->protocol);

        ret = PTR_ERR(file);
        if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
        if (ret == -ERESTARTSYS)
        } else if (!fixed) {
                fd_install(fd, file);
                ret = io_fixed_fd_install(req, issue_flags, file,
        io_req_set_res(req, ret, 0);

int io_connect_prep_async(struct io_kiocb *req)
        struct io_async_connect *io = req->async_data;
        struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

        return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

        if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)

        conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        conn->addr_len = READ_ONCE(sqe->addr2);

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
        struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
        struct io_async_connect __io, *io;
        unsigned file_flags;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

        if (req_has_async_data(req)) {
                io = req->async_data;
                ret = move_addr_to_kernel(connect->addr,

        file_flags = force_nonblock ? O_NONBLOCK : 0;

        ret = __sys_connect_file(req->file, &io->address,
                                 connect->addr_len, file_flags);
        if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
                if (req_has_async_data(req))
                if (io_alloc_async_data(req)) {
                memcpy(req->async_data, &__io, sizeof(__io));
        if (ret == -ERESTARTSYS)

        io_req_set_res(req, ret, 0);
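/* Free a cached io_async_msghdr when the netmsg_cache is torn down. */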
void io_netmsg_cache_free(struct io_cache_entry *entry)
        kfree(container_of(entry, struct io_async_msghdr, cache));