io_uring/net: get rid of ->prep_async() for receive side
author     Jens Axboe <axboe@kernel.dk>
           Mon, 18 Mar 2024 13:36:03 +0000 (07:36 -0600)
committer  Jens Axboe <axboe@kernel.dk>
           Mon, 15 Apr 2024 14:10:25 +0000 (08:10 -0600)
Move the io_async_msghdr out of the issue path and into prep handling,
since it's now done unconditionally and hence does not need to be part
of the issue path. This reduces the footprint of the multishot fast
path, where ->issue() is invoked multiple times per prep, and it also
means that ->prep_async() can be dropped for recvmsg, as this is now
done via setup on the prep side.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
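
To make the shape of the change concrete, here is a minimal userspace C
sketch of the same pattern (illustrative only, not kernel code): the
per-request state is allocated and initialized once at prep time, and
every subsequent issue invocation (the multishot case) simply reuses
it. All struct and function names below are invented for the example.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for io_async_msghdr: per-request state set up once at prep. */
struct sketch_msghdr {
        char *buf;
        size_t len;
};

/* Stand-in for the request with its ->async_data pointer. */
struct sketch_req {
        struct sketch_msghdr *async_data;
};

/* Prep side: runs once per request and always allocates the state. */
static int sketch_prep(struct sketch_req *req, size_t len)
{
        struct sketch_msghdr *kmsg = calloc(1, sizeof(*kmsg));

        if (!kmsg)
                return -1;
        kmsg->buf = malloc(len);
        if (!kmsg->buf) {
                free(kmsg);
                return -1;
        }
        kmsg->len = len;
        req->async_data = kmsg;
        return 0;
}

/* Issue side: may run many times (multishot); no allocation or setup here. */
static int sketch_issue(struct sketch_req *req, const char *src)
{
        struct sketch_msghdr *kmsg = req->async_data;

        snprintf(kmsg->buf, kmsg->len, "%s", src);
        return 0;
}

int main(void)
{
        struct sketch_req req = { 0 };

        if (sketch_prep(&req, 32))
                return 1;
        /* Multishot: issue repeatedly against the same prepped state. */
        for (int i = 0; i < 3; i++) {
                sketch_issue(&req, "hello");
                printf("shot %d: %s\n", i, req.async_data->buf);
        }
        free(req.async_data->buf);
        free(req.async_data);
        return 0;
}
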
io_uring/net.c
io_uring/net.h
io_uring/opdef.c

diff --git a/io_uring/net.c b/io_uring/net.c
index b08c0ae5951ae8970974fe221a9dcde4cd8773f2..7cd93cd8b8c4755444b5944b5217c3c8c0ef915c 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -595,17 +595,36 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req,
                                        msg.msg_controllen);
 }
 
-int io_recvmsg_prep_async(struct io_kiocb *req)
+static int io_recvmsg_prep_setup(struct io_kiocb *req)
 {
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-       struct io_async_msghdr *iomsg;
+       struct io_async_msghdr *kmsg;
        int ret;
 
-       sr->done_io = 0;
-       if (!io_msg_alloc_async_prep(req))
+       /* always locked for prep */
+       kmsg = io_msg_alloc_async(req, 0);
+       if (unlikely(!kmsg))
                return -ENOMEM;
-       iomsg = req->async_data;
-       ret = io_recvmsg_copy_hdr(req, iomsg);
+
+       if (req->opcode == IORING_OP_RECV) {
+               kmsg->msg.msg_name = NULL;
+               kmsg->msg.msg_namelen = 0;
+               kmsg->msg.msg_control = NULL;
+               kmsg->msg.msg_get_inq = 1;
+               kmsg->msg.msg_controllen = 0;
+               kmsg->msg.msg_iocb = NULL;
+               kmsg->msg.msg_ubuf = NULL;
+
+               if (!io_do_buffer_select(req)) {
+                       ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+                                         &kmsg->msg.msg_iter);
+                       if (unlikely(ret))
+                               return ret;
+               }
+               return 0;
+       }
+
+       ret = io_recvmsg_copy_hdr(req, kmsg);
        if (!ret)
                req->flags |= REQ_F_NEED_CLEANUP;
        return ret;
@@ -656,7 +675,7 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
        sr->nr_multishot_loops = 0;
-       return 0;
+       return io_recvmsg_prep_setup(req);
 }
 
 static inline void io_recv_prep_retry(struct io_kiocb *req,
@@ -814,7 +833,7 @@ static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-       struct io_async_msghdr *kmsg;
+       struct io_async_msghdr *kmsg = req->async_data;
        struct socket *sock;
        unsigned flags;
        int ret, min_ret = 0;
@@ -825,17 +844,6 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
        if (unlikely(!sock))
                return -ENOTSOCK;
 
-       if (req_has_async_data(req)) {
-               kmsg = req->async_data;
-       } else {
-               kmsg = io_msg_alloc_async(req, issue_flags);
-               if (unlikely(!kmsg))
-                       return -ENOMEM;
-               ret = io_recvmsg_copy_hdr(req, kmsg);
-               if (ret)
-                       return ret;
-       }
-
        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return -EAGAIN;
@@ -914,36 +922,13 @@ retry_multishot:
 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-       struct io_async_msghdr *kmsg;
+       struct io_async_msghdr *kmsg = req->async_data;
        struct socket *sock;
        unsigned flags;
        int ret, min_ret = 0;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        size_t len = sr->len;
 
-       if (req_has_async_data(req)) {
-               kmsg = req->async_data;
-       } else {
-               kmsg = io_msg_alloc_async(req, issue_flags);
-               if (unlikely(!kmsg))
-                       return -ENOMEM;
-               kmsg->free_iov = NULL;
-               kmsg->msg.msg_name = NULL;
-               kmsg->msg.msg_namelen = 0;
-               kmsg->msg.msg_control = NULL;
-               kmsg->msg.msg_get_inq = 1;
-               kmsg->msg.msg_controllen = 0;
-               kmsg->msg.msg_iocb = NULL;
-               kmsg->msg.msg_ubuf = NULL;
-
-               if (!io_do_buffer_select(req)) {
-                       ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
-                                         &kmsg->msg.msg_iter);
-                       if (unlikely(ret))
-                               return ret;
-               }
-       }
-
        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return -EAGAIN;
diff --git a/io_uring/net.h b/io_uring/net.h
index 5c1230f1aaf98f97b52c1da1bf36899df443e54e..4b4fd9b1b7b4bd2b8c97703f916639e73c531eb4 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -42,7 +42,6 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
 int io_send(struct io_kiocb *req, unsigned int issue_flags);
 int io_sendrecv_prep_async(struct io_kiocb *req);
 
-int io_recvmsg_prep_async(struct io_kiocb *req);
 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
 int io_recv(struct io_kiocb *req, unsigned int issue_flags);
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 77131826d60398d18d4e08dc4ea8c3bd786296a2..1368193edc57299c027dc2021344db008a25bf76 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -536,7 +536,6 @@ const struct io_cold_def io_cold_defs[] = {
                .name                   = "RECVMSG",
 #if defined(CONFIG_NET)
                .async_size             = sizeof(struct io_async_msghdr),
-               .prep_async             = io_recvmsg_prep_async,
                .cleanup                = io_sendmsg_recvmsg_cleanup,
                .fail                   = io_sendrecv_fail,
 #endif
@@ -613,7 +612,6 @@ const struct io_cold_def io_cold_defs[] = {
                .async_size             = sizeof(struct io_async_msghdr),
                .cleanup                = io_sendmsg_recvmsg_cleanup,
                .fail                   = io_sendrecv_fail,
-               .prep_async             = io_sendrecv_prep_async,
 #endif
        },
        [IORING_OP_OPENAT2] = {