io_uring/net: refactor io_setup_async_addr
author: Pavel Begunkov <asml.silence@gmail.com>
Wed, 21 Sep 2022 11:17:50 +0000 (12:17 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Wed, 21 Sep 2022 19:15:02 +0000 (13:15 -0600)
Instead of passing the right address into io_setup_async_addr() only
specify local on-stack storage and let the function infer where to grab
it from. It optimises out one local variable we have to deal with.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6bfa9ab810d776853eb26ed59301e2536c3a5471.1663668091.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/net.c

index 8d90f8eeb2d0d066626a081b10475ca270d4d19d..021ca2edf44a1a140134f9037a8ce76037e23fb7 100644 (file)
@@ -196,17 +196,18 @@ int io_sendzc_prep_async(struct io_kiocb *req)
 }
 
 static int io_setup_async_addr(struct io_kiocb *req,
-                             struct sockaddr_storage *addr,
+                             struct sockaddr_storage *addr_storage,
                              unsigned int issue_flags)
 {
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *io;
 
-       if (!addr || req_has_async_data(req))
+       if (!sr->addr || req_has_async_data(req))
                return -EAGAIN;
        io = io_msg_alloc_async(req, issue_flags);
        if (!io)
                return -ENOMEM;
-       memcpy(&io->addr, addr, sizeof(io->addr));
+       memcpy(&io->addr, addr_storage, sizeof(io->addr));
        return -EAGAIN;
 }
 
@@ -1000,7 +1001,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 
 int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 {
-       struct sockaddr_storage __address, *addr = NULL;
+       struct sockaddr_storage __address;
        struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct msghdr msg;
        struct iovec iov;
@@ -1021,20 +1022,19 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
                if (req_has_async_data(req)) {
                        struct io_async_msghdr *io = req->async_data;
 
-                       msg.msg_name = addr = &io->addr;
+                       msg.msg_name = &io->addr;
                } else {
                        ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
                        if (unlikely(ret < 0))
                                return ret;
                        msg.msg_name = (struct sockaddr *)&__address;
-                       addr = &__address;
                }
                msg.msg_namelen = zc->addr_len;
        }
 
        if (!(req->flags & REQ_F_POLLED) &&
            (zc->flags & IORING_RECVSEND_POLL_FIRST))
-               return io_setup_async_addr(req, addr, issue_flags);
+               return io_setup_async_addr(req, &__address, issue_flags);
 
        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
                ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
@@ -1065,14 +1065,14 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 
        if (unlikely(ret < min_ret)) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-                       return io_setup_async_addr(req, addr, issue_flags);
+                       return io_setup_async_addr(req, &__address, issue_flags);
 
                if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
                        zc->len -= ret;
                        zc->buf += ret;
                        zc->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
-                       return io_setup_async_addr(req, addr, issue_flags);
+                       return io_setup_async_addr(req, &__address, issue_flags);
                }
                if (ret < 0 && !zc->done_io)
                        zc->notif->flags |= REQ_F_CQE_SKIP;