io_uring/net: import zc ubuf earlier
author Pavel Begunkov <asml.silence@gmail.com>
Fri, 28 Mar 2025 23:11:00 +0000 (23:11 +0000)
committer Jens Axboe <axboe@kernel.dk>
Fri, 28 Mar 2025 23:11:20 +0000 (17:11 -0600)
io_send_setup() already sets up the iterator for IORING_OP_SEND_ZC, so
there is no need to repeat that at issue time. Move the import, together
with the memory accounting, to prep time, which is more consistent with
how the non-zc version does it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/eb54f007c493ad9f4ca89aa8e715baf30d83fb88.1743202294.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
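
For illustration, a minimal user-space C sketch of the control-flow change.
All types and function names below are placeholders standing in for the
kernel's io_send_setup()/import_ubuf()/io_import_reg_buf() paths, not the
real APIs; it only models how, after this patch, the plain-ubuf import and
memory accounting both happen at prep time, leaving only the fixed-buffer
import for issue time.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder request state, loosely mirroring io_sr_msg/io_async_msghdr. */
struct zc_req {
	bool fixed_buf;      /* IORING_RECVSEND_FIXED_BUF set? */
	bool iter_imported;  /* iterator set up yet? */
	bool mem_accounted;  /* memory accounted against the notif yet? */
};

/* Prep time: with this patch, plain ubufs are imported and accounted here. */
static void zc_prep(struct zc_req *req)
{
	if (!req->fixed_buf) {
		req->iter_imported = true;   /* stands in for import_ubuf() */
		req->mem_accounted = true;   /* stands in for io_notif_account_mem() */
	}
}

/* Issue time: only fixed buffers still have an import step left. */
static void zc_issue(struct zc_req *req)
{
	if (req->fixed_buf)
		req->iter_imported = true;   /* stands in for io_import_reg_buf() */
}

int main(void)
{
	struct zc_req ubuf = { .fixed_buf = false };
	struct zc_req fixed = { .fixed_buf = true };

	zc_prep(&ubuf);
	zc_prep(&fixed);
	printf("after prep:  ubuf imported=%d fixed imported=%d\n",
	       ubuf.iter_imported, fixed.iter_imported);
	zc_issue(&ubuf);
	zc_issue(&fixed);
	printf("after issue: ubuf imported=%d fixed imported=%d\n",
	       ubuf.iter_imported, fixed.iter_imported);
	return 0;
}

Before the patch, the issue path had to handle both branches (including the
accounting for the ubuf case); doing it at prep keeps issue-time work minimal
and matches the non-zc send path.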
io_uring/net.c

index 749dd298c5026429e281234d98fa3d5541594005..eaa627eddb4a44c6ed2dcd149f8f492078e8ca47 100644 (file)
@@ -1318,23 +1318,23 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(!iomsg))
                return -ENOMEM;
 
-       if (zc->flags & IORING_RECVSEND_FIXED_BUF)
-               iomsg->msg.sg_from_iter = io_sg_from_iter;
-       else
-               iomsg->msg.sg_from_iter = io_sg_from_iter_iovec;
-
        if (req->opcode == IORING_OP_SEND_ZC) {
-               req->flags |= REQ_F_IMPORT_BUFFER;
-               return io_send_setup(req, sqe);
+               if (zc->flags & IORING_RECVSEND_FIXED_BUF)
+                       req->flags |= REQ_F_IMPORT_BUFFER;
+               ret = io_send_setup(req, sqe);
+       } else {
+               if (unlikely(sqe->addr2 || sqe->file_index))
+                       return -EINVAL;
+               ret = io_sendmsg_setup(req, sqe);
        }
-       if (unlikely(sqe->addr2 || sqe->file_index))
-               return -EINVAL;
-       ret = io_sendmsg_setup(req, sqe);
        if (unlikely(ret))
                return ret;
 
-       if (!(zc->flags & IORING_RECVSEND_FIXED_BUF))
+       if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) {
+               iomsg->msg.sg_from_iter = io_sg_from_iter_iovec;
                return io_notif_account_mem(zc->notif, iomsg->msg.msg_iter.count);
+       }
+       iomsg->msg.sg_from_iter = io_sg_from_iter;
        return 0;
 }
 
@@ -1392,25 +1392,13 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *kmsg = req->async_data;
-       int ret;
 
-       if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
-               sr->notif->buf_index = req->buf_index;
-               ret = io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
-                                       (u64)(uintptr_t)sr->buf, sr->len,
-                                       ITER_SOURCE, issue_flags);
-               if (unlikely(ret))
-                       return ret;
-       } else {
-               ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
-               if (unlikely(ret))
-                       return ret;
-               ret = io_notif_account_mem(sr->notif, sr->len);
-               if (unlikely(ret))
-                       return ret;
-       }
+       WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF));
 
-       return ret;
+       sr->notif->buf_index = req->buf_index;
+       return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
+                               (u64)(uintptr_t)sr->buf, sr->len,
+                               ITER_SOURCE, issue_flags);
 }
 
 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
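
Read together, the second hunk leaves io_send_zc_import() as a
fixed-buffer-only helper. Assembled here from the '+' lines above for
readability (a reconstruction of the post-patch function, not a substitute
for the tree):

static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg = req->async_data;

	/* Plain ubufs are now imported at prep; only fixed buffers get here. */
	WARN_ON_ONCE(!(sr->flags & IORING_RECVSEND_FIXED_BUF));

	sr->notif->buf_index = req->buf_index;
	return io_import_reg_buf(sr->notif, &kmsg->msg.msg_iter,
				(u64)(uintptr_t)sr->buf, sr->len,
				ITER_SOURCE, issue_flags);
}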