io_uring: don't expose io_fill_cqe_aux()
author: Pavel Begunkov <asml.silence@gmail.com>
Fri, 17 Jun 2022 08:48:00 +0000 (09:48 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 25 Jul 2022 00:39:14 +0000 (18:39 -0600)
Deduplicate some code and add a helper that fills an aux CQE and handles
the locking and completion notification.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/b7c6557c8f9dc5c4cfb01292116c682a0ff61081.1655455613.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
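
In effect, io_fill_cqe_aux() becomes static and the new io_post_aux_cqe()
wraps it with the completion lock, ring commit and event notification. A
minimal before/after sketch of the call-site pattern, using only names
taken from the hunks below:

	/* before: sequence open-coded at each call site */
	spin_lock(&ctx->completion_lock);
	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	if (filled)
		io_cqring_ev_posted(ctx);

	/* after: a single helper call */
	filled = io_post_aux_cqe(ctx, user_data, res, cflags);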
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/msg_ring.c
io_uring/net.c
io_uring/poll.c
io_uring/rsrc.c

index eeda16731795445bfb9484598ce1558c088a1179..8c1b0e0ce5bb8a213fdae4cd250677bae24b0531 100644 (file)
@@ -676,8 +676,8 @@ bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
        return true;
 }
 
-bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
-                    u32 cflags)
+static bool io_fill_cqe_aux(struct io_ring_ctx *ctx,
+                           u64 user_data, s32 res, u32 cflags)
 {
        struct io_uring_cqe *cqe;
 
@@ -704,6 +704,20 @@ bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
        return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
 }
 
+bool io_post_aux_cqe(struct io_ring_ctx *ctx,
+                    u64 user_data, s32 res, u32 cflags)
+{
+       bool filled;
+
+       spin_lock(&ctx->completion_lock);
+       filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
+       io_commit_cqring(ctx);
+       spin_unlock(&ctx->completion_lock);
+       if (filled)
+               io_cqring_ev_posted(ctx);
+       return filled;
+}
+
 static void __io_req_complete_put(struct io_kiocb *req)
 {
        /*
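
Note, visible in the helper above: io_cqring_ev_posted() runs only after
completion_lock has been dropped, and only when a CQE (or an overflow
entry) was actually filled, matching what each converted call site did
before.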
index 3f06fbae0ee9e50acc0504bbed46add2a778f361..18754fb790255e75446e849a79088bcfce977426 100644 (file)
@@ -239,8 +239,7 @@ void io_req_complete_failed(struct io_kiocb *req, s32 res);
 void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
 void io_req_complete_post(struct io_kiocb *req);
 void __io_req_complete_post(struct io_kiocb *req);
-bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
-                    u32 cflags);
+bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
 void io_cqring_ev_posted(struct io_ring_ctx *ctx);
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx);
 
index 3b89f9a0a0b459d522220fe809bc912548f7e2f1..7c3c5f3ab06b5728902ffe565e64761e4339efbc 100644 (file)
@@ -34,7 +34,6 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_msg *msg = io_kiocb_to_cmd(req);
        struct io_ring_ctx *target_ctx;
-       bool filled;
        int ret;
 
        ret = -EBADFD;
@@ -43,16 +42,8 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 
        ret = -EOVERFLOW;
        target_ctx = req->file->private_data;
-
-       spin_lock(&target_ctx->completion_lock);
-       filled = io_fill_cqe_aux(target_ctx, msg->user_data, msg->len, 0);
-       io_commit_cqring(target_ctx);
-       spin_unlock(&target_ctx->completion_lock);
-
-       if (filled) {
-               io_cqring_ev_posted(target_ctx);
+       if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
                ret = 0;
-       }
 
 done:
        if (ret < 0)
index fe1fe920b9291faa361602835bf824c1e3800e5a..35d0183fe75817c9a2250f8ce9e3f84115d50100 100644 (file)
@@ -644,22 +644,12 @@ retry:
                io_req_set_res(req, ret, 0);
                return IOU_OK;
        }
-       if (ret >= 0) {
-               bool filled;
-
-               spin_lock(&ctx->completion_lock);
-               filled = io_fill_cqe_aux(ctx, req->cqe.user_data, ret,
-                                        IORING_CQE_F_MORE);
-               io_commit_cqring(ctx);
-               spin_unlock(&ctx->completion_lock);
-               if (filled) {
-                       io_cqring_ev_posted(ctx);
-                       goto retry;
-               }
-               ret = -ECANCELED;
-       }
 
-       return ret;
+       if (ret < 0)
+               return ret;
+       if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE))
+               goto retry;
+       return -ECANCELED;
 }
 
 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
index 9ae2982aef7c6cfeb7b1c82bfa88641736a58a00..e0c181fe6264c0fe8f55ec092301c0999b382520 100644 (file)
@@ -214,23 +214,15 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                        __poll_t mask = mangle_poll(req->cqe.res &
                                                    req->apoll_events);
-                       bool filled;
-
-                       spin_lock(&ctx->completion_lock);
-                       filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
-                                                mask, IORING_CQE_F_MORE);
-                       io_commit_cqring(ctx);
-                       spin_unlock(&ctx->completion_lock);
-                       if (filled) {
-                               io_cqring_ev_posted(ctx);
-                               continue;
-                       }
-                       return -ECANCELED;
-               }
 
-               ret = io_poll_issue(req, locked);
-               if (ret)
-                       return ret;
+                       if (!io_post_aux_cqe(ctx, req->cqe.user_data,
+                                            mask, IORING_CQE_F_MORE))
+                               return -ECANCELED;
+               } else {
+                       ret = io_poll_issue(req, locked);
+                       if (ret)
+                               return ret;
+               }
 
                /*
                 * Release all references, retry if someone tried to restart
index 214ff0dfa6a48e23ae31a044fac2dcc18d9a2989..7fed3105152a95bce28bbe872541fcbaedcd96be 100644 (file)
@@ -174,17 +174,13 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
                list_del(&prsrc->list);
 
                if (prsrc->tag) {
-                       if (ctx->flags & IORING_SETUP_IOPOLL)
+                       if (ctx->flags & IORING_SETUP_IOPOLL) {
                                mutex_lock(&ctx->uring_lock);
-
-                       spin_lock(&ctx->completion_lock);
-                       io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
-                       io_commit_cqring(ctx);
-                       spin_unlock(&ctx->completion_lock);
-                       io_cqring_ev_posted(ctx);
-
-                       if (ctx->flags & IORING_SETUP_IOPOLL)
+                               io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
                                mutex_unlock(&ctx->uring_lock);
+                       } else {
+                               io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
+                       }
                }
 
                rsrc_data->do_put(ctx, prsrc);
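
One nuance in the rsrc.c hunk: the restructured if/else preserves the old
behaviour, in that under IORING_SETUP_IOPOLL the whole post, including the
event notification, happens with uring_lock held, while the non-IOPOLL
path calls io_post_aux_cqe() without it. Duplicating the call in both
branches is the price of folding the conditional locking around a helper
that is now opaque.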