io_uring: lockdep annotate CQ locking
author Pavel Begunkov <asml.silence@gmail.com>
Wed, 4 Jan 2023 01:34:57 +0000 (01:34 +0000)
committer Jens Axboe <axboe@kernel.dk>
Wed, 4 Jan 2023 02:05:41 +0000 (19:05 -0700)
Locking around CQE posting is complex and depends on the options the ring
was created with; add more thorough lockdep annotations checking all of the
invariants.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/aa3770b4eacae3915d782cc2ab2f395a99b4b232.1672795976.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/io_uring.c
io_uring/io_uring.h

index 6bed44855679867b4c52eadd95d8977962f289ad..472574192dd63f4d58d7f626d277e763cd9a7bc1 100644 (file)
@@ -731,6 +731,8 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
        size_t ocq_size = sizeof(struct io_overflow_cqe);
        bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
 
+       lockdep_assert_held(&ctx->completion_lock);
+
        if (is_cqe32)
                ocq_size += sizeof(struct io_uring_cqe);
 
@@ -820,9 +822,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 {
        struct io_uring_cqe *cqe;
 
-       if (!ctx->task_complete)
-               lockdep_assert_held(&ctx->completion_lock);
-
        ctx->cq_extra++;
 
        /*
index e9f0d41ebb9960189930e2699669fc206b0b764f..ab4b2a1c3b7e80fba53d0c090962fd99a475aea4 100644 (file)
@@ -79,6 +79,19 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
                        bool cancel_all);
 
+#define io_lockdep_assert_cq_locked(ctx)                               \
+       do {                                                            \
+               if (ctx->flags & IORING_SETUP_IOPOLL) {                 \
+                       lockdep_assert_held(&ctx->uring_lock);          \
+               } else if (!ctx->task_complete) {                       \
+                       lockdep_assert_held(&ctx->completion_lock);     \
+               } else if (ctx->submitter_task->flags & PF_EXITING) {   \
+                       lockdep_assert(current_work());                 \
+               } else {                                                \
+                       lockdep_assert(current == ctx->submitter_task); \
+               }                                                       \
+       } while (0)
+
 static inline void io_req_task_work_add(struct io_kiocb *req)
 {
        __io_req_task_work_add(req, true);
@@ -92,6 +105,8 @@ void io_cq_unlock_post(struct io_ring_ctx *ctx);
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
                                                       bool overflow)
 {
+       io_lockdep_assert_cq_locked(ctx);
+
        if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
                struct io_uring_cqe *cqe = ctx->cqe_cached;
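
For illustration only: the io_lockdep_assert_cq_locked() macro added above
encodes four mutually exclusive CQ-locking regimes. Below is a minimal
userspace sketch (not kernel code) of that decision tree. The struct, its
field names, the flag value, and the helper cq_lock_rule() are simplified
stand-ins invented for this example; only the branch order mirrors the macro
in the diff.

/*
 * Userspace model of the CQ-locking decision tree from
 * io_lockdep_assert_cq_locked(). Build with: cc -std=c99 sketch.c
 */
#include <stdbool.h>
#include <stdio.h>

#define IORING_SETUP_IOPOLL (1U << 0)	/* placeholder; see include/uapi/linux/io_uring.h */

struct ring_model {
	unsigned flags;			/* ring setup flags */
	bool task_complete;		/* completions posted only by the submitter task */
	bool submitter_exiting;		/* PF_EXITING set on the submitter task */
};

/* Which invariant must hold before touching the CQ ring? */
static const char *cq_lock_rule(const struct ring_model *ctx)
{
	if (ctx->flags & IORING_SETUP_IOPOLL)
		return "uring_lock must be held";
	if (!ctx->task_complete)
		return "completion_lock must be held";
	if (ctx->submitter_exiting)
		return "must run from task_work (current_work() != NULL)";
	return "must run in the submitter task itself";
}

int main(void)
{
	struct ring_model iopoll = { .flags = IORING_SETUP_IOPOLL };
	struct ring_model shared = { .task_complete = false };
	struct ring_model single = { .task_complete = true };

	printf("IOPOLL ring:          %s\n", cq_lock_rule(&iopoll));
	printf("shared-completion:    %s\n", cq_lock_rule(&shared));
	printf("single-issuer style:  %s\n", cq_lock_rule(&single));
	return 0;
}

In the kernel the same checks are made via lockdep_assert_held() and
lockdep_assert(), so they compile away entirely when lockdep is disabled;
the sketch only shows which condition each branch is meant to enforce.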