 	WARN_ON_ONCE(ret);
 }
 
-static inline void io_req_local_work_add(struct io_kiocb *req,
-					 struct io_ring_ctx *ctx,
-					 unsigned flags)
+static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 {
+	struct io_ring_ctx *ctx = req->ctx;
 	unsigned nr_wait, nr_tw, nr_tw_prev;
 	struct llist_node *head;
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
 {
 	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
-		io_req_local_work_add(req, req->ctx, flags);
+		io_req_local_work_add(req, flags);
 	else
 		io_req_normal_work_add(req);
 }
 
-void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
-				 unsigned flags)
+void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags)
 {
-	if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
+	if (WARN_ON_ONCE(!(req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
 		return;
-	io_req_local_work_add(req, ctx, flags);
+	__io_req_task_work_add(req, flags);
 }
 
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 			       unsigned issue_flags);
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
-void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
-				 unsigned flags);
+void io_req_task_work_add_remote(struct io_kiocb *req, unsigned flags);
 void io_req_task_queue(struct io_kiocb *req);
 void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw);
 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
 	req->ctx = ctx;
 	req->tctx = NULL;
 	req->io_task_work.func = io_msg_tw_complete;
-	io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
+	io_req_task_work_add_remote(req, IOU_F_TWQ_LAZY_WAKE);
 	return 0;
 }
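
Taken together, the change removes a redundant parameter: every io_kiocb already records its owning ring in req->ctx, so threading a separate ctx argument through these helpers only created an opportunity for the two to disagree. As a rough standalone illustration of the same refactoring pattern (made-up ring_ctx/request types and helper names, not the io_uring structures or APIs), the "after" helper derives the context from the request itself:

#include <stdio.h>

/* Made-up stand-ins for io_ring_ctx / io_kiocb, only to model the pattern. */
struct ring_ctx {
	unsigned flags;
};

struct request {
	struct ring_ctx *ctx;	/* each request already records its owning context */
	int id;
};

#define SETUP_DEFER_TASKRUN	(1u << 0)

/* Old shape: the caller passes ctx alongside req even though req->ctx is
 * always the same object, so the two arguments can drift apart. */
static void work_add_old(struct request *req, struct ring_ctx *ctx, unsigned flags)
{
	if (ctx->flags & SETUP_DEFER_TASKRUN)
		printf("old: queue req %d (flags=%u)\n", req->id, flags);
}

/* New shape, mirroring the patch: derive the context from the request. */
static void work_add_new(struct request *req, unsigned flags)
{
	struct ring_ctx *ctx = req->ctx;

	if (ctx->flags & SETUP_DEFER_TASKRUN)
		printf("new: queue req %d (flags=%u)\n", req->id, flags);
}

int main(void)
{
	struct ring_ctx ctx = { .flags = SETUP_DEFER_TASKRUN };
	struct request req = { .ctx = &ctx, .id = 1 };

	work_add_old(&req, &ctx, 0);	/* caller must keep req and ctx consistent */
	work_add_new(&req, 0);		/* caller cannot pass a mismatched ctx */
	return 0;
}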