From: Jens Axboe
Date: Fri, 22 Nov 2024 16:08:06 +0000 (-0700)
Subject: io_uring: make __tctx_task_work_run() take an io_wq_work_list
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=refs%2Fheads%2Fio_uring-defer-tw.3;p=linux-block.git

io_uring: make __tctx_task_work_run() take an io_wq_work_list

The normal task_work logic doesn't really need it, as it always runs all
of the pending work. But for SQPOLL, it can now pass in its retry_list,
which simplifies the tracking of split up task_work running. This avoids
passing io_wq_work_node around.

Rather than pass in a list, SQPOLL could re-add the leftover items to the
generic task_work list. But that requires re-locking the task_lock and
using task_list for that, whereas having a separate retry list allows for
skipping those steps. The downside is that two lists now need checking,
but that's how it was before as well.

Signed-off-by: Jens Axboe
---

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b9036925f307..a28ebff41184 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1058,20 +1058,20 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, io_tw_token_t tw)
 /*
  * Run queued task_work, returning the number of entries processed in *count.
  * If more entries than max_entries are available, stop processing once this
- * is reached and return the rest of the list.
+ * is reached.
  */
-struct io_wq_work_node *io_handle_tw_list(struct io_wq_work_node *node,
-					  unsigned int *count,
-					  unsigned int max_entries)
+void io_handle_tw_list(struct io_wq_work_list *list, unsigned int *count,
+		       unsigned int max_entries)
 {
 	struct io_ring_ctx *ctx = NULL;
 	struct io_tw_state ts = { };
 
 	do {
-		struct io_wq_work_node *next = node->next;
+		struct io_wq_work_node *node = list->first;
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    io_task_work.node);
 
+		list->first = node->next;
 		if (req->ctx != ctx) {
 			ctx_flush_and_put(ctx, ts);
 			ctx = req->ctx;
@@ -1081,17 +1081,15 @@ struct io_wq_work_node *io_handle_tw_list(struct io_wq_work_node *node,
 		INDIRECT_CALL_2(req->io_task_work.func,
 				io_poll_task_func, io_req_rw_complete,
 				req, ts);
-		node = next;
 		(*count)++;
 		if (unlikely(need_resched())) {
 			ctx_flush_and_put(ctx, ts);
 			ctx = NULL;
 			cond_resched();
 		}
-	} while (node && *count < max_entries);
+	} while (list->first && *count < max_entries);
 
 	ctx_flush_and_put(ctx, ts);
-	return node;
 }
 
 static __cold void __io_fallback_schedule(struct io_ring_ctx *ctx,
@@ -1152,41 +1150,39 @@ static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 	__io_fallback_tw(&tctx->task_list, &tctx->task_lock, sync);
 }
 
-struct io_wq_work_node *__tctx_task_work_run(struct io_uring_task *tctx,
-					     unsigned int max_entries,
-					     unsigned int *count)
+void __tctx_task_work_run(struct io_uring_task *tctx,
+			  struct io_wq_work_list *list,
+			  unsigned int max_entries, unsigned int *count)
 {
-	struct io_wq_work_node *node;
-
 	if (unlikely(current->flags & PF_EXITING)) {
 		io_fallback_tw(tctx, true);
-		return NULL;
+		return;
 	}
 
 	if (!READ_ONCE(tctx->task_list.first))
-		return NULL;
+		return;
 
 	spin_lock_irq(&tctx->task_lock);
-	node = tctx->task_list.first;
+	*list = tctx->task_list;
 	INIT_WQ_LIST(&tctx->task_list);
 	spin_unlock_irq(&tctx->task_lock);
 
-	if (node)
-		node = io_handle_tw_list(node, count, max_entries);
+	if (!wq_list_empty(list))
+		io_handle_tw_list(list, count, max_entries);
 
 	/* relaxed read is enough as only the task itself sets ->in_cancel */
 	if (unlikely(atomic_read(&tctx->in_cancel)))
 		io_uring_drop_tctx_refs(current);
 
 	trace_io_uring_task_work_run(tctx, *count);
-	return node;
 }
 
 unsigned int tctx_task_work_run(struct io_uring_task *tctx)
 {
+	struct io_wq_work_list list;
 	unsigned int count = 0;
 
-	__tctx_task_work_run(tctx, UINT_MAX, &count);
+	__tctx_task_work_run(tctx, &list, UINT_MAX, &count);
 	return count;
 }
 
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 91844abbc60b..b1cefacd32af 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -95,12 +95,11 @@ void io_req_task_complete(struct io_kiocb *req, io_tw_token_t tw);
 void io_req_task_queue_fail(struct io_kiocb *req, int ret);
 void io_req_task_submit(struct io_kiocb *req, io_tw_token_t tw);
 void tctx_task_work(struct callback_head *cb);
-struct io_wq_work_node *io_handle_tw_list(struct io_wq_work_node *node,
-	unsigned int *count, unsigned int max_entries);
-struct io_wq_work_node *__tctx_task_work_run(struct io_uring_task *tctx,
-	unsigned int max_entries, unsigned int *count);
-struct io_wq_work_node *__tctx_task_work_run(struct io_uring_task *tctx,
-	unsigned int max_entries, unsigned int *count);
+void io_handle_tw_list(struct io_wq_work_list *list, unsigned int *count,
+		       unsigned int max_entries);
+void __tctx_task_work_run(struct io_uring_task *tctx,
+			  struct io_wq_work_list *list, unsigned int max_entries,
+			  unsigned int *count);
 unsigned int tctx_task_work_run(struct io_uring_task *tctx);
 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
 int io_uring_alloc_task_context(struct task_struct *task,
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 6d0a71304ab8..4df1bb124aab 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -229,29 +229,29 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd)
  * than we were asked to process. Newly queued task_work isn't run until the
  * retry list has been fully processed.
  */
-static unsigned int io_sq_tw(struct io_wq_work_node **retry_list, int max_entries)
+static unsigned int io_sq_tw(struct io_wq_work_list *retry_list, int max_entries)
 {
 	struct io_uring_task *tctx = current->io_uring;
 	unsigned int count = 0;
 
-	if (*retry_list) {
-		*retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
+	if (!wq_list_empty(retry_list)) {
+		io_handle_tw_list(retry_list, &count, max_entries);
 		if (count >= max_entries)
 			goto out;
 		max_entries -= count;
 	}
-	*retry_list = __tctx_task_work_run(tctx, max_entries, &count);
+	__tctx_task_work_run(tctx, retry_list, max_entries, &count);
 out:
 	if (task_work_pending(current))
 		task_work_run();
 	return count;
 }
 
-static bool io_sq_tw_pending(struct io_wq_work_node *retry_list)
+static bool io_sq_tw_pending(struct io_wq_work_list *retry_list)
 {
 	struct io_uring_task *tctx = current->io_uring;
 
-	return retry_list || READ_ONCE(tctx->task_list.first);
+	return !wq_list_empty(retry_list) || !wq_list_empty(&tctx->task_list);
 }
 
 static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
@@ -267,7 +267,7 @@ static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start)
 
 static int io_sq_thread(void *data)
 {
-	struct io_wq_work_node *retry_list = NULL;
+	struct io_wq_work_list retry_list;
 	struct io_sq_data *sqd = data;
 	struct io_ring_ctx *ctx;
 	struct rusage start;
@@ -305,6 +305,7 @@ static int io_sq_thread(void *data)
 	audit_uring_entry(IORING_OP_NOP);
 	audit_uring_exit(true, 0);
 
+	INIT_WQ_LIST(&retry_list);
 	mutex_lock(&sqd->lock);
 	while (1) {
 		bool cap_entries, sqt_spin = false;
@@ -345,7 +346,8 @@ static int io_sq_thread(void *data)
 		}
 
 		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
-		if (!io_sqd_events_pending(sqd) && !io_sq_tw_pending(retry_list)) {
+		if (!io_sqd_events_pending(sqd) &&
+		    !io_sq_tw_pending(&retry_list)) {
 			bool needs_sched = true;
 
 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -384,7 +386,7 @@ static int io_sq_thread(void *data)
 		timeout = jiffies + sqd->sq_thread_idle;
 	}
 
-	if (retry_list)
+	if (!wq_list_empty(&retry_list))
 		io_sq_tw(&retry_list, UINT_MAX);
 
 	io_uring_cancel_generic(true, sqd);
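
Editorial aside, not part of the patch: the splice-then-drain pattern the commit message describes can be illustrated with a small, self-contained userspace sketch. All names below (sketch_list, sketch_splice, sketch_drain, and so on) are hypothetical and only loosely modeled on io_wq_work_list; it is a sketch of the idea, not kernel code.

/*
 * Minimal sketch, assuming hypothetical names: splice a shared list under
 * a lock into a caller-provided list, run at most max_entries from it, and
 * leave any remainder on that list so a later pass (the SQPOLL retry list
 * in the commit) can continue without taking the lock again.
 */
#include <pthread.h>
#include <stdio.h>

struct sketch_node {
	struct sketch_node *next;
	int id;
};

struct sketch_list {
	struct sketch_node *first;
	struct sketch_node **last;	/* tail pointer for O(1) append */
};

static void sketch_list_init(struct sketch_list *l)
{
	l->first = NULL;
	l->last = &l->first;
}

static void sketch_list_add(struct sketch_list *l, struct sketch_node *n)
{
	n->next = NULL;
	*l->last = n;
	l->last = &n->next;
}

/* Grab everything from the shared list under the lock, in one assignment */
static void sketch_splice(struct sketch_list *shared, pthread_mutex_t *lock,
			  struct sketch_list *out)
{
	pthread_mutex_lock(lock);
	*out = *shared;
	if (!out->first)		/* empty: don't leave 'last' pointing into 'shared' */
		out->last = &out->first;
	sketch_list_init(shared);
	pthread_mutex_unlock(lock);
}

/* Pop and "run" at most max_entries, leaving leftovers on the list */
static unsigned int sketch_drain(struct sketch_list *list, unsigned int max_entries)
{
	unsigned int count = 0;

	while (list->first && count < max_entries) {
		struct sketch_node *n = list->first;

		list->first = n->next;
		printf("ran work item %d\n", n->id);
		count++;
	}
	if (!list->first)		/* emptied: reset the tail pointer */
		list->last = &list->first;
	return count;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct sketch_node items[4] = { { NULL, 0 }, { NULL, 1 }, { NULL, 2 }, { NULL, 3 } };
	struct sketch_list shared, retry;
	int i;

	sketch_list_init(&shared);
	sketch_list_init(&retry);
	for (i = 0; i < 4; i++)
		sketch_list_add(&shared, &items[i]);

	/* First pass: splice the whole shared list, but only run two entries */
	sketch_splice(&shared, &lock, &retry);
	sketch_drain(&retry, 2);

	/* Later pass: finish the leftovers without touching the lock again */
	sketch_drain(&retry, ~0U);
	return 0;
}

The second sketch_drain() call is the point of the change: leftover entries stay on the caller's list, so finishing them needs no re-locking and no re-insertion into the shared list, at the cost of checking two lists for pending work.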