author     Pavel Begunkov <asml.silence@gmail.com>    2021-08-09 20:18:10 +0100
committer  Jens Axboe <axboe@kernel.dk>               2021-08-09 13:47:12 -0600
commit     f60d3044cbe617d1b8cd72fd825b489f0bc88a83
tree       15bc8bfbaa882615d2cdfc990c50788c1311cff8
parent     cd22a99bda02864041878392478181722e168781
io_uring: use inflight_entry instead of compl.list
req->compl.list is used to cache freed requests, and a request sitting in
the free cache is by definition no longer inflight, so this use can never
overlap in time with req->inflight_entry. Reuse inflight_entry to link
cached requests and remove compl.list.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e430e79d22d70a190d718831bda7bfed1daf8976.1628536684.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
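
The key observation above is a lifetime argument about intrusive list
nodes: one embedded node can link an object onto different lists at
different points in its life, provided the memberships never coexist.
Below is a minimal userspace sketch of that invariant; it assumes
nothing about the real io_uring code, and list_node, toy_req, and the
two list heads are hypothetical stand-ins for the kernel's list_head
and io_kiocb.

/*
 * Userspace sketch (not kernel code): one embedded list node serving
 * two mutually exclusive lists, mirroring how inflight_entry can take
 * over the role of compl.list.
 */
#include <stdio.h>
#include <stddef.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_add(struct list_node *node, struct list_node *head)
{
	node->next = head->next;
	node->prev = head;
	head->next->prev = node;
	head->next = node;
}

static void list_del(struct list_node *node)
{
	node->prev->next = node->next;
	node->next->prev = node->prev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_req {
	int id;
	/*
	 * While the request is live it sits on an "inflight" list; once
	 * completed it sits on a "free cache" list.  The two phases never
	 * overlap, so a single node suffices.
	 */
	struct list_node entry;
};

int main(void)
{
	struct list_node inflight, free_cache;
	struct toy_req req = { .id = 1 };

	list_init(&inflight);
	list_init(&free_cache);

	/* Phase 1: the request is in flight. */
	list_add(&req.entry, &inflight);

	/* Completion: unlink from inflight before reusing the node. */
	list_del(&req.entry);

	/* Phase 2: cache the "freed" request for later reuse. */
	list_add(&req.entry, &free_cache);

	struct toy_req *cached =
		container_of(free_cache.next, struct toy_req, entry);
	printf("cached request id=%d\n", cached->id);
	return 0;
}

The same reasoning is what lets the patch drop compl.list: a request
cached on locked_free_list or free_list has already left the inflight
phase, so inflight_entry is guaranteed to be free for reuse.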
 fs/io_uring.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a10b371b78dd..039ef580aabe 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -670,7 +670,6 @@ struct io_unlink {
 
 struct io_completion {
 	struct file			*file;
-	struct list_head		list;
 	u32				cflags;
 };
 
@@ -1671,7 +1670,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
 		}
 		io_dismantle_req(req);
 		io_put_task(req->task, 1);
-		list_add(&req->compl.list, &ctx->locked_free_list);
+		list_add(&req->inflight_entry, &ctx->locked_free_list);
 		ctx->locked_free_nr++;
 	} else {
 		if (!percpu_ref_tryget(&ctx->refs))
@@ -1762,9 +1761,9 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 	nr = state->free_reqs;
 	while (!list_empty(&cs->free_list)) {
 		struct io_kiocb *req = list_first_entry(&cs->free_list,
-					struct io_kiocb, compl.list);
+					struct io_kiocb, inflight_entry);
 
-		list_del(&req->compl.list);
+		list_del(&req->inflight_entry);
 		state->reqs[nr++] = req;
 		if (nr == ARRAY_SIZE(state->reqs))
 			break;
@@ -1838,7 +1837,7 @@ static void __io_free_req(struct io_kiocb *req)
 	io_put_task(req->task, 1);
 
 	spin_lock_irqsave(&ctx->completion_lock, flags);
-	list_add(&req->compl.list, &ctx->locked_free_list);
+	list_add(&req->inflight_entry, &ctx->locked_free_list);
 	ctx->locked_free_nr++;
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
@@ -2145,7 +2144,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
 	if (state->free_reqs != ARRAY_SIZE(state->reqs))
 		state->reqs[state->free_reqs++] = req;
 	else
-		list_add(&req->compl.list, &state->comp.free_list);
+		list_add(&req->inflight_entry, &state->comp.free_list);
 }
 
 static void io_submit_flush_completions(struct io_ring_ctx *ctx)
@@ -8629,8 +8628,8 @@ static void io_req_cache_free(struct list_head *list)
 {
 	struct io_kiocb *req, *nxt;
 
-	list_for_each_entry_safe(req, nxt, list, compl.list) {
-		list_del(&req->compl.list);
+	list_for_each_entry_safe(req, nxt, list, inflight_entry) {
+		list_del(&req->inflight_entry);
 		kmem_cache_free(req_cachep, req);
 	}
 }
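
A side note on the last hunk: io_req_cache_free frees entries while
walking the list, which is why it uses list_for_each_entry_safe; the
_safe variant caches the next pointer before the loop body runs, so the
walk survives the deletion. Below is a userspace sketch of that same
pattern over a plain singly-linked list; the node type and values are
hypothetical, not the kernel's macros.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	/* Build a three-element list 0 -> 1 -> 2. */
	struct node *head = NULL;
	for (int i = 2; i >= 0; i--) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	/*
	 * "Safe" walk: nxt is saved before the current node is freed,
	 * mirroring what list_for_each_entry_safe does in the hunk above.
	 */
	struct node *req, *nxt;
	for (req = head; req; req = nxt) {
		nxt = req->next;
		printf("freeing node %d\n", req->val);
		free(req);
	}
	return 0;
}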