author		Jens Axboe <axboe@kernel.dk>	2021-02-19 12:33:30 -0700
committer	Jens Axboe <axboe@kernel.dk>	2021-02-19 13:07:17 -0700
commit		779f4c5754c5a9d30dfe909fad2a2546afd70621 (patch)
tree		e8b130b8c1e4fbdb732b590b61ac2d69791537ef /fs/io_uring.c
parent		0ed35371334de739d8a3293408be41f97a8f93bf (diff)
io-wq: make hashed work map + lock per-ctx (io_uring-worker.v2)
Before the io-wq thread change, we maintained a hash work map and lock
per-node per-ring. That wasn't ideal, as we really wanted it to be per
ring. But now that we have per-task workers, the hash map ends up being
just per-task. That'll work just fine for the normal case of having one
task use a ring, but if you share the ring between tasks, then it's
considerably worse than it was before.

Make the hash map per ctx instead, which provides full per-ctx buffered
write serialization on hashed writes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
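For context, io-wq "hashes" certain work items (such as buffered writes
keyed by inode) so that only one worker runs work for a given hash bucket
at a time. Below is a minimal sketch of that idea against an unsigned long
bitmap like the ctx->hash_map added in this patch; the helper names are
illustrative, not the actual io-wq functions:

	/*
	 * Illustrative sketch only, not the io-wq implementation.
	 * One bit per hash bucket: a set bit means a worker already
	 * owns that bucket, so later hashed work must wait its turn.
	 */
	static bool example_claim_hash(unsigned long *hash_map, unsigned int hash)
	{
		/* atomically claim the bucket; false if already owned */
		return !test_and_set_bit(hash, hash_map);
	}

	static void example_release_hash(unsigned long *hash_map, unsigned int hash)
	{
		/* let the next hashed work for this bucket proceed */
		clear_bit(hash, hash_map);
	}

Keeping one such map per ctx (rather than per task) restores a single
serialization domain per ring, which is the point of this patch.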
Diffstat (limited to 'fs/io_uring.c')
 -rw-r--r--	fs/io_uring.c	4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 239eacec3f3a..e71bc4e3bf08 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -359,6 +359,9 @@ struct io_ring_ctx {
 	unsigned		cached_cq_overflow;
 	unsigned long		sq_check_overflow;
 
+	/* hashed buffered write serialization */
+	unsigned long		hash_map;
+
 	struct list_head	defer_list;
 	struct list_head	timeout_list;
 	struct list_head	cq_overflow_list;
@@ -7796,6 +7799,7 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx)
 	struct io_wq_data data;
 	unsigned int concurrency;
 
 	data.user = ctx->user;
+	data.hash_map = &ctx->hash_map;
 	data.free_work = io_free_work;
 	data.do_work = io_wq_submit_work;
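
With data.hash_map pointing at ctx->hash_map, every task submitting to the
same ring shares one bucket map. A rough sketch of how the io-wq side could
consult it; IO_WQ_WORK_HASHED and io_get_work_hash() exist in fs/io-wq.c of
this era, but this check is an assumed shape, not the patched code:

	/* Illustrative only: gate hashed work on the per-ctx bitmap. */
	static bool example_can_run_work(struct io_wq_data *data,
					 struct io_wq_work *work)
	{
		if (!(work->flags & IO_WQ_WORK_HASHED))
			return true;	/* unhashed work is never serialized */
		/* claim this work's bucket bit in the shared per-ctx map */
		return !test_and_set_bit(io_get_work_hash(work), data->hash_map);
	}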