summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMing Lei <ming.lei@redhat.com>2021-12-03 16:17:03 +0800
committerJens Axboe <axboe@kernel.dk>2021-12-03 06:36:28 -0700
commit2385ebf38f94d4f7761b1e9a4973d04753da02c2 (patch)
treed65257b171cf5644fa5e87a4949248815c364cae
parent545a32498c536ee152331cd2e7d2416aa0f20e01 (diff)
block: null_blk: batched complete poll requests
Complete poll requests via blk_mq_add_to_batch() and blk_mq_end_request_batch(), so that we can cover the batched-complete code path by running the null_blk test. Meanwhile, this approach shows a ~14% IOPS boost on 't/io_uring /dev/nullb0' in my test. Signed-off-by: Ming Lei <ming.lei@redhat.com> Link: https://lore.kernel.org/r/20211203081703.3506020-1-ming.lei@redhat.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--drivers/block/null_blk/main.c4
1 file changed, 3 insertions, 1 deletion
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index b4ff5ae1f70c..20534a2daf17 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1574,7 +1574,9 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
- end_cmd(cmd);
+ if (!blk_mq_add_to_batch(req, iob, cmd->error,
+ blk_mq_end_request_batch))
+ end_cmd(cmd);
nr++;
}