ublk_drv: requeue rqs with recovery feature enabled
author ZiyangZhang <ZiyangZhang@linux.alibaba.com>
Fri, 23 Sep 2022 15:39:15 +0000 (23:39 +0800)
committer Jens Axboe <axboe@kernel.dk>
Sat, 24 Sep 2022 01:09:56 +0000 (19:09 -0600)
With the recovery feature enabled, in ublk_queue_rq or task work
(in exit_task_work or the fallback wq), we requeue rqs instead of
ending (aborting) them. Besides, no matter whether the recovery
feature is enabled or disabled, we schedule monitor_work immediately.
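For context, the new __ublk_abort_rq() helper below gates on a recovery
check introduced earlier in this series. A minimal sketch of that check,
assuming it simply tests the UBLK_F_USER_RECOVERY feature bit stored in
the per-queue flags, looks like:

	/* Sketch only: assumes ubq->flags carries the device feature flags
	 * and UBLK_F_USER_RECOVERY is the user-recovery feature bit.
	 */
	static inline bool ublk_queue_can_use_recovery(struct ublk_queue *ubq)
	{
		return ubq->flags & UBLK_F_USER_RECOVERY;
	}

With that check true, a dying ubq_daemon leads to blk_mq_requeue_request()
so the request can be re-dispatched after recovery, instead of an immediate
BLK_STS_IOERR completion.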

Signed-off-by: ZiyangZhang <ZiyangZhang@linux.alibaba.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220923153919.44078-4-ZiyangZhang@linux.alibaba.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/ublk_drv.c

index 05bfbaa49696ab9b03b97d9a1e2998c0df02d143..9ce5617d21df23f3cd33e45778b05cbe19dc8f6d 100644 (file)
@@ -655,10 +655,21 @@ static void ubq_complete_io_cmd(struct ublk_io *io, int res)
 
 #define UBLK_REQUEUE_DELAY_MS  3
 
+static inline void __ublk_abort_rq(struct ublk_queue *ubq,
+               struct request *rq)
+{
+       /* We cannot process this rq so just requeue it. */
+       if (ublk_queue_can_use_recovery(ubq))
+               blk_mq_requeue_request(rq, false);
+       else
+               blk_mq_end_request(rq, BLK_STS_IOERR);
+
+       mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
+}
+
 static inline void __ublk_rq_task_work(struct request *req)
 {
        struct ublk_queue *ubq = req->mq_hctx->driver_data;
-       struct ublk_device *ub = ubq->dev;
        int tag = req->tag;
        struct ublk_io *io = &ubq->ios[tag];
        unsigned int mapped_bytes;
@@ -677,8 +688,7 @@ static inline void __ublk_rq_task_work(struct request *req)
         * (2) current->flags & PF_EXITING.
         */
        if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
-               blk_mq_end_request(req, BLK_STS_IOERR);
-               mod_delayed_work(system_wq, &ub->monitor_work, 0);
+               __ublk_abort_rq(ubq, req);
                return;
        }
 
@@ -768,8 +778,8 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        if (unlikely(ubq_daemon_is_dying(ubq))) {
  fail:
-               mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
-               return BLK_STS_IOERR;
+               __ublk_abort_rq(ubq, rq);
+               return BLK_STS_OK;
        }
 
        if (ublk_can_use_task_work(ubq)) {