block: wake up waiters when a queue is marked dying
authorJens Axboe <axboe@fb.com>
Mon, 22 Dec 2014 21:04:42 +0000 (14:04 -0700)
committerJens Axboe <axboe@fb.com>
Wed, 31 Dec 2014 16:39:16 +0000 (09:39 -0700)
If the queue is dying, we can't expect new requests to complete and,
in turn, wake up other tasks waiting for requests. So after we have
marked it as dying, wake up everybody currently waiting for a
request. Once they wake, they will retry their allocation and fail
appropriately due to the state of the queue.
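
For context, the waiter side works roughly as sketched below. This is a
simplified illustration of a request-allocation slow path, not the kernel's
actual get_request() code; try_alloc_request() is a hypothetical stand-in
for a single non-blocking allocation attempt.

#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical helper: one non-blocking allocation attempt. */
static struct request *try_alloc_request(struct request_queue *q,
					 struct request_list *rl, int rw);

/*
 * Sketch only: sleep until a request frees up, but bail out once the
 * queue has been marked dying. blk_set_queue_dying() setting the flag
 * and then waking &rl->wait[rw] is what guarantees nobody sleeps here
 * forever.
 */
static struct request *alloc_request_sketch(struct request_queue *q,
					    struct request_list *rl, int rw)
{
	DEFINE_WAIT(wait);
	struct request *rq;

	for (;;) {
		rq = try_alloc_request(q, rl, rw);
		if (rq)
			return rq;

		prepare_to_wait(&rl->wait[rw], &wait, TASK_UNINTERRUPTIBLE);

		/*
		 * Re-check after queueing ourselves: a concurrent
		 * blk_set_queue_dying() either sees us and wakes us,
		 * or we see the flag here and fail the allocation.
		 */
		if (blk_queue_dying(q)) {
			finish_wait(&rl->wait[rw], &wait);
			return NULL;
		}

		io_schedule();
		finish_wait(&rl->wait[rw], &wait);
	}
}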

Tested-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-core.c
block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk-mq.h

block/blk-core.c
index 30f6153a40c27c7154dd453301807c7d39d1451c..3ad405571dcc5105a52da4284477a187db936f64 100644
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+       if (q->mq_ops)
+               blk_mq_wake_waiters(q);
+       else {
+               struct request_list *rl;
+
+               blk_queue_for_each_rl(rl, q) {
+                       if (rl->rq_pool) {
+                               wake_up(&rl->wait[BLK_RW_SYNC]);
+                               wake_up(&rl->wait[BLK_RW_ASYNC]);
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
        /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
-       queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+       blk_set_queue_dying(q);
        spin_lock_irq(lock);
 
        /*
block/blk-mq-tag.c
index 32e8dbb9ad1c49f0078e57fae100f0e6a7eb8a73..60c9d4a93fe470ced7471cd8653d8a00fc8922d7 100644
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 }
 
 /*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wakeup all potentially sleeping on tags
  */
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
 
                wake_index = bt_index_inc(wake_index);
        }
+
+       if (include_reserve) {
+               bt = &tags->breserved_tags;
+               if (waitqueue_active(&bt->bs[0].wait))
+                       wake_up(&bt->bs[0].wait);
+       }
 }
 
 /*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 
        atomic_dec(&tags->active_queues);
 
-       blk_mq_tag_wakeup_all(tags);
+       blk_mq_tag_wakeup_all(tags, false);
 }
 
 /*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
         * static and should never need resizing.
         */
        bt_update_count(&tags->bitmap_tags, tdepth);
-       blk_mq_tag_wakeup_all(tags);
+       blk_mq_tag_wakeup_all(tags, false);
        return 0;
 }
 
block/blk-mq-tag.h
index 6206ed17ef766714b655a715ffbc05fd34b0463a..a6fa0fc9d41a2e91c8bb4ce29bb2b1a0d952c8ed 100644
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 
 enum {
        BLK_MQ_TAG_CACHE_MIN    = 1,
block/blk-mq.c
index 97ebb84b5633b1f86e5a21a18319b3d061729b5b..1a41d7aefbd57fe923e25c02835db5971838632b 100644
@@ -152,6 +152,16 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+
+       queue_for_each_hw_ctx(q, hctx, i)
+               if (blk_mq_hw_queue_mapped(hctx))
+                       blk_mq_tag_wakeup_all(hctx->tags, true);
+}
+
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 {
        return blk_mq_has_free_tags(hctx->tags);
block/blk-mq.h
index 206230e64f7915e642ce7306aec8b949deca43b1..4f4f943c22c3d1e907ef2c18224b8635f0057e82 100644
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
                struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
 
 /*
  * CPU hotplug helpers
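
For illustration, here is a hedged sketch of how a driver's hot-removal path
could use the newly exported blk_set_queue_dying() so that tasks blocked on
request allocation fail fast. The my_dev/my_ns types and the teardown
sequence are hypothetical, not part of this commit.

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>

struct my_ns {				/* hypothetical per-disk state */
	struct list_head list;
	struct request_queue *queue;
	struct gendisk *disk;
};

struct my_dev {				/* hypothetical device state */
	struct list_head namespaces;
};

static void my_driver_remove_queues(struct my_dev *dev)
{
	struct my_ns *ns;

	list_for_each_entry(ns, &dev->namespaces, list) {
		/*
		 * Mark the queue dying before teardown: this wakes any
		 * tasks sleeping on request/tag allocation, and their
		 * retries fail instead of hanging on a dead device.
		 */
		blk_set_queue_dying(ns->queue);
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
	}
}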