Merge branch 'for-3.20' of git://git.infradead.org/users/kbusch/linux-nvme into for...
[linux-2.6-block.git] / block / blk-mq.c
index 9ee3b87c44984d336dbd4c82572fd3a4c3d35e90..4f4bea21052e41068112ead8cbb4e0a42cb7a9d6 100644
@@ -33,6 +33,7 @@ static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
+static void blk_mq_run_queues(struct request_queue *q);
 
 /*
  * Check if any of the ctx's have pending work in this hardware queue
@@ -117,7 +118,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
 
        if (freeze) {
                percpu_ref_kill(&q->mq_usage_counter);
-               blk_mq_run_queues(q, false);
+               blk_mq_run_queues(q);
        }
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
@@ -136,6 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
 }
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
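
The EXPORT_SYMBOL_GPL added above is what lets the NVMe side of this merge freeze a queue from driver context. A minimal sketch of how a driver might use the freeze/unfreeze pair (my_dev and my_reset_hw() are illustrative names, not from this patch):

	static void my_reset_controller(struct my_dev *dev)
	{
		/* block new requests and wait for in-flight ones to drain */
		blk_mq_freeze_queue(dev->queue);
		my_reset_hw(dev);		/* hypothetical hardware reset */
		/* re-enable the usage counter and resume dispatch */
		blk_mq_unfreeze_queue(dev->queue);
	}
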
@@ -902,7 +904,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
                        &hctx->run_work, 0);
 }
 
-void blk_mq_run_queues(struct request_queue *q, bool async)
+static void blk_mq_run_queues(struct request_queue *q)
 {
        struct blk_mq_hw_ctx *hctx;
        int i;
@@ -913,10 +915,9 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
                    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                        continue;
 
-               blk_mq_run_hw_queue(hctx, async);
+               blk_mq_run_hw_queue(hctx, false);
        }
 }
-EXPORT_SYMBOL(blk_mq_run_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
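
With blk_mq_run_queues() now static and its export gone, code outside blk-mq.c that wants the same effect walks the hardware contexts itself. A sketch mirroring the loop above, assuming the caller can reach the per-hctx helper declared in blk-mq.h:

	struct blk_mq_hw_ctx *hctx;
	int i;

	/* kick every hardware queue that is not stopped; true = async */
	queue_for_each_hw_ctx(q, hctx, i)
		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			blk_mq_run_hw_queue(hctx, true);
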
@@ -954,7 +955,6 @@ void blk_mq_start_hw_queues(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queues);
 
-
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 {
        struct blk_mq_hw_ctx *hctx;
@@ -1423,7 +1423,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
        size_t rq_size, left;
 
        tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
-                               set->numa_node);
+                               set->numa_node,
+                               BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
        if (!tags)
                return NULL;
 
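
The new argument unpacks a tag allocation policy that now rides in the tag_set flags. On the driver side the policy would be packed with the companion macro from the same series; a sketch, assuming the BLK_ALLOC_POLICY_TO_MQ_FLAG() and BLK_TAG_ALLOC_FIFO names:

	/* request FIFO tag allocation alongside the usual merge flag */
	set->flags = BLK_MQ_F_SHOULD_MERGE |
		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
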
@@ -1867,6 +1868,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
        mutex_unlock(&set->tag_list_lock);
 }
 
+/*
+ * This is the actual release handler for mq, but we do it from the
+ * request queue's release handler to avoid a use-after-free:
+ * q->mq_kobj shouldn't have been introduced, but we can't group the
+ * ctx/hctx kobjects without it.
+ */
+void blk_mq_release(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+
+       /* hctx kobj stays in hctx */
+       queue_for_each_hw_ctx(q, hctx, i)
+               kfree(hctx);
+
+       kfree(q->queue_hw_ctx);
+
+       /* ctx kobj stays in queue_ctx */
+       free_percpu(q->queue_ctx);
+}
+
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
        struct blk_mq_hw_ctx **hctxs;
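
As the new comment explains, the actual teardown is driven from the request queue's release handler. The caller side would sit in blk-sysfs.c's blk_release_queue(); an abridged sketch, not the full function:

	static void blk_release_queue(struct kobject *kobj)
	{
		struct request_queue *q =
			container_of(kobj, struct request_queue, kobj);

		/* ... other queue teardown elided ... */
		if (q->mq_ops)
			blk_mq_release(q);	/* free hctxs and percpu ctx */
	}
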
@@ -2000,10 +2022,8 @@ void blk_mq_free_queue(struct request_queue *q)
 
        percpu_ref_exit(&q->mq_usage_counter);
 
-       kfree(q->queue_hw_ctx);
        kfree(q->mq_map);
 
-       q->queue_hw_ctx = NULL;
        q->mq_map = NULL;
 
        mutex_lock(&all_q_mutex);