blk-mq: add blk_mq_init_allocated_queue and export blk_mq_register_disk
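
Split queue setup so a driver can initialize a request_queue it has already allocated: blk_mq_init_queue() becomes a thin wrapper that allocates the queue with blk_alloc_queue_node() and then calls the new blk_mq_init_allocated_queue(), which takes over the old initialization body and gets its own EXPORT_SYMBOL. Since the function no longer allocates the queue itself, the error paths drop the blk_cleanup_queue() call and fall through to err_hctxs; cleaning up a caller-supplied queue is left to the caller, as the new wrapper does on failure. The blk_mq_register_disk export named in the subject is not part of this blk-mq.c hunk. A hedged usage sketch follows the diff below.
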
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b7b8933ec24188b2807229af1aa844503b46dc89..3000121840bb1ca18430fb9421d5647a44f3dabd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1890,10 +1890,26 @@ void blk_mq_release(struct request_queue *q)
 }
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
+{
+       struct request_queue *uninit_q, *q;
+
+       uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+       if (!uninit_q)
+               return ERR_PTR(-ENOMEM);
+
+       q = blk_mq_init_allocated_queue(set, uninit_q);
+       if (IS_ERR(q))
+               blk_cleanup_queue(uninit_q);
+
+       return q;
+}
+EXPORT_SYMBOL(blk_mq_init_queue);
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+                                                 struct request_queue *q)
 {
        struct blk_mq_hw_ctx **hctxs;
        struct blk_mq_ctx __percpu *ctx;
-       struct request_queue *q;
        unsigned int *map;
        int i;
 
@@ -1928,17 +1944,13 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
                hctxs[i]->queue_num = i;
        }
 
-       q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
-       if (!q)
-               goto err_hctxs;
-
        /*
         * Init percpu_ref in atomic mode so that it's faster to shutdown.
         * See blk_register_queue() for details.
         */
        if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
                            PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto err_mq_usage;
+               goto err_hctxs;
 
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, 30000);
@@ -1981,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
        if (blk_mq_init_hw_queues(q, set))
-               goto err_mq_usage;
+               goto err_hctxs;
 
        mutex_lock(&all_q_mutex);
        list_add_tail(&q->all_q_node, &all_q_list);
@@ -1993,8 +2005,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
        return q;
 
-err_mq_usage:
-       blk_cleanup_queue(q);
 err_hctxs:
        kfree(map);
        for (i = 0; i < set->nr_hw_queues; i++) {
@@ -2009,7 +2019,7 @@ err_percpu:
        free_percpu(ctx);
        return ERR_PTR(-ENOMEM);
 }
-EXPORT_SYMBOL(blk_mq_init_queue);
+EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 
 void blk_mq_free_queue(struct request_queue *q)
 {
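
For context, a minimal sketch of how a caller might use the new entry point, assuming the post-patch API above (my_mq_setup() and its parameters are hypothetical, not part of this patch). The queue is allocated separately and then handed to blk_mq_init_allocated_queue(), mirroring what the blk_mq_init_queue() wrapper now does internally:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

/* Hypothetical helper: set up blk-mq on a queue the caller allocates itself. */
static int my_mq_setup(struct blk_mq_tag_set *set, int node,
		       struct request_queue **out_q)
{
	struct request_queue *q, *ret;

	/* Bare queue, not yet tied to blk-mq. */
	q = blk_alloc_queue_node(GFP_KERNEL, node);
	if (!q)
		return -ENOMEM;

	/* New in this patch: initialize the pre-allocated queue for blk-mq. */
	ret = blk_mq_init_allocated_queue(set, q);
	if (IS_ERR(ret)) {
		/* On failure the caller still owns q and must release it. */
		blk_cleanup_queue(q);
		return PTR_ERR(ret);
	}

	*out_q = q;
	return 0;
}

The split mirrors the existing blk_init_queue()/blk_init_allocated_queue() pair on the non-mq side: a caller that has to allocate the request_queue up front can now perform the blk-mq initialization as a separate, second step.
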