diff --git a/block/blk-mq.c b/block/blk-mq.c
index 953f08354c8c36c61b972d1ffe0ede740af2687a..ec922c6bccbe20737d21e03a4d4964c8dab1d66f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -43,6 +43,7 @@
 #include "blk-ioprio.h"
 
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
+static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
 static void blk_mq_request_bypass_insert(struct request *rq,
@@ -1174,15 +1175,11 @@ static inline bool blk_mq_complete_need_ipi(struct request *rq)
 
 static void blk_mq_complete_send_ipi(struct request *rq)
 {
-       struct llist_head *list;
        unsigned int cpu;
 
        cpu = rq->mq_ctx->cpu;
-       list = &per_cpu(blk_cpu_done, cpu);
-       if (llist_add(&rq->ipi_list, list)) {
-               INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
-               smp_call_function_single_async(cpu, &rq->csd);
-       }
+       if (llist_add(&rq->ipi_list, &per_cpu(blk_cpu_done, cpu)))
+               smp_call_function_single_async(cpu, &per_cpu(blk_cpu_csd, cpu));
 }
 
 static void blk_mq_raise_softirq(struct request *rq)
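[ Note: the hunk above leans on llist_add() returning true only when the list
  was empty beforehand, so at most one IPI per CPU is outstanding and a single
  static per-CPU call_single_data (blk_cpu_csd) can replace the per-request
  rq->csd.  Below is a minimal userspace sketch of that "first pusher signals"
  pattern using C11 atomics; lnode/llist_push are illustrative names, not the
  kernel's llist API. ]

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct lnode {
	struct lnode *next;
};

struct llist_sketch {
	_Atomic(struct lnode *) first;
};

/* Lock-free push; returns true if the list was empty before this push,
 * i.e. the caller is the one who must send the wake-up/IPI. */
static bool llist_push(struct llist_sketch *l, struct lnode *n)
{
	struct lnode *old = atomic_load_explicit(&l->first, memory_order_relaxed);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak_explicit(&l->first, &old, n,
							memory_order_release,
							memory_order_relaxed));
	return old == NULL;
}

[ Later pushers see a non-empty list and return false, so they never need a
  second csd; the softirq/IPI handler drains the whole list in one go. ]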
@@ -1343,7 +1340,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
        }
 
        blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
-       blk_mq_run_hw_queue(hctx, false);
+       blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
@@ -2242,6 +2239,8 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
         */
        WARN_ON_ONCE(!async && in_interrupt());
 
+       might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
+
        /*
         * When queue is quiesced, we may be switching io scheduler, or
         * updating nr_hw_queues, or other things, and we can't run queue
@@ -2257,8 +2256,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
        if (!need_run)
                return;
 
-       if (async || (hctx->flags & BLK_MQ_F_BLOCKING) ||
-           !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
+       if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
                blk_mq_delay_run_hw_queue(hctx, 0);
                return;
        }
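[ Note: with BLK_MQ_F_BLOCKING no longer forcing the async path, a blocking
  hctx may now be run synchronously, and the might_sleep_if() added above
  documents (and, with CONFIG_DEBUG_ATOMIC_SLEEP, checks) that such callers
  are in sleepable context.  A rough userspace analogue of that annotation,
  with in_atomic_context as a purely illustrative per-thread flag: ]

#include <assert.h>
#include <stdbool.h>

/* Illustrative stand-in for "this thread may not sleep right now". */
static _Thread_local bool in_atomic_context;

/* Userspace analogue of might_sleep_if(): complain if a path that may
 * sleep is entered while sleeping is not allowed. */
#define might_sleep_if_sketch(cond) \
	assert(!(cond) || !in_atomic_context)

static void run_hw_queue_sketch(bool async, bool hctx_blocking)
{
	/* A synchronous run of a blocking-style queue must come from a
	 * context that is allowed to sleep. */
	might_sleep_if_sketch(!async && hctx_blocking);
	/* ... dispatch would go here ... */
}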
@@ -2393,7 +2391,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-       blk_mq_run_hw_queue(hctx, false);
+       blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
 
@@ -2423,7 +2421,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
        unsigned long i;
 
        queue_for_each_hw_ctx(q, hctx, i)
-               blk_mq_start_stopped_hw_queue(hctx, async);
+               blk_mq_start_stopped_hw_queue(hctx, async ||
+                                       (hctx->flags & BLK_MQ_F_BLOCKING));
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
 
@@ -2481,6 +2480,8 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
        list_for_each_entry(rq, list, queuelist) {
                BUG_ON(rq->mq_ctx != ctx);
                trace_block_rq_insert(rq);
+               if (rq->cmd_flags & REQ_NOWAIT)
+                       run_queue_async = true;
        }
 
        spin_lock(&ctx->lock);
@@ -2641,7 +2642,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
        if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
                blk_mq_insert_request(rq, 0);
-               blk_mq_run_hw_queue(hctx, false);
+               blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
                return;
        }
 
@@ -4402,9 +4403,13 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
                                       int new_nr_hw_queues)
 {
        struct blk_mq_tags **new_tags;
+       int i;
 
-       if (set->nr_hw_queues >= new_nr_hw_queues)
+       if (set->nr_hw_queues >= new_nr_hw_queues) {
+               for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++)
+                       __blk_mq_free_map_and_rqs(set, i);
                goto done;
+       }
 
        new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
                                GFP_KERNEL, set->numa_node);
@@ -4416,6 +4421,16 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
                       sizeof(*set->tags));
        kfree(set->tags);
        set->tags = new_tags;
+
+       for (i = set->nr_hw_queues; i < new_nr_hw_queues; i++) {
+               if (!__blk_mq_alloc_map_and_rqs(set, i)) {
+                       while (--i >= set->nr_hw_queues)
+                               __blk_mq_free_map_and_rqs(set, i);
+                       return -ENOMEM;
+               }
+               cond_resched();
+       }
+
 done:
        set->nr_hw_queues = new_nr_hw_queues;
        return 0;
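[ Note: the hunk above moves map/rqs allocation into
  blk_mq_realloc_tag_set_tags(): on shrink it frees the now-unused tail, on
  grow it allocates the new tail and unwinds everything allocated so far if
  one allocation fails.  A minimal userspace sketch of that grow/shrink-with-
  rollback pattern follows; resize_slots/alloc_slot/free_slot are illustrative
  names, not kernel APIs. ]

#include <stdlib.h>
#include <string.h>

struct slot { int dummy; };

static struct slot *alloc_slot(void) { return calloc(1, sizeof(struct slot)); }
static void free_slot(struct slot *s) { free(s); }

/* Resize an array of *nr allocated slots to new_nr entries.
 * Returns 0 on success, -1 on allocation failure (old count kept). */
static int resize_slots(struct slot ***slots, int *nr, int new_nr)
{
	struct slot **new_slots;
	int i;

	if (*nr >= new_nr) {
		/* Shrinking: release the tail that is no longer used. */
		for (i = new_nr; i < *nr; i++) {
			free_slot((*slots)[i]);
			(*slots)[i] = NULL;
		}
		goto done;
	}

	/* Growing: first widen the pointer array... */
	new_slots = calloc(new_nr, sizeof(*new_slots));
	if (!new_slots)
		return -1;
	if (*nr)
		memcpy(new_slots, *slots, *nr * sizeof(*new_slots));
	free(*slots);
	*slots = new_slots;

	/* ...then populate the new tail, unwinding on failure. */
	for (i = *nr; i < new_nr; i++) {
		(*slots)[i] = alloc_slot();
		if (!(*slots)[i]) {
			while (--i >= *nr)
				free_slot((*slots)[i]);
			return -1;
		}
	}
done:
	*nr = new_nr;
	return 0;
}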
@@ -4749,7 +4764,6 @@ fallback:
                                __blk_mq_free_map_and_rqs(set, i);
 
                        set->nr_hw_queues = prev_nr_hw_queues;
-                       blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
                        goto fallback;
                }
                blk_mq_map_swqueue(q);
@@ -4853,6 +4867,9 @@ static int __init blk_mq_init(void)
 
        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(blk_cpu_done, i));
+       for_each_possible_cpu(i)
+               INIT_CSD(&per_cpu(blk_cpu_csd, i),
+                        __blk_mq_complete_request_remote, NULL);
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
        cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,