#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);

static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
                                           unsigned int cpu)
{
        return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes per-cpu software queues. They could be per-node
 * as well, for instance. For now this is hardcoded as-is. Note that we don't
 * care about preemption, since we know the ctx's are persistent. This does
 * mean that we can't rely on ctx always matching the currently running CPU.
 */
static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
        return __blk_mq_get_ctx(q, get_cpu());
}

static void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
        put_cpu();
}

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
        unsigned int i;

        for (i = 0; i < hctx->nr_ctx_map; i++)
                if (hctx->ctx_map[i])
                        return true;

        return false;
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        if (!test_bit(ctx->index_hw, hctx->ctx_map))
                set_bit(ctx->index_hw, hctx->ctx_map);
}

static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
                                              gfp_t gfp, bool reserved)
{
        struct request *rq;
        unsigned int tag;

        tag = blk_mq_get_tag(hctx->tags, gfp, reserved);
        if (tag != BLK_MQ_TAG_FAIL) {
                rq = hctx->tags->rqs[tag];
                blk_rq_init(hctx->queue, rq);
                rq->tag = tag;

                return rq;
        }

        return NULL;
}

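/*
 * Take a reference on the queue's usage counter. If the queue is in bypass
 * (e.g. being frozen) after initialization is done, drop the reference and
 * sleep until the queue leaves bypass or dies. Returns 0 on success,
 * -ENODEV if the queue is dying, or an error if the wait is interrupted.
 */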
static int blk_mq_queue_enter(struct request_queue *q)
{
        int ret;

        __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
        smp_wmb();
        /* we cannot freeze the queue while it is still initializing */
        if (!blk_queue_bypass(q) || !blk_queue_init_done(q))
                return 0;

        __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);

        spin_lock_irq(q->queue_lock);
        ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
                !blk_queue_bypass(q) || blk_queue_dying(q),
                *q->queue_lock);
        /* increment usage with the lock held so freeze_queue can't run here */
        if (!ret && !blk_queue_dying(q))
                __percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
        else if (blk_queue_dying(q))
                ret = -ENODEV;
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static void blk_mq_queue_exit(struct request_queue *q)
{
        __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
}

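/*
 * Wait for the usage counter to drop to zero, running the queues every
 * 10ms in between to flush out any pending requests.
 */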
static void __blk_mq_drain_queue(struct request_queue *q)
{
        while (true) {
                s64 count;

                spin_lock_irq(q->queue_lock);
                count = percpu_counter_sum(&q->mq_usage_counter);
                spin_unlock_irq(q->queue_lock);

                if (count == 0)
                        break;
                blk_mq_run_queues(q, false);
                msleep(10);
        }
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
static void blk_mq_freeze_queue(struct request_queue *q)
{
        bool drain;

        spin_lock_irq(q->queue_lock);
        drain = !q->bypass_depth++;
        queue_flag_set(QUEUE_FLAG_BYPASS, q);
        spin_unlock_irq(q->queue_lock);

        if (drain)
                __blk_mq_drain_queue(q);
}

void blk_mq_drain_queue(struct request_queue *q)
{
        __blk_mq_drain_queue(q);
}

static void blk_mq_unfreeze_queue(struct request_queue *q)
{
        bool wake = false;

        spin_lock_irq(q->queue_lock);
        if (!--q->bypass_depth) {
                queue_flag_clear(QUEUE_FLAG_BYPASS, q);
                wake = true;
        }
        WARN_ON_ONCE(q->bypass_depth < 0);
        spin_unlock_irq(q->queue_lock);
        if (wake)
                wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
        return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
                               struct request *rq, unsigned int rw_flags)
{
        if (blk_queue_io_stat(q))
                rw_flags |= REQ_IO_STAT;

        rq->mq_ctx = ctx;
        rq->cmd_flags = rw_flags;
        rq->start_time = jiffies;
        set_start_time_ns(rq);
        ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}

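/*
 * Allocate a request and initialize it against the current ctx. On success
 * the ctx is left pinned (CPU reference held via blk_mq_get_ctx()) and the
 * caller must issue the matching blk_mq_put_ctx(). For __GFP_WAIT callers
 * we run the hardware queue and wait for a tag before retrying.
 */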
static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
                                                   int rw, gfp_t gfp,
                                                   bool reserved)
{
        struct request *rq;

        do {
                struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
                struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);

                rq = __blk_mq_alloc_request(hctx, gfp & ~__GFP_WAIT, reserved);
                if (rq) {
                        blk_mq_rq_ctx_init(q, ctx, rq, rw);
                        break;
                }

                if (gfp & __GFP_WAIT) {
                        __blk_mq_run_hw_queue(hctx);
                        blk_mq_put_ctx(ctx);
                } else {
                        blk_mq_put_ctx(ctx);
                        break;
                }

                blk_mq_wait_for_tags(hctx->tags);
        } while (1);

        return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
{
        struct request *rq;

        if (blk_mq_queue_enter(q))
                return NULL;

        rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
        if (rq)
                blk_mq_put_ctx(rq->mq_ctx);
        return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
                                              gfp_t gfp)
{
        struct request *rq;

        if (blk_mq_queue_enter(q))
                return NULL;

        rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
        if (rq)
                blk_mq_put_ctx(rq->mq_ctx);
        return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_reserved_request);

static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx, struct request *rq)
{
        const int tag = rq->tag;
        struct request_queue *q = rq->q;

        blk_mq_put_tag(hctx->tags, tag);
        blk_mq_queue_exit(q);
}

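/*
 * Free a request back to its tag set: account the completion, then return
 * the tag and drop the queue usage reference in __blk_mq_free_request().
 */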
void blk_mq_free_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q = rq->q;

        ctx->rq_completed[rq_is_sync(rq)]++;

        hctx = q->mq_ops->map_queue(q, ctx->cpu);
        __blk_mq_free_request(hctx, ctx, rq);
}

/*
 * Clone all relevant state from a request that has been put on hold in
 * the flush state machine into the preallocated flush request that hangs
 * off the request queue.
 *
 * To the driver the flush request should be invisible; that's why we
 * impersonate the original request here.
 */
void blk_mq_clone_flush_request(struct request *flush_rq,
                struct request *orig_rq)
{
        struct blk_mq_hw_ctx *hctx =
                orig_rq->q->mq_ops->map_queue(orig_rq->q, orig_rq->mq_ctx->cpu);

        flush_rq->mq_ctx = orig_rq->mq_ctx;
        flush_rq->tag = orig_rq->tag;
        memcpy(blk_mq_rq_to_pdu(flush_rq), blk_mq_rq_to_pdu(orig_rq),
                hctx->cmd_size);
}

inline void __blk_mq_end_io(struct request *rq, int error)
{
        blk_account_io_done(rq);

        if (rq->end_io) {
                rq->end_io(rq, error);
        } else {
                if (unlikely(blk_bidi_rq(rq)))
                        blk_mq_free_request(rq->next_rq);
                blk_mq_free_request(rq);
        }
}
EXPORT_SYMBOL(__blk_mq_end_io);

void blk_mq_end_io(struct request *rq, int error)
{
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
        __blk_mq_end_io(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_io);

static void __blk_mq_complete_request_remote(void *data)
{
        struct request *rq = data;

        rq->q->softirq_done_fn(rq);
}

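/*
 * Complete a request by calling the driver's softirq_done_fn. With IPI
 * redirection enabled, and the submitting CPU both different from the
 * current one and still online, the completion is bounced back to the
 * submitting CPU via smp_call_function_single_async().
 */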
void __blk_mq_complete_request(struct request *rq)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        int cpu;

        if (!ctx->ipi_redirect) {
                rq->q->softirq_done_fn(rq);
                return;
        }

        cpu = get_cpu();
        if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
                rq->csd.func = __blk_mq_complete_request_remote;
                rq->csd.info = rq;
                rq->csd.flags = 0;
                smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
                rq->q->softirq_done_fn(rq);
        }
        put_cpu();
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:         the request being processed
 *
 * Description:
 *      Ends all I/O on a request. It does not handle partial completions.
 *      The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
        if (unlikely(blk_should_fake_timeout(rq->q)))
                return;
        if (!blk_mark_rq_complete(rq))
                __blk_mq_complete_request(rq);
}
EXPORT_SYMBOL(blk_mq_complete_request);

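/*
 * Prepare a request for dispatch to the driver: trace the issue, set up
 * the residual byte counts, arm the timeout deadline and mark the request
 * started. 'last' flags the final request of a dispatch batch via REQ_END.
 */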
static void blk_mq_start_request(struct request *rq, bool last)
{
        struct request_queue *q = rq->q;

        trace_block_rq_issue(q, rq);

        rq->resid_len = blk_rq_bytes(rq);
        if (unlikely(blk_bidi_rq(rq)))
                rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

        /*
         * Just mark start time and set the started bit. Due to memory
         * ordering, we know we'll see the correct deadline as long as
         * REQ_ATOM_STARTED is seen.
         */
        rq->deadline = jiffies + q->rq_timeout;
        set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);

        if (q->dma_drain_size && blk_rq_bytes(rq)) {
                /*
                 * Make sure space for the drain appears.  We know we can do
                 * this because max_hw_segments has been adjusted to be one
                 * fewer than the device can handle.
                 */
                rq->nr_phys_segments++;
        }

        /*
         * Flag the last request in the series so that drivers know when IO
         * should be kicked off, if they don't do it on a per-request basis.
         *
         * Note: the flag isn't the only condition on which drivers should
         * kick off IO. If the drive is busy, the last request might not have
         * the bit set.
         */
        if (last)
                rq->cmd_flags |= REQ_END;
}

static void __blk_mq_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        trace_block_rq_requeue(q, rq);
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);

        rq->cmd_flags &= ~REQ_END;

        if (q->dma_drain_size && blk_rq_bytes(rq))
                rq->nr_phys_segments--;
}

void blk_mq_requeue_request(struct request *rq)
{
        __blk_mq_requeue_request(rq);
        blk_clear_rq_complete(rq);

        BUG_ON(blk_queued_rq(rq));
        blk_mq_insert_request(rq, true, true, false);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
        return tags->rqs[tag];
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
        struct blk_mq_hw_ctx *hctx;
        unsigned long *next;
        unsigned int *next_set;
};

static void blk_mq_timeout_check(void *__data, unsigned long *free_tags)
{
        struct blk_mq_timeout_data *data = __data;
        struct blk_mq_hw_ctx *hctx = data->hctx;
        unsigned int tag;

        /*
         * A request may not be in flight yet (this is where the
         * REQ_ATOM_STARTED flag comes in). The requests are statically
         * allocated, so we know it's always safe to access the memory
         * associated with a bit offset into ->rqs[].
         */
        tag = 0;
        do {
                struct request *rq;

                tag = find_next_zero_bit(free_tags, hctx->tags->nr_tags, tag);
                if (tag >= hctx->tags->nr_tags)
                        break;

                rq = blk_mq_tag_to_rq(hctx->tags, tag++);
                if (rq->q != hctx->queue)
                        continue;
                if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                        continue;

                blk_rq_check_expired(rq, data->next, data->next_set);
        } while (1);
}

static void blk_mq_hw_ctx_check_timeout(struct blk_mq_hw_ctx *hctx,
                                        unsigned long *next,
                                        unsigned int *next_set)
{
        struct blk_mq_timeout_data data = {
                .hctx           = hctx,
                .next           = next,
                .next_set       = next_set,
        };

        /*
         * Ask the tagging code to iterate busy requests, so we can
         * check them for timeout.
         */
        blk_mq_tag_busy_iter(hctx->tags, blk_mq_timeout_check, &data);
}

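/*
 * Queue timeout handler: check every hardware queue for expired requests
 * and re-arm the timer for the nearest future deadline, if any.
 */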
static void blk_mq_rq_timer(unsigned long data)
{
        struct request_queue *q = (struct request_queue *) data;
        struct blk_mq_hw_ctx *hctx;
        unsigned long next = 0;
        int i, next_set = 0;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_hw_ctx_check_timeout(hctx, &next, &next_set);

        if (next_set)
                mod_timer(&q->timeout, round_jiffies_up(next));
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
                                 struct blk_mq_ctx *ctx, struct bio *bio)
{
        struct request *rq;
        int checked = 8;

        list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
                int el_ret;

                if (!checked--)
                        break;

                if (!blk_rq_merge_ok(rq, bio))
                        continue;

                el_ret = blk_try_merge(rq, bio);
                if (el_ret == ELEVATOR_BACK_MERGE) {
                        if (bio_attempt_back_merge(q, rq, bio)) {
                                ctx->rq_merged++;
                                return true;
                        }
                        break;
                } else if (el_ret == ELEVATOR_FRONT_MERGE) {
                        if (bio_attempt_front_merge(q, rq, bio)) {
                                ctx->rq_merged++;
                                return true;
                        }
                        break;
                }
        }

        return false;
}

void blk_mq_add_timer(struct request *rq)
{
        __blk_add_timer(rq, NULL);
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        struct request *rq;
        LIST_HEAD(rq_list);
        int bit, queued;

        WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));

        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return;

        hctx->run++;

        /*
         * Touch any software queue that has pending entries.
         */
        for_each_set_bit(bit, hctx->ctx_map, hctx->nr_ctx) {
                clear_bit(bit, hctx->ctx_map);
                ctx = hctx->ctxs[bit];
                BUG_ON(bit != ctx->index_hw);

                spin_lock(&ctx->lock);
                list_splice_tail_init(&ctx->rq_list, &rq_list);
                spin_unlock(&ctx->lock);
        }

        /*
         * If we have previous entries on our dispatch list, grab them
         * and stuff them at the front for more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Now process all the entries, sending them to the driver.
         */
        queued = 0;
        while (!list_empty(&rq_list)) {
                int ret;

                rq = list_first_entry(&rq_list, struct request, queuelist);
                list_del_init(&rq->queuelist);

                blk_mq_start_request(rq, list_empty(&rq_list));

                ret = q->mq_ops->queue_rq(hctx, rq);
                switch (ret) {
                case BLK_MQ_RQ_QUEUE_OK:
                        queued++;
                        continue;
                case BLK_MQ_RQ_QUEUE_BUSY:
                        /*
                         * FIXME: we should have a mechanism to stop the queue
                         * like blk_stop_queue, otherwise we will waste cpu
                         * time
                         */
                        list_add(&rq->queuelist, &rq_list);
                        __blk_mq_requeue_request(rq);
                        break;
                default:
                        pr_err("blk-mq: bad return on queue: %d\n", ret);
                case BLK_MQ_RQ_QUEUE_ERROR:
                        rq->errors = -EIO;
                        blk_mq_end_io(rq, rq->errors);
                        break;
                }

                if (ret == BLK_MQ_RQ_QUEUE_BUSY)
                        break;
        }

        if (!queued)
                hctx->dispatched[0]++;
        else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
                hctx->dispatched[ilog2(queued) + 1]++;

        /*
         * Any items that need requeuing? Stuff them into hctx->dispatch,
         * that is where we will continue on next queue run.
         */
        if (!list_empty(&rq_list)) {
                spin_lock(&hctx->lock);
                list_splice(&rq_list, &hctx->dispatch);
                spin_unlock(&hctx->lock);
        }
}

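/*
 * Kick a hardware queue: run it directly if we may run synchronously on a
 * CPU mapped to this hctx, otherwise punt to kblockd, picking the first
 * CPU in the hctx mask when there is more than one hardware queue.
 */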
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
        if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
                return;

        if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
                __blk_mq_run_hw_queue(hctx);
        else if (hctx->queue->nr_hw_queues == 1)
                kblockd_schedule_delayed_work(&hctx->run_work, 0);
        else {
                unsigned int cpu;

                /*
                 * It'd be great if the workqueue API had a way to pass
                 * in a mask and had some smarts for more clever placement
                 * than the first CPU. Or we could round-robin here. For now,
                 * just queue on the first CPU.
                 */
                cpu = cpumask_first(hctx->cpumask);
                kblockd_schedule_delayed_work_on(cpu, &hctx->run_work, 0);
        }
}

void blk_mq_run_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if ((!blk_mq_hctx_has_pending(hctx) &&
                    list_empty_careful(&hctx->dispatch)) ||
                    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                        continue;

                preempt_disable();
                blk_mq_run_hw_queue(hctx, async);
                preempt_enable();
        }
}
EXPORT_SYMBOL(blk_mq_run_queues);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        cancel_delayed_work(&hctx->run_work);
        cancel_delayed_work(&hctx->delay_work);
        set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

        preempt_disable();
        __blk_mq_run_hw_queue(hctx);
        preempt_enable();
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                        continue;

                clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
                preempt_disable();
                blk_mq_run_hw_queue(hctx, async);
                preempt_enable();
        }
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

        __blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
        struct blk_mq_hw_ctx *hctx;

        hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

        if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
                __blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
        unsigned long tmo = msecs_to_jiffies(msecs);

        if (hctx->queue->nr_hw_queues == 1)
                kblockd_schedule_delayed_work(&hctx->delay_work, tmo);
        else {
                unsigned int cpu;

                /*
                 * It'd be great if the workqueue API had a way to pass
                 * in a mask and had some smarts for more clever placement
                 * than the first CPU. Or we could round-robin here. For now,
                 * just queue on the first CPU.
                 */
                cpu = cpumask_first(hctx->cpumask);
                kblockd_schedule_delayed_work_on(cpu, &hctx->delay_work, tmo);
        }
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
                                    struct request *rq, bool at_head)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        trace_block_rq_insert(hctx->queue, rq);

        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);
        blk_mq_hctx_mark_pending(hctx, ctx);

        /*
         * We do this early, to ensure we are on the right CPU.
         */
        blk_mq_add_timer(rq);
}

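/*
 * Insert a single request into its software queue (or hand it to the
 * flush machinery for FLUSH/FUA) and optionally kick the hardware queue.
 * A request whose original CPU went offline is re-homed to the current
 * ctx first.
 */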
void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
                bool async)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;

        current_ctx = blk_mq_get_ctx(q);
        if (!cpu_online(ctx->cpu))
                rq->mq_ctx = ctx = current_ctx;

        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
            !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
                blk_insert_flush(rq);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
        }

        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);

        blk_mq_put_ctx(current_ctx);
}

static void blk_mq_insert_requests(struct request_queue *q,
                                     struct blk_mq_ctx *ctx,
                                     struct list_head *list,
                                     int depth,
                                     bool from_schedule)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *current_ctx;

        trace_block_unplug(q, depth, !from_schedule);

        current_ctx = blk_mq_get_ctx(q);

        /*
         * preemption doesn't flush the plug list, so it's possible that
         * ctx->cpu is offline now
         */
        if (!cpu_online(ctx->cpu))
                ctx = current_ctx;
        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        spin_lock(&ctx->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->mq_ctx = ctx;
                __blk_mq_insert_request(hctx, rq, false);
        }
        spin_unlock(&ctx->lock);

        blk_mq_run_hw_queue(hctx, from_schedule);
        blk_mq_put_ctx(current_ctx);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return !(rqa->mq_ctx < rqb->mq_ctx ||
                 (rqa->mq_ctx == rqb->mq_ctx &&
                  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

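/*
 * Flush the plugged mq requests: sort them by ctx (and by sector within a
 * ctx) so that each contiguous run can be inserted into its software queue
 * in one go by blk_mq_insert_requests().
 */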
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
        struct blk_mq_ctx *this_ctx;
        struct request_queue *this_q;
        struct request *rq;
        LIST_HEAD(list);
        LIST_HEAD(ctx_list);
        unsigned int depth;

        list_splice_init(&plug->mq_list, &list);

        list_sort(NULL, &list, plug_ctx_cmp);

        this_q = NULL;
        this_ctx = NULL;
        depth = 0;

        while (!list_empty(&list)) {
                rq = list_entry_rq(list.next);
                list_del_init(&rq->queuelist);
                BUG_ON(!rq->q);
                if (rq->mq_ctx != this_ctx) {
                        if (this_ctx) {
                                blk_mq_insert_requests(this_q, this_ctx,
                                                        &ctx_list, depth,
                                                        from_schedule);
                        }

                        this_ctx = rq->mq_ctx;
                        this_q = rq->q;
                        depth = 0;
                }

                depth++;
                list_add_tail(&rq->queuelist, &ctx_list);
        }

        /*
         * If 'this_ctx' is set, we know we have entries to complete
         * on 'ctx_list'. Do those.
         */
        if (this_ctx) {
                blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
                                       from_schedule);
        }
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
        init_request_from_bio(rq, bio);
        blk_account_io_start(rq, 1);
}

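/*
 * bio submission entry point for blk-mq: bounce and integrity-prep the
 * bio, attempt plug merging where allowed, allocate and initialize a
 * request, and then plug, merge or insert it and run the hardware queue.
 */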
static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        const int is_sync = rw_is_sync(bio->bi_rw);
        const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
        int rw = bio_data_dir(bio);
        struct request *rq;
        unsigned int use_plug, request_count = 0;

        /*
         * If we have multiple hardware queues, just go directly to
         * one of those for sync IO.
         */
        use_plug = !is_flush_fua && ((q->nr_hw_queues == 1) || !is_sync);

        blk_queue_bounce(q, &bio);

        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
                bio_endio(bio, -EIO);
                return;
        }

        if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
                return;

        if (blk_mq_queue_enter(q)) {
                bio_endio(bio, -EIO);
                return;
        }

        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);

        if (is_sync)
                rw |= REQ_SYNC;
        trace_block_getrq(q, bio, rw);
        rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
        if (likely(rq))
                blk_mq_rq_ctx_init(q, ctx, rq, rw);
        else {
                blk_mq_put_ctx(ctx);
                trace_block_sleeprq(q, bio, rw);
                rq = blk_mq_alloc_request_pinned(q, rw, __GFP_WAIT|GFP_ATOMIC,
                                                        false);
                ctx = rq->mq_ctx;
                hctx = q->mq_ops->map_queue(q, ctx->cpu);
        }

        hctx->queued++;

        if (unlikely(is_flush_fua)) {
                blk_mq_bio_to_request(rq, bio);
                blk_insert_flush(rq);
                goto run_queue;
        }

        /*
         * If a task plug exists, use it: since this is completely lockless,
         * we can utilize it to temporarily store requests until the task is
         * either done or scheduled away.
         */
        if (use_plug) {
                struct blk_plug *plug = current->plug;

                if (plug) {
                        blk_mq_bio_to_request(rq, bio);
                        if (list_empty(&plug->mq_list))
                                trace_block_plug(q);
                        else if (request_count >= BLK_MAX_REQUEST_COUNT) {
                                blk_flush_plug_list(plug, false);
                                trace_block_plug(q);
                        }
                        list_add_tail(&rq->queuelist, &plug->mq_list);
                        blk_mq_put_ctx(ctx);
                        return;
                }
        }

        spin_lock(&ctx->lock);

        if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
            blk_mq_attempt_merge(q, ctx, bio))
                __blk_mq_free_request(hctx, ctx, rq);
        else {
                blk_mq_bio_to_request(rq, bio);
                __blk_mq_insert_request(hctx, rq, false);
        }

        spin_unlock(&ctx->lock);

        /*
         * For a SYNC request, send it to the hardware immediately. For an
         * ASYNC request, just ensure that we run it later on. The latter
         * allows for merging opportunities and more efficient dispatching.
         */
run_queue:
        blk_mq_run_hw_queue(hctx, !is_sync || is_flush_fua);
        blk_mq_put_ctx(ctx);
}

/*
 * Default mapping to a software queue, since we use one per CPU.
 */
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
{
        return q->queue_hw_ctx[q->mq_map[cpu]];
}
EXPORT_SYMBOL(blk_mq_map_queue);

struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
                                                   unsigned int hctx_index)
{
        return kmalloc_node(sizeof(struct blk_mq_hw_ctx),
                                GFP_KERNEL | __GFP_ZERO, set->numa_node);
}
EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);

void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
                                 unsigned int hctx_index)
{
        kfree(hctx);
}
EXPORT_SYMBOL(blk_mq_free_single_hw_queue);

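/*
 * CPU notifier callback for a hardware queue: when a CPU dies, splice any
 * requests left on its software queue over to the current CPU's ctx and
 * rerun the (possibly different) hardware queue they now map to.
 */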
static void blk_mq_hctx_notify(void *data, unsigned long action,
                               unsigned int cpu)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        LIST_HEAD(tmp);

        if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                return;

        /*
         * Move ctx entries to new CPU, if this one is going away.
         */
        ctx = __blk_mq_get_ctx(q, cpu);

        spin_lock(&ctx->lock);
        if (!list_empty(&ctx->rq_list)) {
                list_splice_init(&ctx->rq_list, &tmp);
                clear_bit(ctx->index_hw, hctx->ctx_map);
        }
        spin_unlock(&ctx->lock);

        if (list_empty(&tmp))
                return;

        ctx = blk_mq_get_ctx(q);
        spin_lock(&ctx->lock);

        while (!list_empty(&tmp)) {
                struct request *rq;

                rq = list_first_entry(&tmp, struct request, queuelist);
                rq->mq_ctx = ctx;
                list_move_tail(&rq->queuelist, &ctx->rq_list);
        }

        hctx = q->mq_ops->map_queue(q, ctx->cpu);
        blk_mq_hctx_mark_pending(hctx, ctx);

        spin_unlock(&ctx->lock);

        blk_mq_run_hw_queue(hctx, true);
        blk_mq_put_ctx(ctx);
}

static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
                struct blk_mq_tags *tags, unsigned int hctx_idx)
{
        struct page *page;

        if (tags->rqs && set->ops->exit_request) {
                int i;

                for (i = 0; i < tags->nr_tags; i++) {
                        if (!tags->rqs[i])
                                continue;
                        set->ops->exit_request(set->driver_data, tags->rqs[i],
                                                hctx_idx, i);
                }
        }

        while (!list_empty(&tags->page_list)) {
                page = list_first_entry(&tags->page_list, struct page, lru);
                list_del_init(&page->lru);
                __free_pages(page, page->private);
        }

        kfree(tags->rqs);

        blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
        size_t ret = PAGE_SIZE;

        while (order--)
                ret *= 2;

        return ret;
}

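/*
 * Allocate the tags and the statically preallocated requests for one
 * hardware queue. Requests are carved out of page allocations of up to
 * order 4, falling back to smaller orders as needed, and each request is
 * passed to the driver's init_request() hook, if one is set.
 */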
static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
                unsigned int hctx_idx)
{
        struct blk_mq_tags *tags;
        unsigned int i, j, entries_per_page, max_order = 4;
        size_t rq_size, left;

        tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
                                set->numa_node);
        if (!tags)
                return NULL;

        INIT_LIST_HEAD(&tags->page_list);

        tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *),
                                        GFP_KERNEL, set->numa_node);
        if (!tags->rqs) {
                blk_mq_free_tags(tags);
                return NULL;
        }

        /*
         * rq_size is the size of the request plus driver payload, rounded
         * to the cacheline size
         */
        rq_size = round_up(sizeof(struct request) + set->cmd_size,
                                cache_line_size());
        left = rq_size * set->queue_depth;

        for (i = 0; i < set->queue_depth; ) {
                int this_order = max_order;
                struct page *page;
                int to_do;
                void *p;

                while (left < order_to_size(this_order - 1) && this_order)
                        this_order--;

                do {
                        page = alloc_pages_node(set->numa_node, GFP_KERNEL,
                                                this_order);
                        if (page)
                                break;
                        if (!this_order--)
                                break;
                        if (order_to_size(this_order) < rq_size)
                                break;
                } while (1);

                if (!page)
                        goto fail;

                page->private = this_order;
                list_add_tail(&page->lru, &tags->page_list);

                p = page_address(page);
                entries_per_page = order_to_size(this_order) / rq_size;
                to_do = min(entries_per_page, set->queue_depth - i);
                left -= to_do * rq_size;
                for (j = 0; j < to_do; j++) {
                        tags->rqs[i] = p;
                        if (set->ops->init_request) {
                                if (set->ops->init_request(set->driver_data,
                                                tags->rqs[i], hctx_idx, i,
                                                set->numa_node))
                                        goto fail;
                        }

                        p += rq_size;
                        i++;
                }
        }

        return tags;

fail:
        pr_warn("%s: failed to allocate requests\n", __func__);
        blk_mq_free_rq_map(set, tags, hctx_idx);
        return NULL;
}

static int blk_mq_init_hw_queues(struct request_queue *q,
                struct blk_mq_tag_set *set)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i, j;

        /*
         * Initialize hardware queues
         */
        queue_for_each_hw_ctx(q, hctx, i) {
                unsigned int num_maps;
                int node;

                node = hctx->numa_node;
                if (node == NUMA_NO_NODE)
                        node = hctx->numa_node = set->numa_node;

                INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
                INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
                spin_lock_init(&hctx->lock);
                INIT_LIST_HEAD(&hctx->dispatch);
                hctx->queue = q;
                hctx->queue_num = i;
                hctx->flags = set->flags;
                hctx->cmd_size = set->cmd_size;

                blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
                                                blk_mq_hctx_notify, hctx);
                blk_mq_register_cpu_notifier(&hctx->cpu_notifier);

                hctx->tags = set->tags[i];

                /*
                 * Allocate space for all possible cpus to avoid allocation
                 * at runtime
                 */
                hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
                                                GFP_KERNEL, node);
                if (!hctx->ctxs)
                        break;

                num_maps = ALIGN(nr_cpu_ids, BITS_PER_LONG) / BITS_PER_LONG;
                hctx->ctx_map = kzalloc_node(num_maps * sizeof(unsigned long),
                                                GFP_KERNEL, node);
                if (!hctx->ctx_map)
                        break;

                hctx->nr_ctx_map = num_maps;
                hctx->nr_ctx = 0;

                if (set->ops->init_hctx &&
                    set->ops->init_hctx(hctx, set->driver_data, i))
                        break;
        }

        if (i == q->nr_hw_queues)
                return 0;

        /*
         * Init failed
         */
        queue_for_each_hw_ctx(q, hctx, j) {
                if (i == j)
                        break;

                if (set->ops->exit_hctx)
                        set->ops->exit_hctx(hctx, j);

                blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
                kfree(hctx->ctxs);
                kfree(hctx->ctx_map);
        }

        return 1;
}

static void blk_mq_init_cpu_queues(struct request_queue *q,
                                   unsigned int nr_hw_queues)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
                struct blk_mq_hw_ctx *hctx;

                memset(__ctx, 0, sizeof(*__ctx));
                __ctx->cpu = i;
                spin_lock_init(&__ctx->lock);
                INIT_LIST_HEAD(&__ctx->rq_list);
                __ctx->queue = q;

                /* If the cpu isn't online, it is mapped to the first hctx */
                if (!cpu_online(i))
                        continue;

                hctx = q->mq_ops->map_queue(q, i);
                cpumask_set_cpu(i, hctx->cpumask);
                hctx->nr_ctx++;

                /*
                 * Set local node, IFF we have more than one hw queue. If
                 * not, we remain on the home node of the device
                 */
                if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
                        hctx->numa_node = cpu_to_node(i);
        }
}

static void blk_mq_map_swqueue(struct request_queue *q)
{
        unsigned int i;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;

        queue_for_each_hw_ctx(q, hctx, i) {
                cpumask_clear(hctx->cpumask);
                hctx->nr_ctx = 0;
        }

        /*
         * Map software to hardware queues
         */
        queue_for_each_ctx(q, ctx, i) {
                /* If the cpu isn't online, it is mapped to the first hctx */
                if (!cpu_online(i))
                        continue;

                hctx = q->mq_ops->map_queue(q, i);
                cpumask_set_cpu(i, hctx->cpumask);
                ctx->index_hw = hctx->nr_ctx;
                hctx->ctxs[hctx->nr_ctx++] = ctx;
        }
}

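/*
 * Allocate and initialize a request queue for the given tag set: per-cpu
 * software queues, driver-allocated hardware queues, the CPU-to-queue map,
 * plus timeout and flush handling. Returns an ERR_PTR() on failure.
 */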
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
        struct blk_mq_hw_ctx **hctxs;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        int i;

        ctx = alloc_percpu(struct blk_mq_ctx);
        if (!ctx)
                return ERR_PTR(-ENOMEM);

        hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
                        set->numa_node);

        if (!hctxs)
                goto err_percpu;

        for (i = 0; i < set->nr_hw_queues; i++) {
                hctxs[i] = set->ops->alloc_hctx(set, i);
                if (!hctxs[i])
                        goto err_hctxs;

                if (!zalloc_cpumask_var(&hctxs[i]->cpumask, GFP_KERNEL))
                        goto err_hctxs;

                hctxs[i]->numa_node = NUMA_NO_NODE;
                hctxs[i]->queue_num = i;
        }

        q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
        if (!q)
                goto err_hctxs;

        q->mq_map = blk_mq_make_queue_map(set);
        if (!q->mq_map)
                goto err_map;

        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, 30000);

        q->nr_queues = nr_cpu_ids;
        q->nr_hw_queues = set->nr_hw_queues;

        q->queue_ctx = ctx;
        q->queue_hw_ctx = hctxs;

        q->mq_ops = set->ops;
        q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;

        q->sg_reserved_size = INT_MAX;

        blk_queue_make_request(q, blk_mq_make_request);
        blk_queue_rq_timed_out(q, set->ops->timeout);
        if (set->timeout)
                blk_queue_rq_timeout(q, set->timeout);

        if (set->ops->complete)
                blk_queue_softirq_done(q, set->ops->complete);

        blk_mq_init_flush(q);
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);

        q->flush_rq = kzalloc(round_up(sizeof(struct request) +
                                set->cmd_size, cache_line_size()),
                                GFP_KERNEL);
        if (!q->flush_rq)
                goto err_hw;

        if (blk_mq_init_hw_queues(q, set))
                goto err_flush_rq;

        blk_mq_map_swqueue(q);

        mutex_lock(&all_q_mutex);
        list_add_tail(&q->all_q_node, &all_q_list);
        mutex_unlock(&all_q_mutex);

        return q;

err_flush_rq:
        kfree(q->flush_rq);
err_hw:
        kfree(q->mq_map);
err_map:
        blk_cleanup_queue(q);
err_hctxs:
        for (i = 0; i < set->nr_hw_queues; i++) {
                if (!hctxs[i])
                        break;
                free_cpumask_var(hctxs[i]->cpumask);
                set->ops->free_hctx(hctxs[i], i);
        }
        kfree(hctxs);
err_percpu:
        free_percpu(ctx);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_queue);

void blk_mq_free_queue(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                kfree(hctx->ctx_map);
                kfree(hctx->ctxs);
                blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
                if (q->mq_ops->exit_hctx)
                        q->mq_ops->exit_hctx(hctx, i);
                free_cpumask_var(hctx->cpumask);
                q->mq_ops->free_hctx(hctx, i);
        }

        free_percpu(q->queue_ctx);
        kfree(q->queue_hw_ctx);
        kfree(q->mq_map);

        q->queue_ctx = NULL;
        q->queue_hw_ctx = NULL;
        q->mq_map = NULL;

        mutex_lock(&all_q_mutex);
        list_del_init(&q->all_q_node);
        mutex_unlock(&all_q_mutex);
}

/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
        blk_mq_freeze_queue(q);

        blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);

        /*
         * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
         * we should change hctx numa_node according to the new topology (this
         * involves freeing and re-allocating memory; is it worth doing?)
         */

        blk_mq_map_swqueue(q);

        blk_mq_unfreeze_queue(q);
}

static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
                                      unsigned long action, void *hcpu)
{
        struct request_queue *q;

        /*
         * Before the new mapping is established, a hot-added CPU might
         * already have started handling requests. This doesn't break
         * anything, as we map offline CPUs to the first hardware queue.
         * We will re-init the queue below to get optimal settings.
         */
        if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
            action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
                return NOTIFY_OK;

        mutex_lock(&all_q_mutex);
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_queue_reinit(q);
        mutex_unlock(&all_q_mutex);
        return NOTIFY_OK;
}

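/*
 * Validate a driver's tag set and allocate the tags plus preallocated
 * requests for each hardware queue. This is presumably called once per
 * tag set, before the queue itself is set up with blk_mq_init_queue().
 */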
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
        int i;

        if (!set->nr_hw_queues)
                return -EINVAL;
        if (!set->queue_depth || set->queue_depth > BLK_MQ_MAX_DEPTH)
                return -EINVAL;
        if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
                return -EINVAL;

        if (!set->ops->queue_rq || !set->ops->map_queue ||
            !set->ops->alloc_hctx || !set->ops->free_hctx)
                return -EINVAL;

        set->tags = kmalloc_node(set->nr_hw_queues *
                                 sizeof(struct blk_mq_tags *),
                                 GFP_KERNEL, set->numa_node);
        if (!set->tags)
                goto out;

        for (i = 0; i < set->nr_hw_queues; i++) {
                set->tags[i] = blk_mq_init_rq_map(set, i);
                if (!set->tags[i])
                        goto out_unwind;
        }

        return 0;

out_unwind:
        while (--i >= 0)
                blk_mq_free_rq_map(set, set->tags[i], i);
out:
        return -ENOMEM;
}
EXPORT_SYMBOL(blk_mq_alloc_tag_set);

void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
        int i;

        for (i = 0; i < set->nr_hw_queues; i++)
                blk_mq_free_rq_map(set, set->tags[i], i);
}
EXPORT_SYMBOL(blk_mq_free_tag_set);

void blk_mq_disable_hotplug(void)
{
        mutex_lock(&all_q_mutex);
}

void blk_mq_enable_hotplug(void)
{
        mutex_unlock(&all_q_mutex);
}

static int __init blk_mq_init(void)
{
        blk_mq_cpu_init();

        /* Must be called after percpu_counter_hotcpu_callback() */
        hotcpu_notifier(blk_mq_queue_reinit_notify, -10);

        return 0;
}
subsys_initcall(blk_mq_init);