97ebb84b5633b1f86e5a21a18319b3d061729b5b
[linux-2.6-block.git] / block / blk-mq.c
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/mm.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/workqueue.h>
16 #include <linux/smp.h>
17 #include <linux/llist.h>
18 #include <linux/list_sort.h>
19 #include <linux/cpu.h>
20 #include <linux/cache.h>
21 #include <linux/sched/sysctl.h>
22 #include <linux/delay.h>
23 #include <linux/crash_dump.h>
24
25 #include <trace/events/block.h>
26
27 #include <linux/blk-mq.h>
28 #include "blk.h"
29 #include "blk-mq.h"
30 #include "blk-mq-tag.h"
31
32 static DEFINE_MUTEX(all_q_mutex);
33 static LIST_HEAD(all_q_list);
34
35 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
36
37 /*
38  * Check if any of the ctx's have pending work in this hardware queue
39  */
40 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
41 {
42         unsigned int i;
43
44         for (i = 0; i < hctx->ctx_map.map_size; i++)
45                 if (hctx->ctx_map.map[i].word)
46                         return true;
47
48         return false;
49 }
50
51 static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
52                                               struct blk_mq_ctx *ctx)
53 {
54         return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
55 }
56
57 #define CTX_TO_BIT(hctx, ctx)   \
58         ((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))
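/*
 * Example: with bits_per_word == 8, as set up in blk_mq_alloc_bitmap()
 * below, a ctx with index_hw == 11 is tracked in ctx_map.map[1], bit 3.
 */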
59
60 /*
61  * Mark this ctx as having pending work in this hardware queue
62  */
63 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
64                                      struct blk_mq_ctx *ctx)
65 {
66         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
67
68         if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
69                 set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
70 }
71
72 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
73                                       struct blk_mq_ctx *ctx)
74 {
75         struct blk_align_bitmap *bm = get_bm(hctx, ctx);
76
77         clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
78 }
79
80 static int blk_mq_queue_enter(struct request_queue *q)
81 {
82         while (true) {
83                 int ret;
84
85                 if (percpu_ref_tryget_live(&q->mq_usage_counter))
86                         return 0;
87
88                 ret = wait_event_interruptible(q->mq_freeze_wq,
89                                 !q->mq_freeze_depth || blk_queue_dying(q));
90                 if (blk_queue_dying(q))
91                         return -ENODEV;
92                 if (ret)
93                         return ret;
94         }
95 }
96
97 static void blk_mq_queue_exit(struct request_queue *q)
98 {
99         percpu_ref_put(&q->mq_usage_counter);
100 }
101
102 static void blk_mq_usage_counter_release(struct percpu_ref *ref)
103 {
104         struct request_queue *q =
105                 container_of(ref, struct request_queue, mq_usage_counter);
106
107         wake_up_all(&q->mq_freeze_wq);
108 }
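/*
 * mq_usage_counter gates request allocation: blk_mq_queue_enter() takes a
 * percpu reference for every request in flight, freezing kills the ref and
 * waits for it to drop to zero, and the release callback above wakes up
 * anyone waiting in blk_mq_freeze_queue_wait() or blk_mq_queue_enter().
 */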
109
110 void blk_mq_freeze_queue_start(struct request_queue *q)
111 {
112         bool freeze;
113
114         spin_lock_irq(q->queue_lock);
115         freeze = !q->mq_freeze_depth++;
116         spin_unlock_irq(q->queue_lock);
117
118         if (freeze) {
119                 percpu_ref_kill(&q->mq_usage_counter);
120                 blk_mq_run_queues(q, false);
121         }
122 }
123 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
124
125 static void blk_mq_freeze_queue_wait(struct request_queue *q)
126 {
127         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
128 }
129
130 /*
131  * Guarantee no request is in use, so we can change any data structure of
132  * the queue afterward.
133  */
134 void blk_mq_freeze_queue(struct request_queue *q)
135 {
136         blk_mq_freeze_queue_start(q);
137         blk_mq_freeze_queue_wait(q);
138 }
139
140 void blk_mq_unfreeze_queue(struct request_queue *q)
141 {
142         bool wake;
143
144         spin_lock_irq(q->queue_lock);
145         wake = !--q->mq_freeze_depth;
146         WARN_ON_ONCE(q->mq_freeze_depth < 0);
147         spin_unlock_irq(q->queue_lock);
148         if (wake) {
149                 percpu_ref_reinit(&q->mq_usage_counter);
150                 wake_up_all(&q->mq_freeze_wq);
151         }
152 }
153 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
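/*
 * Typical freeze/unfreeze usage (sketch): quiesce the queue, change its
 * data structures, then let new requests in again.
 *
 *	blk_mq_freeze_queue(q);
 *	... update tags, queue depth, mappings, etc ...
 *	blk_mq_unfreeze_queue(q);
 */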
154
155 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
156 {
157         return blk_mq_has_free_tags(hctx->tags);
158 }
159 EXPORT_SYMBOL(blk_mq_can_queue);
160
161 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
162                                struct request *rq, unsigned int rw_flags)
163 {
164         if (blk_queue_io_stat(q))
165                 rw_flags |= REQ_IO_STAT;
166
167         INIT_LIST_HEAD(&rq->queuelist);
168         /* csd/requeue_work/fifo_time is initialized before use */
169         rq->q = q;
170         rq->mq_ctx = ctx;
171         rq->cmd_flags |= rw_flags;
172         /* do not touch atomic flags, it needs atomic ops against the timer */
173         rq->cpu = -1;
174         INIT_HLIST_NODE(&rq->hash);
175         RB_CLEAR_NODE(&rq->rb_node);
176         rq->rq_disk = NULL;
177         rq->part = NULL;
178         rq->start_time = jiffies;
179 #ifdef CONFIG_BLK_CGROUP
180         rq->rl = NULL;
181         set_start_time_ns(rq);
182         rq->io_start_time_ns = 0;
183 #endif
184         rq->nr_phys_segments = 0;
185 #if defined(CONFIG_BLK_DEV_INTEGRITY)
186         rq->nr_integrity_segments = 0;
187 #endif
188         rq->special = NULL;
189         /* tag was already set */
190         rq->errors = 0;
191
192         rq->cmd = rq->__cmd;
193
194         rq->extra_len = 0;
195         rq->sense_len = 0;
196         rq->resid_len = 0;
197         rq->sense = NULL;
198
199         INIT_LIST_HEAD(&rq->timeout_list);
200         rq->timeout = 0;
201
202         rq->end_io = NULL;
203         rq->end_io_data = NULL;
204         rq->next_rq = NULL;
205
206         ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
207 }
208
209 static struct request *
210 __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
211 {
212         struct request *rq;
213         unsigned int tag;
214
215         tag = blk_mq_get_tag(data);
216         if (tag != BLK_MQ_TAG_FAIL) {
217                 rq = data->hctx->tags->rqs[tag];
218
219                 if (blk_mq_tag_busy(data->hctx)) {
220                         rq->cmd_flags = REQ_MQ_INFLIGHT;
221                         atomic_inc(&data->hctx->nr_active);
222                 }
223
224                 rq->tag = tag;
225                 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
226                 return rq;
227         }
228
229         return NULL;
230 }
231
232 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
233                 bool reserved)
234 {
235         struct blk_mq_ctx *ctx;
236         struct blk_mq_hw_ctx *hctx;
237         struct request *rq;
238         struct blk_mq_alloc_data alloc_data;
239         int ret;
240
241         ret = blk_mq_queue_enter(q);
242         if (ret)
243                 return ERR_PTR(ret);
244
245         ctx = blk_mq_get_ctx(q);
246         hctx = q->mq_ops->map_queue(q, ctx->cpu);
247         blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
248                         reserved, ctx, hctx);
249
250         rq = __blk_mq_alloc_request(&alloc_data, rw);
251         if (!rq && (gfp & __GFP_WAIT)) {
252                 __blk_mq_run_hw_queue(hctx);
253                 blk_mq_put_ctx(ctx);
254
255                 ctx = blk_mq_get_ctx(q);
256                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
257                 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
258                                 hctx);
259                 rq = __blk_mq_alloc_request(&alloc_data, rw);
260                 ctx = alloc_data.ctx;
261         }
262         blk_mq_put_ctx(ctx);
263         if (!rq) {
264                 blk_mq_queue_exit(q);
265                 return ERR_PTR(-EWOULDBLOCK);
266         }
267         return rq;
268 }
269 EXPORT_SYMBOL(blk_mq_alloc_request);
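/*
 * Hypothetical caller-side sketch: blk_mq_alloc_request() returns either a
 * request or an ERR_PTR(), never NULL.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */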
270
271 static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
272                                   struct blk_mq_ctx *ctx, struct request *rq)
273 {
274         const int tag = rq->tag;
275         struct request_queue *q = rq->q;
276
277         if (rq->cmd_flags & REQ_MQ_INFLIGHT)
278                 atomic_dec(&hctx->nr_active);
279         rq->cmd_flags = 0;
280
281         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
282         blk_mq_put_tag(hctx, tag, &ctx->last_tag);
283         blk_mq_queue_exit(q);
284 }
285
286 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
287 {
288         struct blk_mq_ctx *ctx = rq->mq_ctx;
289
290         ctx->rq_completed[rq_is_sync(rq)]++;
291         __blk_mq_free_request(hctx, ctx, rq);
292
293 }
294 EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
295
296 void blk_mq_free_request(struct request *rq)
297 {
298         struct blk_mq_hw_ctx *hctx;
299         struct request_queue *q = rq->q;
300
301         hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
302         blk_mq_free_hctx_request(hctx, rq);
303 }
304 EXPORT_SYMBOL_GPL(blk_mq_free_request);
305
306 inline void __blk_mq_end_request(struct request *rq, int error)
307 {
308         blk_account_io_done(rq);
309
310         if (rq->end_io) {
311                 rq->end_io(rq, error);
312         } else {
313                 if (unlikely(blk_bidi_rq(rq)))
314                         blk_mq_free_request(rq->next_rq);
315                 blk_mq_free_request(rq);
316         }
317 }
318 EXPORT_SYMBOL(__blk_mq_end_request);
319
320 void blk_mq_end_request(struct request *rq, int error)
321 {
322         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
323                 BUG();
324         __blk_mq_end_request(rq, error);
325 }
326 EXPORT_SYMBOL(blk_mq_end_request);
327
328 static void __blk_mq_complete_request_remote(void *data)
329 {
330         struct request *rq = data;
331
332         rq->q->softirq_done_fn(rq);
333 }
334
335 static void blk_mq_ipi_complete_request(struct request *rq)
336 {
337         struct blk_mq_ctx *ctx = rq->mq_ctx;
338         bool shared = false;
339         int cpu;
340
341         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
342                 rq->q->softirq_done_fn(rq);
343                 return;
344         }
345
346         cpu = get_cpu();
347         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
348                 shared = cpus_share_cache(cpu, ctx->cpu);
349
350         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
351                 rq->csd.func = __blk_mq_complete_request_remote;
352                 rq->csd.info = rq;
353                 rq->csd.flags = 0;
354                 smp_call_function_single_async(ctx->cpu, &rq->csd);
355         } else {
356                 rq->q->softirq_done_fn(rq);
357         }
358         put_cpu();
359 }
360
361 void __blk_mq_complete_request(struct request *rq)
362 {
363         struct request_queue *q = rq->q;
364
365         if (!q->softirq_done_fn)
366                 blk_mq_end_request(rq, rq->errors);
367         else
368                 blk_mq_ipi_complete_request(rq);
369 }
370
371 /**
372  * blk_mq_complete_request - end I/O on a request
373  * @rq:         the request being processed
374  *
375  * Description:
376  *      Ends all I/O on a request. It does not handle partial completions.
377  *      The actual completion happens out-of-order, through an IPI handler.
378  **/
379 void blk_mq_complete_request(struct request *rq)
380 {
381         struct request_queue *q = rq->q;
382
383         if (unlikely(blk_should_fake_timeout(q)))
384                 return;
385         if (!blk_mark_rq_complete(rq))
386                 __blk_mq_complete_request(rq);
387 }
388 EXPORT_SYMBOL(blk_mq_complete_request);
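/*
 * Typical completion flow (sketch): the driver calls
 * blk_mq_complete_request() from its interrupt handler; the queue's
 * softirq_done_fn (usually the driver's ->complete callback) then finishes
 * the request, ending with blk_mq_end_request(). If no softirq_done_fn is
 * set, __blk_mq_complete_request() above ends the request directly.
 */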
389
390 void blk_mq_start_request(struct request *rq)
391 {
392         struct request_queue *q = rq->q;
393
394         trace_block_rq_issue(q, rq);
395
396         rq->resid_len = blk_rq_bytes(rq);
397         if (unlikely(blk_bidi_rq(rq)))
398                 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
399
400         blk_add_timer(rq);
401
402         /*
403          * Ensure that ->deadline is visible before we set the started
404          * flag and clear the completed flag.
405          */
406         smp_mb__before_atomic();
407
408         /*
409          * Mark us as started and clear complete. Complete might have been
410          * set if requeue raced with timeout, which then marked it as
411          * complete. So be sure to clear complete again when we start
412          * the request, otherwise we'll ignore the completion event.
413          */
414         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
415                 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
416         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
417                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
418
419         if (q->dma_drain_size && blk_rq_bytes(rq)) {
420                 /*
421                  * Make sure space for the drain appears.  We know we can do
422                  * this because max_hw_segments has been adjusted to be one
423                  * fewer than the device can handle.
424                  */
425                 rq->nr_phys_segments++;
426         }
427 }
428 EXPORT_SYMBOL(blk_mq_start_request);
429
430 static void __blk_mq_requeue_request(struct request *rq)
431 {
432         struct request_queue *q = rq->q;
433
434         trace_block_rq_requeue(q, rq);
435
436         if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
437                 if (q->dma_drain_size && blk_rq_bytes(rq))
438                         rq->nr_phys_segments--;
439         }
440 }
441
442 void blk_mq_requeue_request(struct request *rq)
443 {
444         __blk_mq_requeue_request(rq);
445
446         BUG_ON(blk_queued_rq(rq));
447         blk_mq_add_to_requeue_list(rq, true);
448 }
449 EXPORT_SYMBOL(blk_mq_requeue_request);
450
451 static void blk_mq_requeue_work(struct work_struct *work)
452 {
453         struct request_queue *q =
454                 container_of(work, struct request_queue, requeue_work);
455         LIST_HEAD(rq_list);
456         struct request *rq, *next;
457         unsigned long flags;
458
459         spin_lock_irqsave(&q->requeue_lock, flags);
460         list_splice_init(&q->requeue_list, &rq_list);
461         spin_unlock_irqrestore(&q->requeue_lock, flags);
462
463         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
464                 if (!(rq->cmd_flags & REQ_SOFTBARRIER))
465                         continue;
466
467                 rq->cmd_flags &= ~REQ_SOFTBARRIER;
468                 list_del_init(&rq->queuelist);
469                 blk_mq_insert_request(rq, true, false, false);
470         }
471
472         while (!list_empty(&rq_list)) {
473                 rq = list_entry(rq_list.next, struct request, queuelist);
474                 list_del_init(&rq->queuelist);
475                 blk_mq_insert_request(rq, false, false, false);
476         }
477
478         /*
479          * Use the start variant of queue running here, so that running
480          * the requeue work will kick stopped queues.
481          */
482         blk_mq_start_hw_queues(q);
483 }
484
485 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
486 {
487         struct request_queue *q = rq->q;
488         unsigned long flags;
489
490         /*
491          * We abuse this flag that is otherwise used by the I/O scheduler to
492          * request head insertion from the workqueue.
493          */
494         BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
495
496         spin_lock_irqsave(&q->requeue_lock, flags);
497         if (at_head) {
498                 rq->cmd_flags |= REQ_SOFTBARRIER;
499                 list_add(&rq->queuelist, &q->requeue_list);
500         } else {
501                 list_add_tail(&rq->queuelist, &q->requeue_list);
502         }
503         spin_unlock_irqrestore(&q->requeue_lock, flags);
504 }
505 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
506
507 void blk_mq_kick_requeue_list(struct request_queue *q)
508 {
509         kblockd_schedule_work(&q->requeue_work);
510 }
511 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
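/*
 * Requeue usage (sketch): a driver that cannot finish a started request
 * typically does
 *
 *	blk_mq_requeue_request(rq);
 *	blk_mq_kick_requeue_list(q);
 *
 * The kick schedules requeue_work, which re-inserts the requests and
 * restarts any stopped hardware queues via blk_mq_start_hw_queues().
 */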
512
513 static inline bool is_flush_request(struct request *rq,
514                 struct blk_flush_queue *fq, unsigned int tag)
515 {
516         return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
517                         fq->flush_rq->tag == tag);
518 }
519
520 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
521 {
522         struct request *rq = tags->rqs[tag];
523         /* mq_ctx of flush rq is always cloned from the corresponding req */
524         struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);
525
526         if (!is_flush_request(rq, fq, tag))
527                 return rq;
528
529         return fq->flush_rq;
530 }
531 EXPORT_SYMBOL(blk_mq_tag_to_rq);
532
533 struct blk_mq_timeout_data {
534         unsigned long next;
535         unsigned int next_set;
536 };
537
538 void blk_mq_rq_timed_out(struct request *req, bool reserved)
539 {
540         struct blk_mq_ops *ops = req->q->mq_ops;
541         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
542
543         /*
544          * We know that complete is set at this point. If STARTED isn't set
545          * anymore, then the request isn't active and the "timeout" should
546          * just be ignored. This can happen due to the bitflag ordering.
547          * Timeout first checks if STARTED is set, and if it is, assumes
548          * the request is active. But if we race with completion, then
549          * both flags will get cleared. So check here again, and ignore
550          * a timeout event with a request that isn't active.
551          */
552         if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
553                 return;
554
555         if (ops->timeout)
556                 ret = ops->timeout(req, reserved);
557
558         switch (ret) {
559         case BLK_EH_HANDLED:
560                 __blk_mq_complete_request(req);
561                 break;
562         case BLK_EH_RESET_TIMER:
563                 blk_add_timer(req);
564                 blk_clear_rq_complete(req);
565                 break;
566         case BLK_EH_NOT_HANDLED:
567                 break;
568         default:
569                 printk(KERN_ERR "block: bad eh return: %d\n", ret);
570                 break;
571         }
572 }
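/*
 * A driver's ->timeout callback feeds the switch above: BLK_EH_HANDLED
 * completes the request right away, BLK_EH_RESET_TIMER re-arms the timer
 * and clears the complete flag, and BLK_EH_NOT_HANDLED leaves the request
 * for the driver to complete on its own.
 */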
573
574 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
575                 struct request *rq, void *priv, bool reserved)
576 {
577         struct blk_mq_timeout_data *data = priv;
578
579         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
580                 return;
581
582         if (time_after_eq(jiffies, rq->deadline)) {
583                 if (!blk_mark_rq_complete(rq))
584                         blk_mq_rq_timed_out(rq, reserved);
585         } else if (!data->next_set || time_after(data->next, rq->deadline)) {
586                 data->next = rq->deadline;
587                 data->next_set = 1;
588         }
589 }
590
591 static void blk_mq_rq_timer(unsigned long priv)
592 {
593         struct request_queue *q = (struct request_queue *)priv;
594         struct blk_mq_timeout_data data = {
595                 .next           = 0,
596                 .next_set       = 0,
597         };
598         struct blk_mq_hw_ctx *hctx;
599         int i;
600
601         queue_for_each_hw_ctx(q, hctx, i) {
602                 /*
603                  * If no software queues are currently mapped to this
604                  * hardware queue, there's nothing to check
605                  */
606                 if (!blk_mq_hw_queue_mapped(hctx))
607                         continue;
608
609                 blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
610         }
611
612         if (data.next_set) {
613                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
614                 mod_timer(&q->timeout, data.next);
615         } else {
616                 queue_for_each_hw_ctx(q, hctx, i)
617                         blk_mq_tag_idle(hctx);
618         }
619 }
620
621 /*
622  * Reverse check our software queue for entries that we could potentially
623  * merge with. Currently includes a hand-wavy stop count of 8, to not spend
624  * too much time checking for merges.
625  */
626 static bool blk_mq_attempt_merge(struct request_queue *q,
627                                  struct blk_mq_ctx *ctx, struct bio *bio)
628 {
629         struct request *rq;
630         int checked = 8;
631
632         list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
633                 int el_ret;
634
635                 if (!checked--)
636                         break;
637
638                 if (!blk_rq_merge_ok(rq, bio))
639                         continue;
640
641                 el_ret = blk_try_merge(rq, bio);
642                 if (el_ret == ELEVATOR_BACK_MERGE) {
643                         if (bio_attempt_back_merge(q, rq, bio)) {
644                                 ctx->rq_merged++;
645                                 return true;
646                         }
647                         break;
648                 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
649                         if (bio_attempt_front_merge(q, rq, bio)) {
650                                 ctx->rq_merged++;
651                                 return true;
652                         }
653                         break;
654                 }
655         }
656
657         return false;
658 }
659
660 /*
661  * Process software queues that have been marked busy, splicing them
662  * to the for-dispatch list.
663  */
664 static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
665 {
666         struct blk_mq_ctx *ctx;
667         int i;
668
669         for (i = 0; i < hctx->ctx_map.map_size; i++) {
670                 struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
671                 unsigned int off, bit;
672
673                 if (!bm->word)
674                         continue;
675
676                 bit = 0;
677                 off = i * hctx->ctx_map.bits_per_word;
678                 do {
679                         bit = find_next_bit(&bm->word, bm->depth, bit);
680                         if (bit >= bm->depth)
681                                 break;
682
683                         ctx = hctx->ctxs[bit + off];
684                         clear_bit(bit, &bm->word);
685                         spin_lock(&ctx->lock);
686                         list_splice_tail_init(&ctx->rq_list, list);
687                         spin_unlock(&ctx->lock);
688
689                         bit++;
690                 } while (1);
691         }
692 }
693
694 /*
695  * Run this hardware queue, pulling any software queues mapped to it in.
696  * Note that this function currently has various problems around ordering
697  * of IO. In particular, we'd like FIFO behaviour on handling existing
698  * items on the hctx->dispatch list. Ignore that for now.
699  */
700 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
701 {
702         struct request_queue *q = hctx->queue;
703         struct request *rq;
704         LIST_HEAD(rq_list);
705         LIST_HEAD(driver_list);
706         struct list_head *dptr;
707         int queued;
708
709         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
710
711         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
712                 return;
713
714         hctx->run++;
715
716         /*
717          * Touch any software queue that has pending entries.
718          */
719         flush_busy_ctxs(hctx, &rq_list);
720
721         /*
722          * If we have previous entries on our dispatch list, grab them
723          * and stuff them at the front for more fair dispatch.
724          */
725         if (!list_empty_careful(&hctx->dispatch)) {
726                 spin_lock(&hctx->lock);
727                 if (!list_empty(&hctx->dispatch))
728                         list_splice_init(&hctx->dispatch, &rq_list);
729                 spin_unlock(&hctx->lock);
730         }
731
732         /*
733          * Start off with dptr being NULL, so we start the first request
734          * immediately, even if we have more pending.
735          */
736         dptr = NULL;
737
738         /*
739          * Now process all the entries, sending them to the driver.
740          */
741         queued = 0;
742         while (!list_empty(&rq_list)) {
743                 struct blk_mq_queue_data bd;
744                 int ret;
745
746                 rq = list_first_entry(&rq_list, struct request, queuelist);
747                 list_del_init(&rq->queuelist);
748
749                 bd.rq = rq;
750                 bd.list = dptr;
751                 bd.last = list_empty(&rq_list);
752
753                 ret = q->mq_ops->queue_rq(hctx, &bd);
754                 switch (ret) {
755                 case BLK_MQ_RQ_QUEUE_OK:
756                         queued++;
757                         continue;
758                 case BLK_MQ_RQ_QUEUE_BUSY:
759                         list_add(&rq->queuelist, &rq_list);
760                         __blk_mq_requeue_request(rq);
761                         break;
762                 default:
763                         pr_err("blk-mq: bad return on queue: %d\n", ret);
764                 case BLK_MQ_RQ_QUEUE_ERROR:
765                         rq->errors = -EIO;
766                         blk_mq_end_request(rq, rq->errors);
767                         break;
768                 }
769
770                 if (ret == BLK_MQ_RQ_QUEUE_BUSY)
771                         break;
772
773                 /*
774                  * We've done the first request. If we have more than 1
775                  * left in the list, set dptr to defer issue.
776                  */
777                 if (!dptr && rq_list.next != rq_list.prev)
778                         dptr = &driver_list;
779         }
780
781         if (!queued)
782                 hctx->dispatched[0]++;
783         else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
784                 hctx->dispatched[ilog2(queued) + 1]++;
785
786         /*
787          * Any items that need requeuing? Stuff them into hctx->dispatch,
788          * that is where we will continue on next queue run.
789          */
790         if (!list_empty(&rq_list)) {
791                 spin_lock(&hctx->lock);
792                 list_splice(&rq_list, &hctx->dispatch);
793                 spin_unlock(&hctx->lock);
794         }
795 }
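/*
 * Hypothetical ->queue_rq shape (sketch) matching the return handling
 * above; my_device_full() and my_submit() are made-up driver helpers:
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *			       const struct blk_mq_queue_data *bd)
 *	{
 *		if (my_device_full(hctx))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		blk_mq_start_request(bd->rq);
 *		return my_submit(bd->rq) ? BLK_MQ_RQ_QUEUE_ERROR :
 *					   BLK_MQ_RQ_QUEUE_OK;
 *	}
 */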
796
797 /*
798  * It'd be great if the workqueue API had a way to pass
799  * in a mask and had some smarts for more clever placement.
800  * For now we just round-robin here, switching for every
801  * BLK_MQ_CPU_WORK_BATCH queued items.
802  */
803 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
804 {
805         if (hctx->queue->nr_hw_queues == 1)
806                 return WORK_CPU_UNBOUND;
807
808         if (--hctx->next_cpu_batch <= 0) {
809                 int cpu = hctx->next_cpu, next_cpu;
810
811                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
812                 if (next_cpu >= nr_cpu_ids)
813                         next_cpu = cpumask_first(hctx->cpumask);
814
815                 hctx->next_cpu = next_cpu;
816                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
817
818                 return cpu;
819         }
820
821         return hctx->next_cpu;
822 }
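/*
 * Example: the work stays on hctx->next_cpu for BLK_MQ_CPU_WORK_BATCH
 * queue runs, then moves on to the next CPU in hctx->cpumask, wrapping
 * around at the end of the mask.
 */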
823
824 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
825 {
826         if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
827             !blk_mq_hw_queue_mapped(hctx)))
828                 return;
829
830         if (!async) {
831                 int cpu = get_cpu();
832                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
833                         __blk_mq_run_hw_queue(hctx);
834                         put_cpu();
835                         return;
836                 }
837
838                 put_cpu();
839         }
840
841         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
842                         &hctx->run_work, 0);
843 }
844
845 void blk_mq_run_queues(struct request_queue *q, bool async)
846 {
847         struct blk_mq_hw_ctx *hctx;
848         int i;
849
850         queue_for_each_hw_ctx(q, hctx, i) {
851                 if ((!blk_mq_hctx_has_pending(hctx) &&
852                     list_empty_careful(&hctx->dispatch)) ||
853                     test_bit(BLK_MQ_S_STOPPED, &hctx->state))
854                         continue;
855
856                 blk_mq_run_hw_queue(hctx, async);
857         }
858 }
859 EXPORT_SYMBOL(blk_mq_run_queues);
860
861 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
862 {
863         cancel_delayed_work(&hctx->run_work);
864         cancel_delayed_work(&hctx->delay_work);
865         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
866 }
867 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
868
869 void blk_mq_stop_hw_queues(struct request_queue *q)
870 {
871         struct blk_mq_hw_ctx *hctx;
872         int i;
873
874         queue_for_each_hw_ctx(q, hctx, i)
875                 blk_mq_stop_hw_queue(hctx);
876 }
877 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
878
879 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
880 {
881         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
882
883         blk_mq_run_hw_queue(hctx, false);
884 }
885 EXPORT_SYMBOL(blk_mq_start_hw_queue);
886
887 void blk_mq_start_hw_queues(struct request_queue *q)
888 {
889         struct blk_mq_hw_ctx *hctx;
890         int i;
891
892         queue_for_each_hw_ctx(q, hctx, i)
893                 blk_mq_start_hw_queue(hctx);
894 }
895 EXPORT_SYMBOL(blk_mq_start_hw_queues);
896
897
898 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
899 {
900         struct blk_mq_hw_ctx *hctx;
901         int i;
902
903         queue_for_each_hw_ctx(q, hctx, i) {
904                 if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
905                         continue;
906
907                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
908                 blk_mq_run_hw_queue(hctx, async);
909         }
910 }
911 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
912
913 static void blk_mq_run_work_fn(struct work_struct *work)
914 {
915         struct blk_mq_hw_ctx *hctx;
916
917         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
918
919         __blk_mq_run_hw_queue(hctx);
920 }
921
922 static void blk_mq_delay_work_fn(struct work_struct *work)
923 {
924         struct blk_mq_hw_ctx *hctx;
925
926         hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);
927
928         if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
929                 __blk_mq_run_hw_queue(hctx);
930 }
931
932 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
933 {
934         if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
935                 return;
936
937         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
938                         &hctx->delay_work, msecs_to_jiffies(msecs));
939 }
940 EXPORT_SYMBOL(blk_mq_delay_queue);
941
942 static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
943                                     struct request *rq, bool at_head)
944 {
945         struct blk_mq_ctx *ctx = rq->mq_ctx;
946
947         trace_block_rq_insert(hctx->queue, rq);
948
949         if (at_head)
950                 list_add(&rq->queuelist, &ctx->rq_list);
951         else
952                 list_add_tail(&rq->queuelist, &ctx->rq_list);
953
954         blk_mq_hctx_mark_pending(hctx, ctx);
955 }
956
957 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
958                 bool async)
959 {
960         struct request_queue *q = rq->q;
961         struct blk_mq_hw_ctx *hctx;
962         struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
963
964         current_ctx = blk_mq_get_ctx(q);
965         if (!cpu_online(ctx->cpu))
966                 rq->mq_ctx = ctx = current_ctx;
967
968         hctx = q->mq_ops->map_queue(q, ctx->cpu);
969
970         spin_lock(&ctx->lock);
971         __blk_mq_insert_request(hctx, rq, at_head);
972         spin_unlock(&ctx->lock);
973
974         if (run_queue)
975                 blk_mq_run_hw_queue(hctx, async);
976
977         blk_mq_put_ctx(current_ctx);
978 }
979
980 static void blk_mq_insert_requests(struct request_queue *q,
981                                      struct blk_mq_ctx *ctx,
982                                      struct list_head *list,
983                                      int depth,
984                                      bool from_schedule)
985
986 {
987         struct blk_mq_hw_ctx *hctx;
988         struct blk_mq_ctx *current_ctx;
989
990         trace_block_unplug(q, depth, !from_schedule);
991
992         current_ctx = blk_mq_get_ctx(q);
993
994         if (!cpu_online(ctx->cpu))
995                 ctx = current_ctx;
996         hctx = q->mq_ops->map_queue(q, ctx->cpu);
997
998         /*
999          * preemption doesn't flush the plug list, so it's possible ctx->cpu is
1000          * offline now
1001          */
1002         spin_lock(&ctx->lock);
1003         while (!list_empty(list)) {
1004                 struct request *rq;
1005
1006                 rq = list_first_entry(list, struct request, queuelist);
1007                 list_del_init(&rq->queuelist);
1008                 rq->mq_ctx = ctx;
1009                 __blk_mq_insert_request(hctx, rq, false);
1010         }
1011         spin_unlock(&ctx->lock);
1012
1013         blk_mq_run_hw_queue(hctx, from_schedule);
1014         blk_mq_put_ctx(current_ctx);
1015 }
1016
1017 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1018 {
1019         struct request *rqa = container_of(a, struct request, queuelist);
1020         struct request *rqb = container_of(b, struct request, queuelist);
1021
1022         return !(rqa->mq_ctx < rqb->mq_ctx ||
1023                  (rqa->mq_ctx == rqb->mq_ctx &&
1024                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1025 }
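/*
 * The sort using this comparator groups plugged requests by software queue
 * (mq_ctx), and by sector within each queue, so blk_mq_flush_plug_list()
 * can hand each ctx's run of requests to blk_mq_insert_requests() as one
 * batch under a single ctx->lock.
 */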
1026
1027 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1028 {
1029         struct blk_mq_ctx *this_ctx;
1030         struct request_queue *this_q;
1031         struct request *rq;
1032         LIST_HEAD(list);
1033         LIST_HEAD(ctx_list);
1034         unsigned int depth;
1035
1036         list_splice_init(&plug->mq_list, &list);
1037
1038         list_sort(NULL, &list, plug_ctx_cmp);
1039
1040         this_q = NULL;
1041         this_ctx = NULL;
1042         depth = 0;
1043
1044         while (!list_empty(&list)) {
1045                 rq = list_entry_rq(list.next);
1046                 list_del_init(&rq->queuelist);
1047                 BUG_ON(!rq->q);
1048                 if (rq->mq_ctx != this_ctx) {
1049                         if (this_ctx) {
1050                                 blk_mq_insert_requests(this_q, this_ctx,
1051                                                         &ctx_list, depth,
1052                                                         from_schedule);
1053                         }
1054
1055                         this_ctx = rq->mq_ctx;
1056                         this_q = rq->q;
1057                         depth = 0;
1058                 }
1059
1060                 depth++;
1061                 list_add_tail(&rq->queuelist, &ctx_list);
1062         }
1063
1064         /*
1065          * If 'this_ctx' is set, we know we have entries to complete
1066          * on 'ctx_list'. Do those.
1067          */
1068         if (this_ctx) {
1069                 blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
1070                                        from_schedule);
1071         }
1072 }
1073
1074 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1075 {
1076         init_request_from_bio(rq, bio);
1077
1078         if (blk_do_io_stat(rq))
1079                 blk_account_io_start(rq, 1);
1080 }
1081
1082 static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
1083 {
1084         return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
1085                 !blk_queue_nomerges(hctx->queue);
1086 }
1087
1088 static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
1089                                          struct blk_mq_ctx *ctx,
1090                                          struct request *rq, struct bio *bio)
1091 {
1092         if (!hctx_allow_merges(hctx)) {
1093                 blk_mq_bio_to_request(rq, bio);
1094                 spin_lock(&ctx->lock);
1095 insert_rq:
1096                 __blk_mq_insert_request(hctx, rq, false);
1097                 spin_unlock(&ctx->lock);
1098                 return false;
1099         } else {
1100                 struct request_queue *q = hctx->queue;
1101
1102                 spin_lock(&ctx->lock);
1103                 if (!blk_mq_attempt_merge(q, ctx, bio)) {
1104                         blk_mq_bio_to_request(rq, bio);
1105                         goto insert_rq;
1106                 }
1107
1108                 spin_unlock(&ctx->lock);
1109                 __blk_mq_free_request(hctx, ctx, rq);
1110                 return true;
1111         }
1112 }
1113
1114 struct blk_map_ctx {
1115         struct blk_mq_hw_ctx *hctx;
1116         struct blk_mq_ctx *ctx;
1117 };
1118
1119 static struct request *blk_mq_map_request(struct request_queue *q,
1120                                           struct bio *bio,
1121                                           struct blk_map_ctx *data)
1122 {
1123         struct blk_mq_hw_ctx *hctx;
1124         struct blk_mq_ctx *ctx;
1125         struct request *rq;
1126         int rw = bio_data_dir(bio);
1127         struct blk_mq_alloc_data alloc_data;
1128
1129         if (unlikely(blk_mq_queue_enter(q))) {
1130                 bio_endio(bio, -EIO);
1131                 return NULL;
1132         }
1133
1134         ctx = blk_mq_get_ctx(q);
1135         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1136
1137         if (rw_is_sync(bio->bi_rw))
1138                 rw |= REQ_SYNC;
1139
1140         trace_block_getrq(q, bio, rw);
1141         blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
1142                         hctx);
1143         rq = __blk_mq_alloc_request(&alloc_data, rw);
1144         if (unlikely(!rq)) {
1145                 __blk_mq_run_hw_queue(hctx);
1146                 blk_mq_put_ctx(ctx);
1147                 trace_block_sleeprq(q, bio, rw);
1148
1149                 ctx = blk_mq_get_ctx(q);
1150                 hctx = q->mq_ops->map_queue(q, ctx->cpu);
1151                 blk_mq_set_alloc_data(&alloc_data, q,
1152                                 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
1153                 rq = __blk_mq_alloc_request(&alloc_data, rw);
1154                 ctx = alloc_data.ctx;
1155                 hctx = alloc_data.hctx;
1156         }
1157
1158         hctx->queued++;
1159         data->hctx = hctx;
1160         data->ctx = ctx;
1161         return rq;
1162 }
1163
1164 /*
1165  * Multiple hardware queue variant. This will not use per-process plugs,
1166  * but will attempt to bypass the hctx queueing if we can go straight to
1167  * hardware for SYNC IO.
1168  */
1169 static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
1170 {
1171         const int is_sync = rw_is_sync(bio->bi_rw);
1172         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1173         struct blk_map_ctx data;
1174         struct request *rq;
1175
1176         blk_queue_bounce(q, &bio);
1177
1178         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1179                 bio_endio(bio, -EIO);
1180                 return;
1181         }
1182
1183         rq = blk_mq_map_request(q, bio, &data);
1184         if (unlikely(!rq))
1185                 return;
1186
1187         if (unlikely(is_flush_fua)) {
1188                 blk_mq_bio_to_request(rq, bio);
1189                 blk_insert_flush(rq);
1190                 goto run_queue;
1191         }
1192
1193         /*
1194          * If the driver supports deferred issue based on 'last', then
1195          * queue it up like normal since we can potentially save some
1196          * CPU this way.
1197          */
1198         if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
1199                 struct blk_mq_queue_data bd = {
1200                         .rq = rq,
1201                         .list = NULL,
1202                         .last = 1
1203                 };
1204                 int ret;
1205
1206                 blk_mq_bio_to_request(rq, bio);
1207
1208                 /*
1209                  * If queueing succeeded, we are done. On error, kill the
1210                  * request. Anything else (busy) gets added to our list as
1211                  * it previously would have been.
1212                  */
1213                 ret = q->mq_ops->queue_rq(data.hctx, &bd);
1214                 if (ret == BLK_MQ_RQ_QUEUE_OK)
1215                         goto done;
1216                 else {
1217                         __blk_mq_requeue_request(rq);
1218
1219                         if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
1220                                 rq->errors = -EIO;
1221                                 blk_mq_end_request(rq, rq->errors);
1222                                 goto done;
1223                         }
1224                 }
1225         }
1226
1227         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1228                 /*
1229                  * For a SYNC request, send it to the hardware immediately. For
1230                  * an ASYNC request, just ensure that we run it later on. The
1231                  * latter allows for merging opportunities and more efficient
1232                  * dispatching.
1233                  */
1234 run_queue:
1235                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1236         }
1237 done:
1238         blk_mq_put_ctx(data.ctx);
1239 }
1240
1241 /*
1242  * Single hardware queue variant. This will attempt to use any per-process
1243  * plug for merging and IO deferral.
1244  */
1245 static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
1246 {
1247         const int is_sync = rw_is_sync(bio->bi_rw);
1248         const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
1249         unsigned int use_plug, request_count = 0;
1250         struct blk_map_ctx data;
1251         struct request *rq;
1252
1253         /*
1254          * Only use a plug for async, non-flush IO; sync and flush/FUA
1255          * requests go straight to the hardware queue.
1256          */
1257         use_plug = !is_flush_fua && !is_sync;
1258
1259         blk_queue_bounce(q, &bio);
1260
1261         if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1262                 bio_endio(bio, -EIO);
1263                 return;
1264         }
1265
1266         if (use_plug && !blk_queue_nomerges(q) &&
1267             blk_attempt_plug_merge(q, bio, &request_count))
1268                 return;
1269
1270         rq = blk_mq_map_request(q, bio, &data);
1271         if (unlikely(!rq))
1272                 return;
1273
1274         if (unlikely(is_flush_fua)) {
1275                 blk_mq_bio_to_request(rq, bio);
1276                 blk_insert_flush(rq);
1277                 goto run_queue;
1278         }
1279
1280         /*
1281          * A task plug currently exists. Since this is completely lockless,
1282          * utilize that to temporarily store requests until the task is
1283          * either done or scheduled away.
1284          */
1285         if (use_plug) {
1286                 struct blk_plug *plug = current->plug;
1287
1288                 if (plug) {
1289                         blk_mq_bio_to_request(rq, bio);
1290                         if (list_empty(&plug->mq_list))
1291                                 trace_block_plug(q);
1292                         else if (request_count >= BLK_MAX_REQUEST_COUNT) {
1293                                 blk_flush_plug_list(plug, false);
1294                                 trace_block_plug(q);
1295                         }
1296                         list_add_tail(&rq->queuelist, &plug->mq_list);
1297                         blk_mq_put_ctx(data.ctx);
1298                         return;
1299                 }
1300         }
1301
1302         if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
1303                 /*
1304                  * For a SYNC request, send it to the hardware immediately. For
1305                  * an ASYNC request, just ensure that we run it later on. The
1306                  * latter allows for merging opportunities and more efficient
1307                  * dispatching.
1308                  */
1309 run_queue:
1310                 blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
1311         }
1312
1313         blk_mq_put_ctx(data.ctx);
1314 }
1315
1316 /*
1317  * Default mapping to a software queue, since we use one per CPU.
1318  */
1319 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
1320 {
1321         return q->queue_hw_ctx[q->mq_map[cpu]];
1322 }
1323 EXPORT_SYMBOL(blk_mq_map_queue);
1324
1325 static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
1326                 struct blk_mq_tags *tags, unsigned int hctx_idx)
1327 {
1328         struct page *page;
1329
1330         if (tags->rqs && set->ops->exit_request) {
1331                 int i;
1332
1333                 for (i = 0; i < tags->nr_tags; i++) {
1334                         if (!tags->rqs[i])
1335                                 continue;
1336                         set->ops->exit_request(set->driver_data, tags->rqs[i],
1337                                                 hctx_idx, i);
1338                         tags->rqs[i] = NULL;
1339                 }
1340         }
1341
1342         while (!list_empty(&tags->page_list)) {
1343                 page = list_first_entry(&tags->page_list, struct page, lru);
1344                 list_del_init(&page->lru);
1345                 __free_pages(page, page->private);
1346         }
1347
1348         kfree(tags->rqs);
1349
1350         blk_mq_free_tags(tags);
1351 }
1352
1353 static size_t order_to_size(unsigned int order)
1354 {
1355         return (size_t)PAGE_SIZE << order;
1356 }
1357
1358 static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
1359                 unsigned int hctx_idx)
1360 {
1361         struct blk_mq_tags *tags;
1362         unsigned int i, j, entries_per_page, max_order = 4;
1363         size_t rq_size, left;
1364
1365         tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
1366                                 set->numa_node);
1367         if (!tags)
1368                 return NULL;
1369
1370         INIT_LIST_HEAD(&tags->page_list);
1371
1372         tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
1373                                  GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1374                                  set->numa_node);
1375         if (!tags->rqs) {
1376                 blk_mq_free_tags(tags);
1377                 return NULL;
1378         }
1379
1380         /*
1381          * rq_size is the size of the request plus driver payload, rounded
1382          * to the cacheline size
1383          */
1384         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1385                                 cache_line_size());
1386         left = rq_size * set->queue_depth;
1387
1388         for (i = 0; i < set->queue_depth; ) {
1389                 int this_order = max_order;
1390                 struct page *page;
1391                 int to_do;
1392                 void *p;
1393
1394                 while (left < order_to_size(this_order - 1) && this_order)
1395                         this_order--;
1396
1397                 do {
1398                         page = alloc_pages_node(set->numa_node,
1399                                 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
1400                                 this_order);
1401                         if (page)
1402                                 break;
1403                         if (!this_order--)
1404                                 break;
1405                         if (order_to_size(this_order) < rq_size)
1406                                 break;
1407                 } while (1);
1408
1409                 if (!page)
1410                         goto fail;
1411
1412                 page->private = this_order;
1413                 list_add_tail(&page->lru, &tags->page_list);
1414
1415                 p = page_address(page);
1416                 entries_per_page = order_to_size(this_order) / rq_size;
1417                 to_do = min(entries_per_page, set->queue_depth - i);
1418                 left -= to_do * rq_size;
1419                 for (j = 0; j < to_do; j++) {
1420                         tags->rqs[i] = p;
1421                         tags->rqs[i]->atomic_flags = 0;
1422                         tags->rqs[i]->cmd_flags = 0;
1423                         if (set->ops->init_request) {
1424                                 if (set->ops->init_request(set->driver_data,
1425                                                 tags->rqs[i], hctx_idx, i,
1426                                                 set->numa_node)) {
1427                                         tags->rqs[i] = NULL;
1428                                         goto fail;
1429                                 }
1430                         }
1431
1432                         p += rq_size;
1433                         i++;
1434                 }
1435         }
1436
1437         return tags;
1438
1439 fail:
1440         blk_mq_free_rq_map(set, tags, hctx_idx);
1441         return NULL;
1442 }
1443
1444 static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
1445 {
1446         kfree(bitmap->map);
1447 }
1448
1449 static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
1450 {
1451         unsigned int bpw = 8, total, num_maps, i;
1452
1453         bitmap->bits_per_word = bpw;
1454
1455         num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
1456         bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
1457                                         GFP_KERNEL, node);
1458         if (!bitmap->map)
1459                 return -ENOMEM;
1460
1461         bitmap->map_size = num_maps;
1462
1463         total = nr_cpu_ids;
1464         for (i = 0; i < num_maps; i++) {
1465                 bitmap->map[i].depth = min(total, bitmap->bits_per_word);
1466                 total -= bitmap->map[i].depth;
1467         }
1468
1469         return 0;
1470 }
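/*
 * Example: with nr_cpu_ids == 20 and bits_per_word == 8, this allocates
 * three blk_align_bitmap words with depths 8, 8 and 4.
 */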
1471
1472 static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
1473 {
1474         struct request_queue *q = hctx->queue;
1475         struct blk_mq_ctx *ctx;
1476         LIST_HEAD(tmp);
1477
1478         /*
1479          * Move ctx entries to new CPU, if this one is going away.
1480          */
1481         ctx = __blk_mq_get_ctx(q, cpu);
1482
1483         spin_lock(&ctx->lock);
1484         if (!list_empty(&ctx->rq_list)) {
1485                 list_splice_init(&ctx->rq_list, &tmp);
1486                 blk_mq_hctx_clear_pending(hctx, ctx);
1487         }
1488         spin_unlock(&ctx->lock);
1489
1490         if (list_empty(&tmp))
1491                 return NOTIFY_OK;
1492
1493         ctx = blk_mq_get_ctx(q);
1494         spin_lock(&ctx->lock);
1495
1496         while (!list_empty(&tmp)) {
1497                 struct request *rq;
1498
1499                 rq = list_first_entry(&tmp, struct request, queuelist);
1500                 rq->mq_ctx = ctx;
1501                 list_move_tail(&rq->queuelist, &ctx->rq_list);
1502         }
1503
1504         hctx = q->mq_ops->map_queue(q, ctx->cpu);
1505         blk_mq_hctx_mark_pending(hctx, ctx);
1506
1507         spin_unlock(&ctx->lock);
1508
1509         blk_mq_run_hw_queue(hctx, true);
1510         blk_mq_put_ctx(ctx);
1511         return NOTIFY_OK;
1512 }
1513
1514 static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
1515 {
1516         struct request_queue *q = hctx->queue;
1517         struct blk_mq_tag_set *set = q->tag_set;
1518
1519         if (set->tags[hctx->queue_num])
1520                 return NOTIFY_OK;
1521
1522         set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
1523         if (!set->tags[hctx->queue_num])
1524                 return NOTIFY_STOP;
1525
1526         hctx->tags = set->tags[hctx->queue_num];
1527         return NOTIFY_OK;
1528 }
1529
1530 static int blk_mq_hctx_notify(void *data, unsigned long action,
1531                               unsigned int cpu)
1532 {
1533         struct blk_mq_hw_ctx *hctx = data;
1534
1535         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
1536                 return blk_mq_hctx_cpu_offline(hctx, cpu);
1537         else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
1538                 return blk_mq_hctx_cpu_online(hctx, cpu);
1539
1540         return NOTIFY_OK;
1541 }
1542
1543 static void blk_mq_exit_hctx(struct request_queue *q,
1544                 struct blk_mq_tag_set *set,
1545                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1546 {
1547         unsigned flush_start_tag = set->queue_depth;
1548
1549         blk_mq_tag_idle(hctx);
1550
1551         if (set->ops->exit_request)
1552                 set->ops->exit_request(set->driver_data,
1553                                        hctx->fq->flush_rq, hctx_idx,
1554                                        flush_start_tag + hctx_idx);
1555
1556         if (set->ops->exit_hctx)
1557                 set->ops->exit_hctx(hctx, hctx_idx);
1558
1559         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1560         blk_free_flush_queue(hctx->fq);
1561         kfree(hctx->ctxs);
1562         blk_mq_free_bitmap(&hctx->ctx_map);
1563 }
1564
1565 static void blk_mq_exit_hw_queues(struct request_queue *q,
1566                 struct blk_mq_tag_set *set, int nr_queue)
1567 {
1568         struct blk_mq_hw_ctx *hctx;
1569         unsigned int i;
1570
1571         queue_for_each_hw_ctx(q, hctx, i) {
1572                 if (i == nr_queue)
1573                         break;
1574                 blk_mq_exit_hctx(q, set, hctx, i);
1575         }
1576 }
1577
1578 static void blk_mq_free_hw_queues(struct request_queue *q,
1579                 struct blk_mq_tag_set *set)
1580 {
1581         struct blk_mq_hw_ctx *hctx;
1582         unsigned int i;
1583
1584         queue_for_each_hw_ctx(q, hctx, i) {
1585                 free_cpumask_var(hctx->cpumask);
1586                 kfree(hctx);
1587         }
1588 }
1589
1590 static int blk_mq_init_hctx(struct request_queue *q,
1591                 struct blk_mq_tag_set *set,
1592                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1593 {
1594         int node;
1595         unsigned flush_start_tag = set->queue_depth;
1596
1597         node = hctx->numa_node;
1598         if (node == NUMA_NO_NODE)
1599                 node = hctx->numa_node = set->numa_node;
1600
1601         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1602         INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
1603         spin_lock_init(&hctx->lock);
1604         INIT_LIST_HEAD(&hctx->dispatch);
1605         hctx->queue = q;
1606         hctx->queue_num = hctx_idx;
1607         hctx->flags = set->flags;
1608         hctx->cmd_size = set->cmd_size;
1609
1610         blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
1611                                         blk_mq_hctx_notify, hctx);
1612         blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
1613
1614         hctx->tags = set->tags[hctx_idx];
1615
1616         /*
1617          * Allocate space for all possible cpus to avoid allocation at
1618          * runtime
1619          */
1620         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
1621                                         GFP_KERNEL, node);
1622         if (!hctx->ctxs)
1623                 goto unregister_cpu_notifier;
1624
1625         if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
1626                 goto free_ctxs;
1627
1628         hctx->nr_ctx = 0;
1629
1630         if (set->ops->init_hctx &&
1631             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
1632                 goto free_bitmap;
1633
1634         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
1635         if (!hctx->fq)
1636                 goto exit_hctx;
1637
1638         if (set->ops->init_request &&
1639             set->ops->init_request(set->driver_data,
1640                                    hctx->fq->flush_rq, hctx_idx,
1641                                    flush_start_tag + hctx_idx, node))
1642                 goto free_fq;
1643
1644         return 0;
1645
1646  free_fq:
1647         blk_free_flush_queue(hctx->fq);
1648  exit_hctx:
1649         if (set->ops->exit_hctx)
1650                 set->ops->exit_hctx(hctx, hctx_idx);
1651  free_bitmap:
1652         blk_mq_free_bitmap(&hctx->ctx_map);
1653  free_ctxs:
1654         kfree(hctx->ctxs);
1655  unregister_cpu_notifier:
1656         blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
1657
1658         return -1;
1659 }
1660
1661 static int blk_mq_init_hw_queues(struct request_queue *q,
1662                 struct blk_mq_tag_set *set)
1663 {
1664         struct blk_mq_hw_ctx *hctx;
1665         unsigned int i;
1666
1667         /*
1668          * Initialize hardware queues
1669          */
1670         queue_for_each_hw_ctx(q, hctx, i) {
1671                 if (blk_mq_init_hctx(q, set, hctx, i))
1672                         break;
1673         }
1674
1675         if (i == q->nr_hw_queues)
1676                 return 0;
1677
1678         /*
1679          * Init failed
1680          */
1681         blk_mq_exit_hw_queues(q, set, i);
1682
1683         return 1;
1684 }
1685
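/*
 * Initialize the per-CPU software queues and, for each online CPU, account
 * it to the hardware queue returned by mq_ops->map_queue().  With more than
 * one hardware queue, a hctx without an explicit NUMA node inherits the
 * node of one of its CPUs.
 */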
1686 static void blk_mq_init_cpu_queues(struct request_queue *q,
1687                                    unsigned int nr_hw_queues)
1688 {
1689         unsigned int i;
1690
1691         for_each_possible_cpu(i) {
1692                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1693                 struct blk_mq_hw_ctx *hctx;
1694
1695                 memset(__ctx, 0, sizeof(*__ctx));
1696                 __ctx->cpu = i;
1697                 spin_lock_init(&__ctx->lock);
1698                 INIT_LIST_HEAD(&__ctx->rq_list);
1699                 __ctx->queue = q;
1700
1701                 /* If the CPU isn't online, it is mapped to the first hctx */
1702                 if (!cpu_online(i))
1703                         continue;
1704
1705                 hctx = q->mq_ops->map_queue(q, i);
1706                 cpumask_set_cpu(i, hctx->cpumask);
1707                 hctx->nr_ctx++;
1708
1709                 /*
1710                  * Set local node, IFF we have more than one hw queue. If
1711                  * not, we remain on the home node of the device
1712                  */
1713                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1714                         hctx->numa_node = cpu_to_node(i);
1715         }
1716 }
1717
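/*
 * (Re)build the software-to-hardware queue mapping: every online CPU's ctx
 * gets a slot in its hardware queue's ctxs[] array.  Hardware queues that
 * end up with no software queues mapped to them have their tag map freed so
 * they don't pin request memory.
 */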
1718 static void blk_mq_map_swqueue(struct request_queue *q)
1719 {
1720         unsigned int i;
1721         struct blk_mq_hw_ctx *hctx;
1722         struct blk_mq_ctx *ctx;
1723
1724         queue_for_each_hw_ctx(q, hctx, i) {
1725                 cpumask_clear(hctx->cpumask);
1726                 hctx->nr_ctx = 0;
1727         }
1728
1729         /*
1730          * Map software to hardware queues
1731          */
1732         queue_for_each_ctx(q, ctx, i) {
1733                 /* If the CPU isn't online, it is mapped to the first hctx */
1734                 if (!cpu_online(i))
1735                         continue;
1736
1737                 hctx = q->mq_ops->map_queue(q, i);
1738                 cpumask_set_cpu(i, hctx->cpumask);
1739                 ctx->index_hw = hctx->nr_ctx;
1740                 hctx->ctxs[hctx->nr_ctx++] = ctx;
1741         }
1742
1743         queue_for_each_hw_ctx(q, hctx, i) {
1744                 /*
1745                  * If no software queues are mapped to this hardware queue,
1746                  * disable it and free the request entries.
1747                  */
1748                 if (!hctx->nr_ctx) {
1749                         struct blk_mq_tag_set *set = q->tag_set;
1750
1751                         if (set->tags[i]) {
1752                                 blk_mq_free_rq_map(set, set->tags[i], i);
1753                                 set->tags[i] = NULL;
1754                                 hctx->tags = NULL;
1755                         }
1756                         continue;
1757                 }
1758
1759                 /*
1760                  * Initialize batch round-robin counts
1761                  */
1762                 hctx->next_cpu = cpumask_first(hctx->cpumask);
1763                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1764         }
1765 }
1766
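/*
 * When more than one request queue shares this tag set, mark every hctx of
 * every queue with BLK_MQ_F_TAG_SHARED (and clear the flag again once only
 * one queue remains), so the tagging code knows it has to balance tags
 * between active users.  Each queue is frozen around the flag update.
 */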
1767 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1768 {
1769         struct blk_mq_hw_ctx *hctx;
1770         struct request_queue *q;
1771         bool shared;
1772         int i;
1773
1774         if (set->tag_list.next == set->tag_list.prev)
1775                 shared = false;
1776         else
1777                 shared = true;
1778
1779         list_for_each_entry(q, &set->tag_list, tag_set_list) {
1780                 blk_mq_freeze_queue(q);
1781
1782                 queue_for_each_hw_ctx(q, hctx, i) {
1783                         if (shared)
1784                                 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1785                         else
1786                                 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1787                 }
1788                 blk_mq_unfreeze_queue(q);
1789         }
1790 }
1791
1792 static void blk_mq_del_queue_tag_set(struct request_queue *q)
1793 {
1794         struct blk_mq_tag_set *set = q->tag_set;
1795
1796         mutex_lock(&set->tag_list_lock);
1797         list_del_init(&q->tag_set_list);
1798         blk_mq_update_tag_set_depth(set);
1799         mutex_unlock(&set->tag_list_lock);
1800 }
1801
1802 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1803                                      struct request_queue *q)
1804 {
1805         q->tag_set = set;
1806
1807         mutex_lock(&set->tag_list_lock);
1808         list_add_tail(&q->tag_set_list, &set->tag_list);
1809         blk_mq_update_tag_set_depth(set);
1810         mutex_unlock(&set->tag_list_lock);
1811 }
1812
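/*
 * Allocate and initialize a request queue driven by this tag set: per-CPU
 * software queues, one hctx per hardware queue, the CPU-to-queue map and
 * the request_queue itself.  Returns an ERR_PTR() on failure.
 */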
1813 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
1814 {
1815         struct blk_mq_hw_ctx **hctxs;
1816         struct blk_mq_ctx __percpu *ctx;
1817         struct request_queue *q;
1818         unsigned int *map;
1819         int i;
1820
1821         ctx = alloc_percpu(struct blk_mq_ctx);
1822         if (!ctx)
1823                 return ERR_PTR(-ENOMEM);
1824
1825         hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1826                         set->numa_node);
1827
1828         if (!hctxs)
1829                 goto err_percpu;
1830
1831         map = blk_mq_make_queue_map(set);
1832         if (!map)
1833                 goto err_map;
1834
1835         for (i = 0; i < set->nr_hw_queues; i++) {
1836                 int node = blk_mq_hw_queue_to_node(map, i);
1837
1838                 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1839                                         GFP_KERNEL, node);
1840                 if (!hctxs[i])
1841                         goto err_hctxs;
1842
1843                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1844                                                 node))
1845                         goto err_hctxs;
1846
1847                 atomic_set(&hctxs[i]->nr_active, 0);
1848                 hctxs[i]->numa_node = node;
1849                 hctxs[i]->queue_num = i;
1850         }
1851
1852         q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1853         if (!q)
1854                 goto err_hctxs;
1855
1856         /*
1857          * Init percpu_ref in atomic mode so that it's faster to shutdown.
1858          * See blk_register_queue() for details.
1859          */
1860         if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
1861                             PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
1862                 goto err_hctxs;
1863
1864         setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
1865         blk_queue_rq_timeout(q, 30000);
1866
1867         q->nr_queues = nr_cpu_ids;
1868         q->nr_hw_queues = set->nr_hw_queues;
1869         q->mq_map = map;
1870
1871         q->queue_ctx = ctx;
1872         q->queue_hw_ctx = hctxs;
1873
1874         q->mq_ops = set->ops;
1875         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
1876
1877         if (!(set->flags & BLK_MQ_F_SG_MERGE))
1878                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
1879
1880         q->sg_reserved_size = INT_MAX;
1881
1882         INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
1883         INIT_LIST_HEAD(&q->requeue_list);
1884         spin_lock_init(&q->requeue_lock);
1885
1886         if (q->nr_hw_queues > 1)
1887                 blk_queue_make_request(q, blk_mq_make_request);
1888         else
1889                 blk_queue_make_request(q, blk_sq_make_request);
1890
1891         if (set->timeout)
1892                 blk_queue_rq_timeout(q, set->timeout);
1893
1894         /*
1895          * Do this after blk_queue_make_request() overrides it...
1896          */
1897         q->nr_requests = set->queue_depth;
1898
1899         if (set->ops->complete)
1900                 blk_queue_softirq_done(q, set->ops->complete);
1901
1902         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
1903
1904         if (blk_mq_init_hw_queues(q, set))
1905                 goto err_hw;
1906
1907         mutex_lock(&all_q_mutex);
1908         list_add_tail(&q->all_q_node, &all_q_list);
1909         mutex_unlock(&all_q_mutex);
1910
1911         blk_mq_add_queue_tag_set(set, q);
1912
1913         blk_mq_map_swqueue(q);
1914
1915         return q;
1916
1917 err_hw:
1918         blk_cleanup_queue(q);
1919 err_hctxs:
1920         kfree(map);
1921         for (i = 0; i < set->nr_hw_queues; i++) {
1922                 if (!hctxs[i])
1923                         break;
1924                 free_cpumask_var(hctxs[i]->cpumask);
1925                 kfree(hctxs[i]);
1926         }
1927 err_map:
1928         kfree(hctxs);
1929 err_percpu:
1930         free_percpu(ctx);
1931         return ERR_PTR(-ENOMEM);
1932 }
1933 EXPORT_SYMBOL(blk_mq_init_queue);
1934
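/*
 * Counterpart of blk_mq_init_queue(): detach the queue from its tag set and
 * release all per-queue blk-mq resources.
 */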
1935 void blk_mq_free_queue(struct request_queue *q)
1936 {
1937         struct blk_mq_tag_set   *set = q->tag_set;
1938
1939         blk_mq_del_queue_tag_set(q);
1940
1941         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
1942         blk_mq_free_hw_queues(q, set);
1943
1944         percpu_ref_exit(&q->mq_usage_counter);
1945
1946         free_percpu(q->queue_ctx);
1947         kfree(q->queue_hw_ctx);
1948         kfree(q->mq_map);
1949
1950         q->queue_ctx = NULL;
1951         q->queue_hw_ctx = NULL;
1952         q->mq_map = NULL;
1953
1954         mutex_lock(&all_q_mutex);
1955         list_del_init(&q->all_q_node);
1956         mutex_unlock(&all_q_mutex);
1957 }
1958
1959 /* Basically redo blk_mq_init_queue with the queue frozen */
1960 static void blk_mq_queue_reinit(struct request_queue *q)
1961 {
1962         WARN_ON_ONCE(!q->mq_freeze_depth);
1963
1964         blk_mq_sysfs_unregister(q);
1965
1966         blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
1967
1968         /*
1969          * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
1970          * we should change the hctx numa_node according to the new topology
1971          * (this involves freeing and re-allocating memory, so is it worth doing?)
1972          */
1973
1974         blk_mq_map_swqueue(q);
1975
1976         blk_mq_sysfs_register(q);
1977 }
1978
1979 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1980                                       unsigned long action, void *hcpu)
1981 {
1982         struct request_queue *q;
1983
1984         /*
1985          * Before the new mappings are established, a hot-added CPU might
1986          * already have started handling requests. This doesn't break anything,
1987          * as we map offline CPUs to the first hardware queue. We re-init the
1988          * queues below to get optimal settings.
1989          */
1990         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
1991             action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
1992                 return NOTIFY_OK;
1993
1994         mutex_lock(&all_q_mutex);
1995
1996         /*
1997          * We need to freeze and reinit all existing queues.  Freezing
1998          * involves a synchronous wait for an RCU grace period, and doing it
1999          * one by one may take a long time.  Start freezing all queues in
2000          * one swoop and then wait for the completions, so that the freezes
2001          * can proceed in parallel.
2002          */
2003         list_for_each_entry(q, &all_q_list, all_q_node)
2004                 blk_mq_freeze_queue_start(q);
2005         list_for_each_entry(q, &all_q_list, all_q_node)
2006                 blk_mq_freeze_queue_wait(q);
2007
2008         list_for_each_entry(q, &all_q_list, all_q_node)
2009                 blk_mq_queue_reinit(q);
2010
2011         list_for_each_entry(q, &all_q_list, all_q_node)
2012                 blk_mq_unfreeze_queue(q);
2013
2014         mutex_unlock(&all_q_mutex);
2015         return NOTIFY_OK;
2016 }
2017
2018 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2019 {
2020         int i;
2021
2022         for (i = 0; i < set->nr_hw_queues; i++) {
2023                 set->tags[i] = blk_mq_init_rq_map(set, i);
2024                 if (!set->tags[i])
2025                         goto out_unwind;
2026         }
2027
2028         return 0;
2029
2030 out_unwind:
2031         while (--i >= 0)
2032                 blk_mq_free_rq_map(set, set->tags[i], i);
2033
2034         return -ENOMEM;
2035 }
2036
2037 /*
2038  * Allocate the request maps associated with this tag_set. Note that this
2039  * may reduce the depth asked for, if memory is tight. set->queue_depth
2040  * will be updated to reflect the allocated depth.
2041  */
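/*
 * For example, a set->queue_depth of 256 that cannot be allocated is retried
 * at 128, 64, ... and the attempt is abandoned once the depth would drop
 * below set->reserved_tags + BLK_MQ_TAG_MIN.
 */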
2042 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2043 {
2044         unsigned int depth;
2045         int err;
2046
2047         depth = set->queue_depth;
2048         do {
2049                 err = __blk_mq_alloc_rq_maps(set);
2050                 if (!err)
2051                         break;
2052
2053                 set->queue_depth >>= 1;
2054                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2055                         err = -ENOMEM;
2056                         break;
2057                 }
2058         } while (set->queue_depth);
2059
2060         if (!set->queue_depth || err) {
2061                 pr_err("blk-mq: failed to allocate request map\n");
2062                 return -ENOMEM;
2063         }
2064
2065         if (depth != set->queue_depth)
2066                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2067                                                 depth, set->queue_depth);
2068
2069         return 0;
2070 }
2071
2072 /*
2073  * Alloc a tag set to be associated with one or more request queues.
2074  * May fail with EINVAL for various error conditions. May adjust the
2075  * requested depth down, if it is too large. In that case, the adjusted
2076  * value will be stored in set->queue_depth.
2077  */
2078 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2079 {
2080         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2081
2082         if (!set->nr_hw_queues)
2083                 return -EINVAL;
2084         if (!set->queue_depth)
2085                 return -EINVAL;
2086         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2087                 return -EINVAL;
2088
2089         if (!set->ops->queue_rq || !set->ops->map_queue)
2090                 return -EINVAL;
2091
2092         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2093                 pr_info("blk-mq: reduced tag depth to %u\n",
2094                         BLK_MQ_MAX_DEPTH);
2095                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2096         }
2097
2098         /*
2099          * If a crashdump is active, then we are potentially in a very
2100          * memory-constrained environment. Limit us to 1 queue and
2101          * 64 tags to prevent using too much memory.
2102          */
2103         if (is_kdump_kernel()) {
2104                 set->nr_hw_queues = 1;
2105                 set->queue_depth = min(64U, set->queue_depth);
2106         }
2107
2108         set->tags = kmalloc_node(set->nr_hw_queues *
2109                                  sizeof(struct blk_mq_tags *),
2110                                  GFP_KERNEL, set->numa_node);
2111         if (!set->tags)
2112                 return -ENOMEM;
2113
2114         if (blk_mq_alloc_rq_maps(set))
2115                 goto enomem;
2116
2117         mutex_init(&set->tag_list_lock);
2118         INIT_LIST_HEAD(&set->tag_list);
2119
2120         return 0;
2121 enomem:
2122         kfree(set->tags);
2123         set->tags = NULL;
2124         return -ENOMEM;
2125 }
2126 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
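
/*
 * Typical driver usage (an illustrative sketch, not code from this file;
 * "my_dev", "my_mq_ops" and "struct my_cmd" are hypothetical driver names).
 * The tag set is normally embedded in a driver structure that outlives the
 * request queue:
 *
 *	my_dev->tag_set.ops		= &my_mq_ops;
 *	my_dev->tag_set.nr_hw_queues	= 1;
 *	my_dev->tag_set.queue_depth	= 64;
 *	my_dev->tag_set.numa_node	= NUMA_NO_NODE;
 *	my_dev->tag_set.cmd_size	= sizeof(struct my_cmd);
 *	my_dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(&my_dev->tag_set);
 *	if (ret)
 *		return ret;
 *
 *	q = blk_mq_init_queue(&my_dev->tag_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&my_dev->tag_set);
 *		return PTR_ERR(q);
 *	}
 */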
2127
2128 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2129 {
2130         int i;
2131
2132         for (i = 0; i < set->nr_hw_queues; i++) {
2133                 if (set->tags[i])
2134                         blk_mq_free_rq_map(set, set->tags[i], i);
2135         }
2136
2137         kfree(set->tags);
2138         set->tags = NULL;
2139 }
2140 EXPORT_SYMBOL(blk_mq_free_tag_set);
2141
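/*
 * Adjust the number of requests (tags) available to a queue at runtime,
 * typically when userspace writes /sys/block/<dev>/queue/nr_requests.  The
 * new value may not exceed set->queue_depth.
 */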
2142 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2143 {
2144         struct blk_mq_tag_set *set = q->tag_set;
2145         struct blk_mq_hw_ctx *hctx;
2146         int i, ret;
2147
2148         if (!set || nr > set->queue_depth)
2149                 return -EINVAL;
2150
2151         ret = 0;
2152         queue_for_each_hw_ctx(q, hctx, i) {
2153                 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2154                 if (ret)
2155                         break;
2156         }
2157
2158         if (!ret)
2159                 q->nr_requests = nr;
2160
2161         return ret;
2162 }
2163
2164 void blk_mq_disable_hotplug(void)
2165 {
2166         mutex_lock(&all_q_mutex);
2167 }
2168
2169 void blk_mq_enable_hotplug(void)
2170 {
2171         mutex_unlock(&all_q_mutex);
2172 }
2173
2174 static int __init blk_mq_init(void)
2175 {
2176         blk_mq_cpu_init();
2177
2178         hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
2179
2180         return 0;
2181 }
2182 subsys_initcall(blk_mq_init);