blk-mq: don't allocate driver tag upfront for flush rq
block/blk-mq.c
1 /*
2  * Block multiqueue core code
3  *
4  * Copyright (C) 2013-2014 Jens Axboe
5  * Copyright (C) 2013-2014 Christoph Hellwig
6  */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/backing-dev.h>
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/kmemleak.h>
13 #include <linux/mm.h>
14 #include <linux/init.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/smp.h>
18 #include <linux/llist.h>
19 #include <linux/list_sort.h>
20 #include <linux/cpu.h>
21 #include <linux/cache.h>
22 #include <linux/sched/sysctl.h>
23 #include <linux/sched/topology.h>
24 #include <linux/sched/signal.h>
25 #include <linux/delay.h>
26 #include <linux/crash_dump.h>
27 #include <linux/prefetch.h>
28
29 #include <trace/events/block.h>
30
31 #include <linux/blk-mq.h>
32 #include "blk.h"
33 #include "blk-mq.h"
34 #include "blk-mq-debugfs.h"
35 #include "blk-mq-tag.h"
36 #include "blk-stat.h"
37 #include "blk-wbt.h"
38 #include "blk-mq-sched.h"
39
40 static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
41 static void blk_mq_poll_stats_start(struct request_queue *q);
42 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
43
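/*
 * Map a request to its poll-stats bucket: even buckets hold reads, odd
 * buckets hold writes, grouped by power-of-two request size starting at
 * 512 bytes.
 */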
44 static int blk_mq_poll_stats_bkt(const struct request *rq)
45 {
46         int ddir, bytes, bucket;
47
48         ddir = rq_data_dir(rq);
49         bytes = blk_rq_bytes(rq);
50
51         bucket = ddir + 2*(ilog2(bytes) - 9);
52
53         if (bucket < 0)
54                 return -1;
55         else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
56                 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
57
58         return bucket;
59 }
60
61 /*
62  * Check if any of the ctx's have pending work in this hardware queue
63  */
64 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
65 {
66         return sbitmap_any_bit_set(&hctx->ctx_map) ||
67                         !list_empty_careful(&hctx->dispatch) ||
68                         blk_mq_sched_has_work(hctx);
69 }
70
71 /*
72  * Mark this ctx as having pending work in this hardware queue
73  */
74 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
75                                      struct blk_mq_ctx *ctx)
76 {
77         if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
78                 sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
79 }
80
81 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
82                                       struct blk_mq_ctx *ctx)
83 {
84         sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
85 }
86
87 struct mq_inflight {
88         struct hd_struct *part;
89         unsigned int *inflight;
90 };
91
92 static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
93                                   struct request *rq, void *priv,
94                                   bool reserved)
95 {
96         struct mq_inflight *mi = priv;
97
98         if (test_bit(REQ_ATOM_STARTED, &rq->atomic_flags) &&
99             !test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
100                 /*
101                  * index[0] counts the specific partition that was asked
102                  * for. index[1] counts the ones that are active on the
103                  * whole device, so increment that if mi->part is indeed
104                  * a partition, and not a whole device.
105                  */
106                 if (rq->part == mi->part)
107                         mi->inflight[0]++;
108                 if (mi->part->partno)
109                         mi->inflight[1]++;
110         }
111 }
112
113 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
114                       unsigned int inflight[2])
115 {
116         struct mq_inflight mi = { .part = part, .inflight = inflight, };
117
118         inflight[0] = inflight[1] = 0;
119         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
120 }
121
122 void blk_freeze_queue_start(struct request_queue *q)
123 {
124         int freeze_depth;
125
126         freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
127         if (freeze_depth == 1) {
128                 percpu_ref_kill(&q->q_usage_counter);
129                 blk_mq_run_hw_queues(q, false);
130         }
131 }
132 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
133
134 void blk_mq_freeze_queue_wait(struct request_queue *q)
135 {
136         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
137 }
138 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
139
140 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
141                                      unsigned long timeout)
142 {
143         return wait_event_timeout(q->mq_freeze_wq,
144                                         percpu_ref_is_zero(&q->q_usage_counter),
145                                         timeout);
146 }
147 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
148
149 /*
150  * Guarantee no request is in use, so we can change any data structure of
151  * the queue afterward.
152  */
153 void blk_freeze_queue(struct request_queue *q)
154 {
155         /*
156          * In the !blk_mq case we are only calling this to kill the
157          * q_usage_counter, otherwise this increases the freeze depth
158          * and waits for it to return to zero.  For this reason there is
159          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
160          * exported to drivers as the only user for unfreeze is blk_mq.
161          */
162         blk_freeze_queue_start(q);
163         blk_mq_freeze_queue_wait(q);
164 }
165
166 void blk_mq_freeze_queue(struct request_queue *q)
167 {
168         /*
169          * ...just an alias to keep freeze and unfreeze actions balanced
170          * in the blk_mq_* namespace
171          */
172         blk_freeze_queue(q);
173 }
174 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
175
176 void blk_mq_unfreeze_queue(struct request_queue *q)
177 {
178         int freeze_depth;
179
180         freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
181         WARN_ON_ONCE(freeze_depth < 0);
182         if (!freeze_depth) {
183                 percpu_ref_reinit(&q->q_usage_counter);
184                 wake_up_all(&q->mq_freeze_wq);
185         }
186 }
187 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
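
/*
 * Illustrative driver-side pairing (not taken from this file; exact call
 * sites vary by driver):
 *
 *	blk_mq_freeze_queue(q);
 *	... update state that must not race with in-flight requests ...
 *	blk_mq_unfreeze_queue(q);
 */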
188
189 /*
190  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
191  * mpt3sas driver such that this function can be removed.
192  */
193 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
194 {
195         unsigned long flags;
196
197         spin_lock_irqsave(q->queue_lock, flags);
198         queue_flag_set(QUEUE_FLAG_QUIESCED, q);
199         spin_unlock_irqrestore(q->queue_lock, flags);
200 }
201 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
202
203 /**
204  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
205  * @q: request queue.
206  *
207  * Note: this function does not prevent the struct request end_io()
208  * callback from being invoked. Once this function returns, it is
209  * guaranteed that no dispatch can happen until the queue is unquiesced
210  * via blk_mq_unquiesce_queue().
211  */
212 void blk_mq_quiesce_queue(struct request_queue *q)
213 {
214         struct blk_mq_hw_ctx *hctx;
215         unsigned int i;
216         bool rcu = false;
217
218         blk_mq_quiesce_queue_nowait(q);
219
220         queue_for_each_hw_ctx(q, hctx, i) {
221                 if (hctx->flags & BLK_MQ_F_BLOCKING)
222                         synchronize_srcu(hctx->queue_rq_srcu);
223                 else
224                         rcu = true;
225         }
226         if (rcu)
227                 synchronize_rcu();
228 }
229 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
230
231 /*
232  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
233  * @q: request queue.
234  *
235  * This function restores the queue to the state it was in before
236  * blk_mq_quiesce_queue() was called.
237  */
238 void blk_mq_unquiesce_queue(struct request_queue *q)
239 {
240         unsigned long flags;
241
242         spin_lock_irqsave(q->queue_lock, flags);
243         queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
244         spin_unlock_irqrestore(q->queue_lock, flags);
245
246         /* dispatch requests which are inserted during quiescing */
247         blk_mq_run_hw_queues(q, true);
248 }
249 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
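
/*
 * Illustrative pairing (driver-specific in practice): between these calls
 * no .queue_rq() invocation is in progress or can start:
 *
 *	blk_mq_quiesce_queue(q);
 *	... no dispatch is running here ...
 *	blk_mq_unquiesce_queue(q);
 */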
250
251 void blk_mq_wake_waiters(struct request_queue *q)
252 {
253         struct blk_mq_hw_ctx *hctx;
254         unsigned int i;
255
256         queue_for_each_hw_ctx(q, hctx, i)
257                 if (blk_mq_hw_queue_mapped(hctx))
258                         blk_mq_tag_wakeup_all(hctx->tags, true);
259
260         /*
261          * If we are called because the queue has now been marked as
262          * dying, we need to ensure that processes currently waiting on
263          * the queue are notified as well.
264          */
265         wake_up_all(&q->mq_freeze_wq);
266 }
267
268 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
269 {
270         return blk_mq_has_free_tags(hctx->tags);
271 }
272 EXPORT_SYMBOL(blk_mq_can_queue);
273
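/*
 * Initialize the static request associated with @tag for the given
 * operation, wiring it up to either a driver tag or a scheduler-internal
 * tag.
 */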
274 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
275                 unsigned int tag, unsigned int op)
276 {
277         struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
278         struct request *rq = tags->static_rqs[tag];
279
280         rq->rq_flags = 0;
281
282         if (data->flags & BLK_MQ_REQ_INTERNAL) {
283                 rq->tag = -1;
284                 rq->internal_tag = tag;
285         } else {
286                 if (blk_mq_tag_busy(data->hctx)) {
287                         rq->rq_flags = RQF_MQ_INFLIGHT;
288                         atomic_inc(&data->hctx->nr_active);
289                 }
290                 rq->tag = tag;
291                 rq->internal_tag = -1;
292                 data->hctx->tags->rqs[rq->tag] = rq;
293         }
294
295         INIT_LIST_HEAD(&rq->queuelist);
296         /* csd/requeue_work/fifo_time is initialized before use */
297         rq->q = data->q;
298         rq->mq_ctx = data->ctx;
299         rq->cmd_flags = op;
300         if (blk_queue_io_stat(data->q))
301                 rq->rq_flags |= RQF_IO_STAT;
302         /* do not touch atomic flags, it needs atomic ops against the timer */
303         rq->cpu = -1;
304         INIT_HLIST_NODE(&rq->hash);
305         RB_CLEAR_NODE(&rq->rb_node);
306         rq->rq_disk = NULL;
307         rq->part = NULL;
308         rq->start_time = jiffies;
309 #ifdef CONFIG_BLK_CGROUP
310         rq->rl = NULL;
311         set_start_time_ns(rq);
312         rq->io_start_time_ns = 0;
313 #endif
314         rq->nr_phys_segments = 0;
315 #if defined(CONFIG_BLK_DEV_INTEGRITY)
316         rq->nr_integrity_segments = 0;
317 #endif
318         rq->special = NULL;
319         /* tag was already set */
320         rq->extra_len = 0;
321
322         INIT_LIST_HEAD(&rq->timeout_list);
323         rq->timeout = 0;
324
325         rq->end_io = NULL;
326         rq->end_io_data = NULL;
327         rq->next_rq = NULL;
328
329         data->ctx->rq_dispatched[op_is_sync(op)]++;
330         return rq;
331 }
332
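/*
 * Allocate a tag and set up a request for it. With an elevator attached the
 * allocation uses scheduler (internal) tags; flush requests skip the
 * scheduler's depth limiting and request preparation.
 */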
333 static struct request *blk_mq_get_request(struct request_queue *q,
334                 struct bio *bio, unsigned int op,
335                 struct blk_mq_alloc_data *data)
336 {
337         struct elevator_queue *e = q->elevator;
338         struct request *rq;
339         unsigned int tag;
340         bool put_ctx_on_error = false;
341
342         blk_queue_enter_live(q);
343         data->q = q;
344         if (likely(!data->ctx)) {
345                 data->ctx = blk_mq_get_ctx(q);
346                 put_ctx_on_error = true;
347         }
348         if (likely(!data->hctx))
349                 data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
350         if (op & REQ_NOWAIT)
351                 data->flags |= BLK_MQ_REQ_NOWAIT;
352
353         if (e) {
354                 data->flags |= BLK_MQ_REQ_INTERNAL;
355
356                 /*
357                  * Flush requests are special and go directly to the
358                  * dispatch list.
359                  */
360                 if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
361                         e->type->ops.mq.limit_depth(op, data);
362         }
363
364         tag = blk_mq_get_tag(data);
365         if (tag == BLK_MQ_TAG_FAIL) {
366                 if (put_ctx_on_error) {
367                         blk_mq_put_ctx(data->ctx);
368                         data->ctx = NULL;
369                 }
370                 blk_queue_exit(q);
371                 return NULL;
372         }
373
374         rq = blk_mq_rq_ctx_init(data, tag, op);
375         if (!op_is_flush(op)) {
376                 rq->elv.icq = NULL;
377                 if (e && e->type->ops.mq.prepare_request) {
378                         if (e->type->icq_cache && rq_ioc(bio))
379                                 blk_mq_sched_assign_ioc(rq, bio);
380
381                         e->type->ops.mq.prepare_request(rq, bio);
382                         rq->rq_flags |= RQF_ELVPRIV;
383                 }
384         }
385         data->hctx->queued++;
386         return rq;
387 }
388
389 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
390                 unsigned int flags)
391 {
392         struct blk_mq_alloc_data alloc_data = { .flags = flags };
393         struct request *rq;
394         int ret;
395
396         ret = blk_queue_enter(q, flags & BLK_MQ_REQ_NOWAIT);
397         if (ret)
398                 return ERR_PTR(ret);
399
400         rq = blk_mq_get_request(q, NULL, op, &alloc_data);
401         blk_queue_exit(q);
402
403         if (!rq)
404                 return ERR_PTR(-EWOULDBLOCK);
405
406         blk_mq_put_ctx(alloc_data.ctx);
407
408         rq->__data_len = 0;
409         rq->__sector = (sector_t) -1;
410         rq->bio = rq->biotail = NULL;
411         return rq;
412 }
413 EXPORT_SYMBOL(blk_mq_alloc_request);
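
/*
 * Illustrative caller pattern (error handling trimmed); real users are
 * drivers and passthrough request paths:
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */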
414
415 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
416                 unsigned int op, unsigned int flags, unsigned int hctx_idx)
417 {
418         struct blk_mq_alloc_data alloc_data = { .flags = flags };
419         struct request *rq;
420         unsigned int cpu;
421         int ret;
422
423         /*
424          * If the tag allocator sleeps we could get an allocation for a
425          * different hardware context.  No need to complicate the low-level
426          * allocator for the rare use case of a command tied to a specific
427          * queue.
428          */
429         if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
430                 return ERR_PTR(-EINVAL);
431
432         if (hctx_idx >= q->nr_hw_queues)
433                 return ERR_PTR(-EIO);
434
435         ret = blk_queue_enter(q, true);
436         if (ret)
437                 return ERR_PTR(ret);
438
439         /*
440          * Check if the hardware context is actually mapped to anything.
441          * If not, tell the caller that it should skip this queue.
442          */
443         alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
444         if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
445                 blk_queue_exit(q);
446                 return ERR_PTR(-EXDEV);
447         }
448         cpu = cpumask_first(alloc_data.hctx->cpumask);
449         alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
450
451         rq = blk_mq_get_request(q, NULL, op, &alloc_data);
452         blk_queue_exit(q);
453
454         if (!rq)
455                 return ERR_PTR(-EWOULDBLOCK);
456
457         return rq;
458 }
459 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
460
461 void blk_mq_free_request(struct request *rq)
462 {
463         struct request_queue *q = rq->q;
464         struct elevator_queue *e = q->elevator;
465         struct blk_mq_ctx *ctx = rq->mq_ctx;
466         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
467         const int sched_tag = rq->internal_tag;
468
469         if (rq->rq_flags & RQF_ELVPRIV) {
470                 if (e && e->type->ops.mq.finish_request)
471                         e->type->ops.mq.finish_request(rq);
472                 if (rq->elv.icq) {
473                         put_io_context(rq->elv.icq->ioc);
474                         rq->elv.icq = NULL;
475                 }
476         }
477
478         ctx->rq_completed[rq_is_sync(rq)]++;
479         if (rq->rq_flags & RQF_MQ_INFLIGHT)
480                 atomic_dec(&hctx->nr_active);
481
482         if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
483                 laptop_io_completion(q->backing_dev_info);
484
485         wbt_done(q->rq_wb, &rq->issue_stat);
486
487         if (blk_rq_rl(rq))
488                 blk_put_rl(blk_rq_rl(rq));
489
490         clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
491         clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
492         if (rq->tag != -1)
493                 blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
494         if (sched_tag != -1)
495                 blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
496         blk_mq_sched_restart(hctx);
497         blk_queue_exit(q);
498 }
499 EXPORT_SYMBOL_GPL(blk_mq_free_request);
500
501 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
502 {
503         blk_account_io_done(rq);
504
505         if (rq->end_io) {
506                 wbt_done(rq->q->rq_wb, &rq->issue_stat);
507                 rq->end_io(rq, error);
508         } else {
509                 if (unlikely(blk_bidi_rq(rq)))
510                         blk_mq_free_request(rq->next_rq);
511                 blk_mq_free_request(rq);
512         }
513 }
514 EXPORT_SYMBOL(__blk_mq_end_request);
515
516 void blk_mq_end_request(struct request *rq, blk_status_t error)
517 {
518         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
519                 BUG();
520         __blk_mq_end_request(rq, error);
521 }
522 EXPORT_SYMBOL(blk_mq_end_request);
523
524 static void __blk_mq_complete_request_remote(void *data)
525 {
526         struct request *rq = data;
527
528         rq->q->softirq_done_fn(rq);
529 }
530
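/*
 * Complete @rq by calling the queue's softirq_done_fn, either on the local
 * CPU or via an IPI to the submitting CPU, depending on the queue's
 * completion-affinity flags and cache sharing.
 */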
531 static void __blk_mq_complete_request(struct request *rq)
532 {
533         struct blk_mq_ctx *ctx = rq->mq_ctx;
534         bool shared = false;
535         int cpu;
536
537         if (rq->internal_tag != -1)
538                 blk_mq_sched_completed_request(rq);
539         if (rq->rq_flags & RQF_STATS) {
540                 blk_mq_poll_stats_start(rq->q);
541                 blk_stat_add(rq);
542         }
543
544         if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
545                 rq->q->softirq_done_fn(rq);
546                 return;
547         }
548
549         cpu = get_cpu();
550         if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
551                 shared = cpus_share_cache(cpu, ctx->cpu);
552
553         if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
554                 rq->csd.func = __blk_mq_complete_request_remote;
555                 rq->csd.info = rq;
556                 rq->csd.flags = 0;
557                 smp_call_function_single_async(ctx->cpu, &rq->csd);
558         } else {
559                 rq->q->softirq_done_fn(rq);
560         }
561         put_cpu();
562 }
563
564 /**
565  * blk_mq_complete_request - end I/O on a request
566  * @rq:         the request being processed
567  *
568  * Description:
569  *      Ends all I/O on a request. It does not handle partial completions.
570  *      The actual completion happens out-of-order, through an IPI handler.
571  **/
572 void blk_mq_complete_request(struct request *rq)
573 {
574         struct request_queue *q = rq->q;
575
576         if (unlikely(blk_should_fake_timeout(q)))
577                 return;
578         if (!blk_mark_rq_complete(rq))
579                 __blk_mq_complete_request(rq);
580 }
581 EXPORT_SYMBOL(blk_mq_complete_request);
582
583 int blk_mq_request_started(struct request *rq)
584 {
585         return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
586 }
587 EXPORT_SYMBOL_GPL(blk_mq_request_started);
588
589 void blk_mq_start_request(struct request *rq)
590 {
591         struct request_queue *q = rq->q;
592
593         blk_mq_sched_started_request(rq);
594
595         trace_block_rq_issue(q, rq);
596
597         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
598                 blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
599                 rq->rq_flags |= RQF_STATS;
600                 wbt_issue(q->rq_wb, &rq->issue_stat);
601         }
602
603         blk_add_timer(rq);
604
605         WARN_ON_ONCE(test_bit(REQ_ATOM_STARTED, &rq->atomic_flags));
606
607         /*
608          * Mark us as started and clear complete. Complete might have been
609          * set if requeue raced with timeout, which then marked it as
610          * complete. So be sure to clear complete again when we start
611          * the request, otherwise we'll ignore the completion event.
612          *
613          * Ensure that ->deadline is visible before we set STARTED, such that
614          * blk_mq_check_expired() is guaranteed to observe our ->deadline when
615          * it observes STARTED.
616          */
617         smp_wmb();
618         set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
619         if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) {
620                 /*
621                  * Coherence order guarantees these consecutive stores to a
622                  * single variable propagate in the specified order. Thus the
623                  * clear_bit() is ordered _after_ the set bit. See
624                  * blk_mq_check_expired().
625                  *
626                  * (the bits must be part of the same byte for this to be
627                  * true).
628                  */
629                 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
630         }
631
632         if (q->dma_drain_size && blk_rq_bytes(rq)) {
633                 /*
634                  * Make sure space for the drain appears.  We know we can do
635                  * this because max_hw_segments has been adjusted to be one
636                  * fewer than the device can handle.
637                  */
638                 rq->nr_phys_segments++;
639         }
640 }
641 EXPORT_SYMBOL(blk_mq_start_request);
642
643 /*
644  * When we reach here because the queue is busy, the REQ_ATOM_COMPLETE
645  * flag isn't set yet, so there may be a race with the timeout handler.
646  * However, since rq->deadline has just been set in .queue_rq() in this
647  * situation, the race is not possible in practice: rq->timeout should
648  * be large enough to cover the window between blk_mq_start_request()
649  * being called from .queue_rq() and REQ_ATOM_STARTED being cleared
650  * here.
651  */
652 static void __blk_mq_requeue_request(struct request *rq)
653 {
654         struct request_queue *q = rq->q;
655
656         blk_mq_put_driver_tag(rq);
657
658         trace_block_rq_requeue(q, rq);
659         wbt_requeue(q->rq_wb, &rq->issue_stat);
660         blk_mq_sched_requeue_request(rq);
661
662         if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
663                 if (q->dma_drain_size && blk_rq_bytes(rq))
664                         rq->nr_phys_segments--;
665         }
666 }
667
668 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
669 {
670         __blk_mq_requeue_request(rq);
671
672         BUG_ON(blk_queued_rq(rq));
673         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
674 }
675 EXPORT_SYMBOL(blk_mq_requeue_request);
676
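/*
 * Drain the requeue list: requests flagged RQF_SOFTBARRIER are reinserted
 * at the head of their queues, everything else at the tail, then the
 * hardware queues are kicked.
 */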
677 static void blk_mq_requeue_work(struct work_struct *work)
678 {
679         struct request_queue *q =
680                 container_of(work, struct request_queue, requeue_work.work);
681         LIST_HEAD(rq_list);
682         struct request *rq, *next;
683
684         spin_lock_irq(&q->requeue_lock);
685         list_splice_init(&q->requeue_list, &rq_list);
686         spin_unlock_irq(&q->requeue_lock);
687
688         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
689                 if (!(rq->rq_flags & RQF_SOFTBARRIER))
690                         continue;
691
692                 rq->rq_flags &= ~RQF_SOFTBARRIER;
693                 list_del_init(&rq->queuelist);
694                 blk_mq_sched_insert_request(rq, true, false, false, true);
695         }
696
697         while (!list_empty(&rq_list)) {
698                 rq = list_entry(rq_list.next, struct request, queuelist);
699                 list_del_init(&rq->queuelist);
700                 blk_mq_sched_insert_request(rq, false, false, false, true);
701         }
702
703         blk_mq_run_hw_queues(q, false);
704 }
705
706 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
707                                 bool kick_requeue_list)
708 {
709         struct request_queue *q = rq->q;
710         unsigned long flags;
711
712         /*
713          * We abuse this flag, which is otherwise used by the I/O scheduler, to
714          * request head insertion from the workqueue.
715          */
716         BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
717
718         spin_lock_irqsave(&q->requeue_lock, flags);
719         if (at_head) {
720                 rq->rq_flags |= RQF_SOFTBARRIER;
721                 list_add(&rq->queuelist, &q->requeue_list);
722         } else {
723                 list_add_tail(&rq->queuelist, &q->requeue_list);
724         }
725         spin_unlock_irqrestore(&q->requeue_lock, flags);
726
727         if (kick_requeue_list)
728                 blk_mq_kick_requeue_list(q);
729 }
730 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
731
732 void blk_mq_kick_requeue_list(struct request_queue *q)
733 {
734         kblockd_schedule_delayed_work(&q->requeue_work, 0);
735 }
736 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
737
738 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
739                                     unsigned long msecs)
740 {
741         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
742                                     msecs_to_jiffies(msecs));
743 }
744 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
745
746 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
747 {
748         if (tag < tags->nr_tags) {
749                 prefetch(tags->rqs[tag]);
750                 return tags->rqs[tag];
751         }
752
753         return NULL;
754 }
755 EXPORT_SYMBOL(blk_mq_tag_to_rq);
756
757 struct blk_mq_timeout_data {
758         unsigned long next;
759         unsigned int next_set;
760 };
761
762 void blk_mq_rq_timed_out(struct request *req, bool reserved)
763 {
764         const struct blk_mq_ops *ops = req->q->mq_ops;
765         enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
766
767         /*
768          * We know that complete is set at this point. If STARTED isn't set
769          * anymore, then the request isn't active and the "timeout" should
770          * just be ignored. This can happen due to the bitflag ordering.
771          * Timeout first checks if STARTED is set, and if it is, assumes
772          * the request is active. But if we race with completion, then
773          * both flags will get cleared. So check here again, and ignore
774          * a timeout event with a request that isn't active.
775          */
776         if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
777                 return;
778
779         if (ops->timeout)
780                 ret = ops->timeout(req, reserved);
781
782         switch (ret) {
783         case BLK_EH_HANDLED:
784                 __blk_mq_complete_request(req);
785                 break;
786         case BLK_EH_RESET_TIMER:
787                 blk_add_timer(req);
788                 blk_clear_rq_complete(req);
789                 break;
790         case BLK_EH_NOT_HANDLED:
791                 break;
792         default:
793                 printk(KERN_ERR "block: bad eh return: %d\n", ret);
794                 break;
795         }
796 }
797
798 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
799                 struct request *rq, void *priv, bool reserved)
800 {
801         struct blk_mq_timeout_data *data = priv;
802         unsigned long deadline;
803
804         if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
805                 return;
806
807         /*
808          * Ensures that if we see STARTED we must also see our
809          * up-to-date deadline, see blk_mq_start_request().
810          */
811         smp_rmb();
812
813         deadline = READ_ONCE(rq->deadline);
814
815         /*
816          * The rq being checked may already have been freed and reallocated
817          * by the time we get here; we avoid this race by checking
818          * rq->deadline and the REQ_ATOM_COMPLETE flag together:
819          *
820          * - if rq->deadline is observed as the new value because the rq was
821          *   reused, the rq won't be timed out spuriously.
822          * - if rq->deadline is observed as the previous value, the
823          *   REQ_ATOM_COMPLETE flag won't have been cleared in the reuse path,
824          *   because a barrier is placed between setting rq->deadline and
825          *   clearing the flag in blk_mq_start_request(), so this rq won't be
826          *   timed out either.
827          */
828         if (time_after_eq(jiffies, deadline)) {
829                 if (!blk_mark_rq_complete(rq)) {
830                         /*
831                          * Again coherence order ensures that consecutive reads
832                          * from the same variable must be in that order. This
833                          * ensures that if we see COMPLETE clear, we must then
834                          * see STARTED set and we'll ignore this timeout.
835                          *
836                          * (There's also the MB implied by the test_and_clear())
837                          */
838                         blk_mq_rq_timed_out(rq, reserved);
839                 }
840         } else if (!data->next_set || time_after(data->next, deadline)) {
841                 data->next = deadline;
842                 data->next_set = 1;
843         }
844 }
845
846 static void blk_mq_timeout_work(struct work_struct *work)
847 {
848         struct request_queue *q =
849                 container_of(work, struct request_queue, timeout_work);
850         struct blk_mq_timeout_data data = {
851                 .next           = 0,
852                 .next_set       = 0,
853         };
854         int i;
855
856         /* A deadlock might occur if a request is stuck requiring a
857          * timeout at the same time a queue freeze is waiting for
858          * completion, since the timeout code would not be able to
859          * acquire the queue reference here.
860          *
861          * That's why we don't use blk_queue_enter here; instead, we use
862          * percpu_ref_tryget directly, because we need to be able to
863          * obtain a reference even in the short window between the queue
864          * starting to freeze (when the first reference is dropped in
865          * blk_freeze_queue_start) and the moment the last request is
866          * consumed (the instant at which q_usage_counter reaches
867          * zero).
868          */
869         if (!percpu_ref_tryget(&q->q_usage_counter))
870                 return;
871
872         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
873
874         if (data.next_set) {
875                 data.next = blk_rq_timeout(round_jiffies_up(data.next));
876                 mod_timer(&q->timeout, data.next);
877         } else {
878                 struct blk_mq_hw_ctx *hctx;
879
880                 queue_for_each_hw_ctx(q, hctx, i) {
881                         /* the hctx may be unmapped, so check it here */
882                         if (blk_mq_hw_queue_mapped(hctx))
883                                 blk_mq_tag_idle(hctx);
884                 }
885         }
886         blk_queue_exit(q);
887 }
888
889 struct flush_busy_ctx_data {
890         struct blk_mq_hw_ctx *hctx;
891         struct list_head *list;
892 };
893
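/* Splice all pending requests from one busy software queue onto the output list. */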
894 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
895 {
896         struct flush_busy_ctx_data *flush_data = data;
897         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
898         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
899
900         sbitmap_clear_bit(sb, bitnr);
901         spin_lock(&ctx->lock);
902         list_splice_tail_init(&ctx->rq_list, flush_data->list);
903         spin_unlock(&ctx->lock);
904         return true;
905 }
906
907 /*
908  * Process software queues that have been marked busy, splicing them
909  * to the dispatch list passed in by the caller.
910  */
911 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
912 {
913         struct flush_busy_ctx_data data = {
914                 .hctx = hctx,
915                 .list = list,
916         };
917
918         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
919 }
920 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
921
922 struct dispatch_rq_data {
923         struct blk_mq_hw_ctx *hctx;
924         struct request *rq;
925 };
926
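/*
 * Take a single request off this software queue; returning false ends the
 * sbitmap iteration once a request has been found.
 */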
927 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
928                 void *data)
929 {
930         struct dispatch_rq_data *dispatch_data = data;
931         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
932         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
933
934         spin_lock(&ctx->lock);
935         if (unlikely(!list_empty(&ctx->rq_list))) {
936                 dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
937                 list_del_init(&dispatch_data->rq->queuelist);
938                 if (list_empty(&ctx->rq_list))
939                         sbitmap_clear_bit(sb, bitnr);
940         }
941         spin_unlock(&ctx->lock);
942
943         return !dispatch_data->rq;
944 }
945
946 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
947                                         struct blk_mq_ctx *start)
948 {
949         unsigned off = start ? start->index_hw : 0;
950         struct dispatch_rq_data data = {
951                 .hctx = hctx,
952                 .rq   = NULL,
953         };
954
955         __sbitmap_for_each_set(&hctx->ctx_map, off,
956                                dispatch_rq_from_ctx, &data);
957
958         return data.rq;
959 }
960
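/* Convert a dispatch batch size into a log2-scaled histogram index. */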
961 static inline unsigned int queued_to_index(unsigned int queued)
962 {
963         if (!queued)
964                 return 0;
965
966         return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
967 }
968
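/*
 * Acquire a driver tag for @rq if it does not already hold one, optionally
 * waiting for a tag to become free. Returns true if the request has a valid
 * driver tag on exit.
 */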
969 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
970                            bool wait)
971 {
972         struct blk_mq_alloc_data data = {
973                 .q = rq->q,
974                 .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
975                 .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
976         };
977
978         might_sleep_if(wait);
979
980         if (rq->tag != -1)
981                 goto done;
982
983         if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
984                 data.flags |= BLK_MQ_REQ_RESERVED;
985
986         rq->tag = blk_mq_get_tag(&data);
987         if (rq->tag >= 0) {
988                 if (blk_mq_tag_busy(data.hctx)) {
989                         rq->rq_flags |= RQF_MQ_INFLIGHT;
990                         atomic_inc(&data.hctx->nr_active);
991                 }
992                 data.hctx->tags->rqs[rq->tag] = rq;
993         }
994
995 done:
996         if (hctx)
997                 *hctx = data.hctx;
998         return rq->tag != -1;
999 }
1000
1001 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
1002                                 void *key)
1003 {
1004         struct blk_mq_hw_ctx *hctx;
1005
1006         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1007
1008         list_del(&wait->entry);
1009         clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
1010         blk_mq_run_hw_queue(hctx, true);
1011         return 1;
1012 }
1013
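/*
 * Hook this hardware queue into the tag waitqueue so it is rerun when a
 * driver tag is freed. Returns false if another thread already did so.
 */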
1014 static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
1015 {
1016         struct sbq_wait_state *ws;
1017
1018         /*
1019          * The TAG_WAITING bit serves as a lock protecting hctx->dispatch_wait.
1020          * The thread which wins the race to grab this bit adds the hardware
1021          * queue to the wait queue.
1022          */
1023         if (test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state) ||
1024             test_and_set_bit_lock(BLK_MQ_S_TAG_WAITING, &hctx->state))
1025                 return false;
1026
1027         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
1028         ws = bt_wait_ptr(&hctx->tags->bitmap_tags, hctx);
1029
1030         /*
1031          * As soon as this returns, it's no longer safe to fiddle with
1032          * hctx->dispatch_wait, since a completion can wake up the wait queue
1033          * and unlock the bit.
1034          */
1035         add_wait_queue(&ws->wait, &hctx->dispatch_wait);
1036         return true;
1037 }
1038
1039 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
1040                 bool got_budget)
1041 {
1042         struct blk_mq_hw_ctx *hctx;
1043         struct request *rq, *nxt;
1044         int errors, queued;
1045
1046         if (list_empty(list))
1047                 return false;
1048
1049         WARN_ON(!list_is_singular(list) && got_budget);
1050
1051         /*
1052          * Now process all the entries, sending them to the driver.
1053          */
1054         errors = queued = 0;
1055         do {
1056                 struct blk_mq_queue_data bd;
1057                 blk_status_t ret;
1058
1059                 rq = list_first_entry(list, struct request, queuelist);
1060                 if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
1061                         /*
1062                          * The initial allocation attempt failed, so we need to
1063                          * rerun the hardware queue when a tag is freed.
1064                          */
1065                         if (!blk_mq_dispatch_wait_add(hctx)) {
1066                                 if (got_budget)
1067                                         blk_mq_put_dispatch_budget(hctx);
1068                                 break;
1069                         }
1070
1071                         /*
1072                          * It's possible that a tag was freed in the window
1073                          * between the allocation failure and adding the
1074                          * hardware queue to the wait queue.
1075                          */
1076                         if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
1077                                 if (got_budget)
1078                                         blk_mq_put_dispatch_budget(hctx);
1079                                 break;
1080                         }
1081                 }
1082
1083                 if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
1084                         break;
1085
1086                 list_del_init(&rq->queuelist);
1087
1088                 bd.rq = rq;
1089
1090                 /*
1091                  * Flag last if we have no more requests, or if we have more
1092                  * but can't assign a driver tag to it.
1093                  */
1094                 if (list_empty(list))
1095                         bd.last = true;
1096                 else {
1097                         nxt = list_first_entry(list, struct request, queuelist);
1098                         bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
1099                 }
1100
1101                 ret = q->mq_ops->queue_rq(hctx, &bd);
1102                 if (ret == BLK_STS_RESOURCE) {
1103                         /*
1104                          * If an I/O scheduler has been configured and we got a
1105                          * driver tag for the next request already, free it again.
1106                          */
1107                         if (!list_empty(list)) {
1108                                 nxt = list_first_entry(list, struct request, queuelist);
1109                                 blk_mq_put_driver_tag(nxt);
1110                         }
1111                         list_add(&rq->queuelist, list);
1112                         __blk_mq_requeue_request(rq);
1113                         break;
1114                 }
1115
1116                 if (unlikely(ret != BLK_STS_OK)) {
1117                         errors++;
1118                         blk_mq_end_request(rq, BLK_STS_IOERR);
1119                         continue;
1120                 }
1121
1122                 queued++;
1123         } while (!list_empty(list));
1124
1125         hctx->dispatched[queued_to_index(queued)]++;
1126
1127         /*
1128          * Any items that need requeuing? Stuff them into hctx->dispatch,
1129          * that is where we will continue on next queue run.
1130          */
1131         if (!list_empty(list)) {
1132                 spin_lock(&hctx->lock);
1133                 list_splice_init(list, &hctx->dispatch);
1134                 spin_unlock(&hctx->lock);
1135
1136                 /*
1137                  * If SCHED_RESTART was set by the caller of this function and
1138                  * it is no longer set that means that it was cleared by another
1139                  * thread and hence that a queue rerun is needed.
1140                  *
1141                  * If TAG_WAITING is set that means that an I/O scheduler has
1142                  * been configured and another thread is waiting for a driver
1143                  * tag. To guarantee fairness, do not rerun this hardware queue
1144                  * but let the other thread grab the driver tag.
1145                  *
1146                  * If no I/O scheduler has been configured it is possible that
1147                  * the hardware queue got stopped and restarted before requests
1148                  * were pushed back onto the dispatch list. Rerun the queue to
1149                  * avoid starvation. Notes:
1150                  * - blk_mq_run_hw_queue() checks whether or not a queue has
1151                  *   been stopped before rerunning a queue.
1152                  * - Some but not all block drivers stop a queue before
1153                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
1154                  *   and dm-rq.
1155                  */
1156                 if (!blk_mq_sched_needs_restart(hctx) &&
1157                     !test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
1158                         blk_mq_run_hw_queue(hctx, true);
1159         }
1160
1161         return (queued + errors) != 0;
1162 }
1163
1164 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1165 {
1166         int srcu_idx;
1167
1168         /*
1169          * We should be running this queue from one of the CPUs that
1170          * are mapped to it.
1171          */
1172         WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1173                 cpu_online(hctx->next_cpu));
1174
1175         /*
1176          * We can't run the queue inline with ints disabled. Ensure that
1177          * we catch bad users of this early.
1178          */
1179         WARN_ON_ONCE(in_interrupt());
1180
1181         if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1182                 rcu_read_lock();
1183                 blk_mq_sched_dispatch_requests(hctx);
1184                 rcu_read_unlock();
1185         } else {
1186                 might_sleep();
1187
1188                 srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
1189                 blk_mq_sched_dispatch_requests(hctx);
1190                 srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
1191         }
1192 }
1193
1194 /*
1195  * It'd be great if the workqueue API had a way to pass
1196  * in a mask and had some smarts for more clever placement.
1197  * For now we just round-robin here, switching for every
1198  * BLK_MQ_CPU_WORK_BATCH queued items.
1199  */
1200 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
1201 {
1202         if (hctx->queue->nr_hw_queues == 1)
1203                 return WORK_CPU_UNBOUND;
1204
1205         if (--hctx->next_cpu_batch <= 0) {
1206                 int next_cpu;
1207
1208                 next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
1209                 if (next_cpu >= nr_cpu_ids)
1210                         next_cpu = cpumask_first(hctx->cpumask);
1211
1212                 hctx->next_cpu = next_cpu;
1213                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1214         }
1215
1216         return hctx->next_cpu;
1217 }
1218
1219 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
1220                                         unsigned long msecs)
1221 {
1222         if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1223                 return;
1224
1225         if (unlikely(blk_mq_hctx_stopped(hctx)))
1226                 return;
1227
1228         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
1229                 int cpu = get_cpu();
1230                 if (cpumask_test_cpu(cpu, hctx->cpumask)) {
1231                         __blk_mq_run_hw_queue(hctx);
1232                         put_cpu();
1233                         return;
1234                 }
1235
1236                 put_cpu();
1237         }
1238
1239         kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1240                                          &hctx->run_work,
1241                                          msecs_to_jiffies(msecs));
1242 }
1243
1244 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1245 {
1246         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
1247 }
1248 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
1249
1250 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1251 {
1252         __blk_mq_delay_run_hw_queue(hctx, async, 0);
1253 }
1254 EXPORT_SYMBOL(blk_mq_run_hw_queue);
1255
1256 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
1257 {
1258         struct blk_mq_hw_ctx *hctx;
1259         int i;
1260
1261         queue_for_each_hw_ctx(q, hctx, i) {
1262                 if (!blk_mq_hctx_has_pending(hctx) ||
1263                     blk_mq_hctx_stopped(hctx))
1264                         continue;
1265
1266                 blk_mq_run_hw_queue(hctx, async);
1267         }
1268 }
1269 EXPORT_SYMBOL(blk_mq_run_hw_queues);
1270
1271 /**
1272  * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
1273  * @q: request queue.
1274  *
1275  * The caller is responsible for serializing this function against
1276  * blk_mq_{start,stop}_hw_queue().
1277  */
1278 bool blk_mq_queue_stopped(struct request_queue *q)
1279 {
1280         struct blk_mq_hw_ctx *hctx;
1281         int i;
1282
1283         queue_for_each_hw_ctx(q, hctx, i)
1284                 if (blk_mq_hctx_stopped(hctx))
1285                         return true;
1286
1287         return false;
1288 }
1289 EXPORT_SYMBOL(blk_mq_queue_stopped);
1290
1291 /*
1292  * This function is often used by drivers to pause .queue_rq() when there
1293  * aren't enough resources or some condition isn't satisfied, in which case
1294  * BLK_STS_RESOURCE is usually returned.
1295  *
1296  * We do not guarantee that dispatch can be drained or blocked
1297  * after blk_mq_stop_hw_queue() returns. Please use
1298  * blk_mq_quiesce_queue() for that requirement.
1299  */
1300 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
1301 {
1302         cancel_delayed_work(&hctx->run_work);
1303
1304         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
1305 }
1306 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
1307
1308 /*
1309  * This function is often used by drivers to pause .queue_rq() when there
1310  * aren't enough resources or some condition isn't satisfied, in which case
1311  * BLK_STS_RESOURCE is usually returned.
1312  *
1313  * We do not guarantee that dispatch can be drained or blocked
1314  * after blk_mq_stop_hw_queues() returns. Please use
1315  * blk_mq_quiesce_queue() for that requirement.
1316  */
1317 void blk_mq_stop_hw_queues(struct request_queue *q)
1318 {
1319         struct blk_mq_hw_ctx *hctx;
1320         int i;
1321
1322         queue_for_each_hw_ctx(q, hctx, i)
1323                 blk_mq_stop_hw_queue(hctx);
1324 }
1325 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
1326
1327 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
1328 {
1329         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1330
1331         blk_mq_run_hw_queue(hctx, false);
1332 }
1333 EXPORT_SYMBOL(blk_mq_start_hw_queue);
1334
1335 void blk_mq_start_hw_queues(struct request_queue *q)
1336 {
1337         struct blk_mq_hw_ctx *hctx;
1338         int i;
1339
1340         queue_for_each_hw_ctx(q, hctx, i)
1341                 blk_mq_start_hw_queue(hctx);
1342 }
1343 EXPORT_SYMBOL(blk_mq_start_hw_queues);
1344
1345 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
1346 {
1347         if (!blk_mq_hctx_stopped(hctx))
1348                 return;
1349
1350         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1351         blk_mq_run_hw_queue(hctx, async);
1352 }
1353 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
1354
1355 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
1356 {
1357         struct blk_mq_hw_ctx *hctx;
1358         int i;
1359
1360         queue_for_each_hw_ctx(q, hctx, i)
1361                 blk_mq_start_stopped_hw_queue(hctx, async);
1362 }
1363 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
1364
1365 static void blk_mq_run_work_fn(struct work_struct *work)
1366 {
1367         struct blk_mq_hw_ctx *hctx;
1368
1369         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
1370
1371         /*
1372          * If we are stopped, don't run the queue. The exception is if
1373          * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
1374          * the STOPPED bit and run it.
1375          */
1376         if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
1377                 if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
1378                         return;
1379
1380                 clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1381                 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
1382         }
1383
1384         __blk_mq_run_hw_queue(hctx);
1385 }
1386
1387
1388 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
1389 {
1390         if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
1391                 return;
1392
1393         /*
1394          * Stop the hw queue, then modify currently delayed work.
1395          * This should prevent us from running the queue prematurely.
1396          * Mark the queue as auto-clearing STOPPED when it runs.
1397          */
1398         blk_mq_stop_hw_queue(hctx);
1399         set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
1400         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
1401                                         &hctx->run_work,
1402                                         msecs_to_jiffies(msecs));
1403 }
1404 EXPORT_SYMBOL(blk_mq_delay_queue);
1405
1406 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
1407                                             struct request *rq,
1408                                             bool at_head)
1409 {
1410         struct blk_mq_ctx *ctx = rq->mq_ctx;
1411
1412         lockdep_assert_held(&ctx->lock);
1413
1414         trace_block_rq_insert(hctx->queue, rq);
1415
1416         if (at_head)
1417                 list_add(&rq->queuelist, &ctx->rq_list);
1418         else
1419                 list_add_tail(&rq->queuelist, &ctx->rq_list);
1420 }
1421
1422 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
1423                              bool at_head)
1424 {
1425         struct blk_mq_ctx *ctx = rq->mq_ctx;
1426
1427         lockdep_assert_held(&ctx->lock);
1428
1429         __blk_mq_insert_req_list(hctx, rq, at_head);
1430         blk_mq_hctx_mark_pending(hctx, ctx);
1431 }
1432
1433 /*
1434  * Should only be used carefully, when the caller knows we want to
1435  * bypass a potential IO scheduler on the target device.
1436  */
1437 void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
1438 {
1439         struct blk_mq_ctx *ctx = rq->mq_ctx;
1440         struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1441
1442         spin_lock(&hctx->lock);
1443         list_add_tail(&rq->queuelist, &hctx->dispatch);
1444         spin_unlock(&hctx->lock);
1445
1446         if (run_queue)
1447                 blk_mq_run_hw_queue(hctx, false);
1448 }
1449
1450 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
1451                             struct list_head *list)
1452
1453 {
1454         /*
1455          * Preemption doesn't flush the plug list, so it's possible that
1456          * ctx->cpu is offline by now.
1457          */
1458         spin_lock(&ctx->lock);
1459         while (!list_empty(list)) {
1460                 struct request *rq;
1461
1462                 rq = list_first_entry(list, struct request, queuelist);
1463                 BUG_ON(rq->mq_ctx != ctx);
1464                 list_del_init(&rq->queuelist);
1465                 __blk_mq_insert_req_list(hctx, rq, false);
1466         }
1467         blk_mq_hctx_mark_pending(hctx, ctx);
1468         spin_unlock(&ctx->lock);
1469 }
1470
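/* Sort plugged requests by software queue first, then by starting sector. */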
1471 static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
1472 {
1473         struct request *rqa = container_of(a, struct request, queuelist);
1474         struct request *rqb = container_of(b, struct request, queuelist);
1475
1476         return !(rqa->mq_ctx < rqb->mq_ctx ||
1477                  (rqa->mq_ctx == rqb->mq_ctx &&
1478                   blk_rq_pos(rqa) < blk_rq_pos(rqb)));
1479 }
1480
1481 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1482 {
1483         struct blk_mq_ctx *this_ctx;
1484         struct request_queue *this_q;
1485         struct request *rq;
1486         LIST_HEAD(list);
1487         LIST_HEAD(ctx_list);
1488         unsigned int depth;
1489
1490         list_splice_init(&plug->mq_list, &list);
1491
1492         list_sort(NULL, &list, plug_ctx_cmp);
1493
1494         this_q = NULL;
1495         this_ctx = NULL;
1496         depth = 0;
1497
1498         while (!list_empty(&list)) {
1499                 rq = list_entry_rq(list.next);
1500                 list_del_init(&rq->queuelist);
1501                 BUG_ON(!rq->q);
1502                 if (rq->mq_ctx != this_ctx) {
1503                         if (this_ctx) {
1504                                 trace_block_unplug(this_q, depth, from_schedule);
1505                                 blk_mq_sched_insert_requests(this_q, this_ctx,
1506                                                                 &ctx_list,
1507                                                                 from_schedule);
1508                         }
1509
1510                         this_ctx = rq->mq_ctx;
1511                         this_q = rq->q;
1512                         depth = 0;
1513                 }
1514
1515                 depth++;
1516                 list_add_tail(&rq->queuelist, &ctx_list);
1517         }
1518
1519         /*
1520          * If 'this_ctx' is set, we know we have entries to complete
1521          * on 'ctx_list'. Do those.
1522          */
1523         if (this_ctx) {
1524                 trace_block_unplug(this_q, depth, from_schedule);
1525                 blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
1526                                                 from_schedule);
1527         }
1528 }
1529
1530 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
1531 {
1532         blk_init_request_from_bio(rq, bio);
1533
1534         blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
1535
1536         blk_account_io_start(rq, true);
1537 }
1538
1539 static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
1540                                    struct blk_mq_ctx *ctx,
1541                                    struct request *rq)
1542 {
1543         spin_lock(&ctx->lock);
1544         __blk_mq_insert_request(hctx, rq, false);
1545         spin_unlock(&ctx->lock);
1546 }
1547
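/*
 * Build the polling cookie for @rq from the driver tag if one is assigned,
 * otherwise from the scheduler's internal tag.
 */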
1548 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1549 {
1550         if (rq->tag != -1)
1551                 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1552
1553         return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
1554 }
1555
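/*
 * Try to issue a request directly to the driver, bypassing the software
 * queues. This is only done when the hw queue isn't stopped or quiesced,
 * no I/O scheduler is attached, and both a driver tag and a dispatch
 * budget can be obtained. On failure, including a busy return from
 * ->queue_rq(), fall back to normal insertion.
 */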
1556 static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1557                                         struct request *rq,
1558                                         blk_qc_t *cookie, bool may_sleep)
1559 {
1560         struct request_queue *q = rq->q;
1561         struct blk_mq_queue_data bd = {
1562                 .rq = rq,
1563                 .last = true,
1564         };
1565         blk_qc_t new_cookie;
1566         blk_status_t ret;
1567         bool run_queue = true;
1568
1569         /* RCU or SRCU read lock is needed before checking quiesced flag */
1570         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
1571                 run_queue = false;
1572                 goto insert;
1573         }
1574
1575         if (q->elevator)
1576                 goto insert;
1577
1578         if (!blk_mq_get_driver_tag(rq, NULL, false))
1579                 goto insert;
1580
1581         if (!blk_mq_get_dispatch_budget(hctx)) {
1582                 blk_mq_put_driver_tag(rq);
1583                 goto insert;
1584         }
1585
1586         new_cookie = request_to_qc_t(hctx, rq);
1587
1588         /*
1589          * If the driver accepts the request we are done. If it reports
1590          * a resource shortage (busy), requeue and insert the request as
1591          * we previously would have done. Any other error ends the request.
1592          */
1593         ret = q->mq_ops->queue_rq(hctx, &bd);
1594         switch (ret) {
1595         case BLK_STS_OK:
1596                 *cookie = new_cookie;
1597                 return;
1598         case BLK_STS_RESOURCE:
1599                 __blk_mq_requeue_request(rq);
1600                 goto insert;
1601         default:
1602                 *cookie = BLK_QC_T_NONE;
1603                 blk_mq_end_request(rq, ret);
1604                 return;
1605         }
1606
1607 insert:
1608         blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
1609 }
1610
1611 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1612                 struct request *rq, blk_qc_t *cookie)
1613 {
1614         if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
1615                 rcu_read_lock();
1616                 __blk_mq_try_issue_directly(hctx, rq, cookie, false);
1617                 rcu_read_unlock();
1618         } else {
1619                 unsigned int srcu_idx;
1620
1621                 might_sleep();
1622
1623                 srcu_idx = srcu_read_lock(hctx->queue_rq_srcu);
1624                 __blk_mq_try_issue_directly(hctx, rq, cookie, true);
1625                 srcu_read_unlock(hctx->queue_rq_srcu, srcu_idx);
1626         }
1627 }
1628
1629 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1630 {
1631         const int is_sync = op_is_sync(bio->bi_opf);
1632         const int is_flush_fua = op_is_flush(bio->bi_opf);
1633         struct blk_mq_alloc_data data = { .flags = 0 };
1634         struct request *rq;
1635         unsigned int request_count = 0;
1636         struct blk_plug *plug;
1637         struct request *same_queue_rq = NULL;
1638         blk_qc_t cookie;
1639         unsigned int wb_acct;
1640
1641         blk_queue_bounce(q, &bio);
1642
1643         blk_queue_split(q, &bio);
1644
1645         if (!bio_integrity_prep(bio))
1646                 return BLK_QC_T_NONE;
1647
1648         if (!is_flush_fua && !blk_queue_nomerges(q) &&
1649             blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1650                 return BLK_QC_T_NONE;
1651
1652         if (blk_mq_sched_bio_merge(q, bio))
1653                 return BLK_QC_T_NONE;
1654
1655         wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1656
1657         trace_block_getrq(q, bio, bio->bi_opf);
1658
1659         rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
1660         if (unlikely(!rq)) {
1661                 __wbt_done(q->rq_wb, wb_acct);
1662                 if (bio->bi_opf & REQ_NOWAIT)
1663                         bio_wouldblock_error(bio);
1664                 return BLK_QC_T_NONE;
1665         }
1666
1667         wbt_track(&rq->issue_stat, wb_acct);
1668
1669         cookie = request_to_qc_t(data.hctx, rq);
1670
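        /*
         * Decide how to dispatch the new request: flush/FUA requests bypass
         * the scheduler and go through the flush machinery; otherwise we
         * either plug (single hw queue), do limited plugging with direct
         * issue of the previously plugged request (multiple hw queues),
         * issue sync requests directly on multi-queue devices, insert via
         * the attached elevator, or fall back to plain software queue
         * insertion plus a queue run.
         */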
1671         plug = current->plug;
1672         if (unlikely(is_flush_fua)) {
1673                 blk_mq_put_ctx(data.ctx);
1674                 blk_mq_bio_to_request(rq, bio);
1675
1676                 /* bypass scheduler for flush rq */
1677                 blk_insert_flush(rq);
1678                 blk_mq_run_hw_queue(data.hctx, true);
1679         } else if (plug && q->nr_hw_queues == 1) {
1680                 struct request *last = NULL;
1681
1682                 blk_mq_put_ctx(data.ctx);
1683                 blk_mq_bio_to_request(rq, bio);
1684
1685                 /*
1686                  * @request_count may be stale because we may have been
1687                  * scheduled out, so check the plug list again.
1688                  */
1689                 if (list_empty(&plug->mq_list))
1690                         request_count = 0;
1691                 else if (blk_queue_nomerges(q))
1692                         request_count = blk_plug_queued_count(q);
1693
1694                 if (!request_count)
1695                         trace_block_plug(q);
1696                 else
1697                         last = list_entry_rq(plug->mq_list.prev);
1698
1699                 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1700                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1701                         blk_flush_plug_list(plug, false);
1702                         trace_block_plug(q);
1703                 }
1704
1705                 list_add_tail(&rq->queuelist, &plug->mq_list);
1706         } else if (plug && !blk_queue_nomerges(q)) {
1707                 blk_mq_bio_to_request(rq, bio);
1708
1709                 /*
1710                  * We do limited plugging. If the bio can be merged, do that.
1711                  * Otherwise the existing request in the plug list will be
1712                  * issued, so the plug list holds at most one request.
1713                  * The plug list might get flushed before this; if that happens,
1714                  * the plug list is empty and same_queue_rq is invalid.
1715                  */
1716                 if (list_empty(&plug->mq_list))
1717                         same_queue_rq = NULL;
1718                 if (same_queue_rq)
1719                         list_del_init(&same_queue_rq->queuelist);
1720                 list_add_tail(&rq->queuelist, &plug->mq_list);
1721
1722                 blk_mq_put_ctx(data.ctx);
1723
1724                 if (same_queue_rq) {
1725                         data.hctx = blk_mq_map_queue(q,
1726                                         same_queue_rq->mq_ctx->cpu);
1727                         blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1728                                         &cookie);
1729                 }
1730         } else if (q->nr_hw_queues > 1 && is_sync) {
1731                 blk_mq_put_ctx(data.ctx);
1732                 blk_mq_bio_to_request(rq, bio);
1733                 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
1734         } else if (q->elevator) {
1735                 blk_mq_put_ctx(data.ctx);
1736                 blk_mq_bio_to_request(rq, bio);
1737                 blk_mq_sched_insert_request(rq, false, true, true, true);
1738         } else {
1739                 blk_mq_put_ctx(data.ctx);
1740                 blk_mq_bio_to_request(rq, bio);
1741                 blk_mq_queue_io(data.hctx, data.ctx, rq);
1742                 blk_mq_run_hw_queue(data.hctx, true);
1743         }
1744
1745         return cookie;
1746 }
1747
1748 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1749                      unsigned int hctx_idx)
1750 {
1751         struct page *page;
1752
1753         if (tags->rqs && set->ops->exit_request) {
1754                 int i;
1755
1756                 for (i = 0; i < tags->nr_tags; i++) {
1757                         struct request *rq = tags->static_rqs[i];
1758
1759                         if (!rq)
1760                                 continue;
1761                         set->ops->exit_request(set, rq, hctx_idx);
1762                         tags->static_rqs[i] = NULL;
1763                 }
1764         }
1765
1766         while (!list_empty(&tags->page_list)) {
1767                 page = list_first_entry(&tags->page_list, struct page, lru);
1768                 list_del_init(&page->lru);
1769                 /*
1770                  * Remove the kmemleak object previously allocated in
1771                  * blk_mq_alloc_rqs().
1772                  */
1773                 kmemleak_free(page_address(page));
1774                 __free_pages(page, page->private);
1775         }
1776 }
1777
1778 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
1779 {
1780         kfree(tags->rqs);
1781         tags->rqs = NULL;
1782         kfree(tags->static_rqs);
1783         tags->static_rqs = NULL;
1784
1785         blk_mq_free_tags(tags);
1786 }
1787
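/*
 * Allocate the tag map and request lookup tables for one hardware queue.
 * tags->static_rqs[] holds the preallocated requests indexed by tag, while
 * tags->rqs[] is filled in whenever a driver tag is assigned, so it always
 * points at the request currently owning that tag.
 */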
1788 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
1789                                         unsigned int hctx_idx,
1790                                         unsigned int nr_tags,
1791                                         unsigned int reserved_tags)
1792 {
1793         struct blk_mq_tags *tags;
1794         int node;
1795
1796         node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1797         if (node == NUMA_NO_NODE)
1798                 node = set->numa_node;
1799
1800         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
1801                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
1802         if (!tags)
1803                 return NULL;
1804
1805         tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1806                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1807                                  node);
1808         if (!tags->rqs) {
1809                 blk_mq_free_tags(tags);
1810                 return NULL;
1811         }
1812
1813         tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
1814                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
1815                                  node);
1816         if (!tags->static_rqs) {
1817                 kfree(tags->rqs);
1818                 blk_mq_free_tags(tags);
1819                 return NULL;
1820         }
1821
1822         return tags;
1823 }
1824
1825 static size_t order_to_size(unsigned int order)
1826 {
1827         return (size_t)PAGE_SIZE << order;
1828 }
1829
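/*
 * Preallocate the requests (plus per-request driver payload) for one tag
 * map. Requests are carved out of page allocations of decreasing order,
 * starting at order 4 and falling back to smaller orders under memory
 * pressure; the pages are kept on tags->page_list so blk_mq_free_rqs()
 * can release them later.
 */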
1830 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1831                      unsigned int hctx_idx, unsigned int depth)
1832 {
1833         unsigned int i, j, entries_per_page, max_order = 4;
1834         size_t rq_size, left;
1835         int node;
1836
1837         node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
1838         if (node == NUMA_NO_NODE)
1839                 node = set->numa_node;
1840
1841         INIT_LIST_HEAD(&tags->page_list);
1842
1843         /*
1844          * rq_size is the size of the request plus driver payload, rounded
1845          * to the cacheline size
1846          */
1847         rq_size = round_up(sizeof(struct request) + set->cmd_size,
1848                                 cache_line_size());
1849         left = rq_size * depth;
1850
1851         for (i = 0; i < depth; ) {
1852                 int this_order = max_order;
1853                 struct page *page;
1854                 int to_do;
1855                 void *p;
1856
1857                 while (this_order && left < order_to_size(this_order - 1))
1858                         this_order--;
1859
1860                 do {
1861                         page = alloc_pages_node(node,
1862                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
1863                                 this_order);
1864                         if (page)
1865                                 break;
1866                         if (!this_order--)
1867                                 break;
1868                         if (order_to_size(this_order) < rq_size)
1869                                 break;
1870                 } while (1);
1871
1872                 if (!page)
1873                         goto fail;
1874
1875                 page->private = this_order;
1876                 list_add_tail(&page->lru, &tags->page_list);
1877
1878                 p = page_address(page);
1879                 /*
1880                  * Allow kmemleak to scan these pages as they contain pointers
1881                  * to additional allocations made via ops->init_request().
1882                  */
1883                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
1884                 entries_per_page = order_to_size(this_order) / rq_size;
1885                 to_do = min(entries_per_page, depth - i);
1886                 left -= to_do * rq_size;
1887                 for (j = 0; j < to_do; j++) {
1888                         struct request *rq = p;
1889
1890                         tags->static_rqs[i] = rq;
1891                         if (set->ops->init_request) {
1892                                 if (set->ops->init_request(set, rq, hctx_idx,
1893                                                 node)) {
1894                                         tags->static_rqs[i] = NULL;
1895                                         goto fail;
1896                                 }
1897                         }
1898
1899                         p += rq_size;
1900                         i++;
1901                 }
1902         }
1903         return 0;
1904
1905 fail:
1906         blk_mq_free_rqs(set, tags, hctx_idx);
1907         return -ENOMEM;
1908 }
1909
1910 /*
1911  * 'cpu' is going away. Splice any existing rq_list entries from this
1912  * software queue to the hw queue dispatch list, and ensure that the
1913  * hw queue gets run.
1914  */
1915 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
1916 {
1917         struct blk_mq_hw_ctx *hctx;
1918         struct blk_mq_ctx *ctx;
1919         LIST_HEAD(tmp);
1920
1921         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
1922         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
1923
1924         spin_lock(&ctx->lock);
1925         if (!list_empty(&ctx->rq_list)) {
1926                 list_splice_init(&ctx->rq_list, &tmp);
1927                 blk_mq_hctx_clear_pending(hctx, ctx);
1928         }
1929         spin_unlock(&ctx->lock);
1930
1931         if (list_empty(&tmp))
1932                 return 0;
1933
1934         spin_lock(&hctx->lock);
1935         list_splice_tail_init(&tmp, &hctx->dispatch);
1936         spin_unlock(&hctx->lock);
1937
1938         blk_mq_run_hw_queue(hctx, true);
1939         return 0;
1940 }
1941
1942 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
1943 {
1944         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
1945                                             &hctx->cpuhp_dead);
1946 }
1947
1948 /* hctx->ctxs will be freed in queue's release handler */
1949 static void blk_mq_exit_hctx(struct request_queue *q,
1950                 struct blk_mq_tag_set *set,
1951                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1952 {
1953         blk_mq_debugfs_unregister_hctx(hctx);
1954
1955         blk_mq_tag_idle(hctx);
1956
1957         if (set->ops->exit_request)
1958                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
1959
1960         blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
1961
1962         if (set->ops->exit_hctx)
1963                 set->ops->exit_hctx(hctx, hctx_idx);
1964
1965         if (hctx->flags & BLK_MQ_F_BLOCKING)
1966                 cleanup_srcu_struct(hctx->queue_rq_srcu);
1967
1968         blk_mq_remove_cpuhp(hctx);
1969         blk_free_flush_queue(hctx->fq);
1970         sbitmap_free(&hctx->ctx_map);
1971 }
1972
1973 static void blk_mq_exit_hw_queues(struct request_queue *q,
1974                 struct blk_mq_tag_set *set, int nr_queue)
1975 {
1976         struct blk_mq_hw_ctx *hctx;
1977         unsigned int i;
1978
1979         queue_for_each_hw_ctx(q, hctx, i) {
1980                 if (i == nr_queue)
1981                         break;
1982                 blk_mq_exit_hctx(q, set, hctx, i);
1983         }
1984 }
1985
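/*
 * Set up one hardware queue context: register the CPU hotplug notifier,
 * allocate the ctx map and the flush queue, and let the driver and the
 * I/O scheduler initialize their per-hctx and flush-request state. The
 * error labels below unwind exactly the steps that completed before them.
 */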
1986 static int blk_mq_init_hctx(struct request_queue *q,
1987                 struct blk_mq_tag_set *set,
1988                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
1989 {
1990         int node;
1991
1992         node = hctx->numa_node;
1993         if (node == NUMA_NO_NODE)
1994                 node = hctx->numa_node = set->numa_node;
1995
1996         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
1997         spin_lock_init(&hctx->lock);
1998         INIT_LIST_HEAD(&hctx->dispatch);
1999         hctx->queue = q;
2000         hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
2001
2002         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
2003
2004         hctx->tags = set->tags[hctx_idx];
2005
2006         /*
2007          * Allocate space for all possible cpus to avoid allocation at
2008          * runtime
2009          */
2010         hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
2011                                         GFP_KERNEL, node);
2012         if (!hctx->ctxs)
2013                 goto unregister_cpu_notifier;
2014
2015         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
2016                               node))
2017                 goto free_ctxs;
2018
2019         hctx->nr_ctx = 0;
2020
2021         if (set->ops->init_hctx &&
2022             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2023                 goto free_bitmap;
2024
2025         if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2026                 goto exit_hctx;
2027
2028         hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2029         if (!hctx->fq)
2030                 goto sched_exit_hctx;
2031
2032         if (set->ops->init_request &&
2033             set->ops->init_request(set, hctx->fq->flush_rq, hctx_idx,
2034                                    node))
2035                 goto free_fq;
2036
2037         if (hctx->flags & BLK_MQ_F_BLOCKING)
2038                 init_srcu_struct(hctx->queue_rq_srcu);
2039
2040         blk_mq_debugfs_register_hctx(q, hctx);
2041
2042         return 0;
2043
2044  free_fq:
2045         blk_free_flush_queue(hctx->fq);
2046  sched_exit_hctx:
2047         blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2048  exit_hctx:
2049         if (set->ops->exit_hctx)
2050                 set->ops->exit_hctx(hctx, hctx_idx);
2051  free_bitmap:
2052         sbitmap_free(&hctx->ctx_map);
2053  free_ctxs:
2054         kfree(hctx->ctxs);
2055  unregister_cpu_notifier:
2056         blk_mq_remove_cpuhp(hctx);
2057         return -1;
2058 }
2059
2060 static void blk_mq_init_cpu_queues(struct request_queue *q,
2061                                    unsigned int nr_hw_queues)
2062 {
2063         unsigned int i;
2064
2065         for_each_possible_cpu(i) {
2066                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2067                 struct blk_mq_hw_ctx *hctx;
2068
2069                 __ctx->cpu = i;
2070                 spin_lock_init(&__ctx->lock);
2071                 INIT_LIST_HEAD(&__ctx->rq_list);
2072                 __ctx->queue = q;
2073
2074                 /* If the cpu isn't present, it is mapped to the first hctx */
2075                 if (!cpu_present(i))
2076                         continue;
2077
2078                 hctx = blk_mq_map_queue(q, i);
2079
2080                 /*
2081                  * Set local node, IFF we have more than one hw queue. If
2082                  * not, we remain on the home node of the device
2083                  */
2084                 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
2085                         hctx->numa_node = local_memory_node(cpu_to_node(i));
2086         }
2087 }
2088
2089 static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2090 {
2091         int ret = 0;
2092
2093         set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2094                                         set->queue_depth, set->reserved_tags);
2095         if (!set->tags[hctx_idx])
2096                 return false;
2097
2098         ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2099                                 set->queue_depth);
2100         if (!ret)
2101                 return true;
2102
2103         blk_mq_free_rq_map(set->tags[hctx_idx]);
2104         set->tags[hctx_idx] = NULL;
2105         return false;
2106 }
2107
2108 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2109                                          unsigned int hctx_idx)
2110 {
2111         if (set->tags[hctx_idx]) {
2112                 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2113                 blk_mq_free_rq_map(set->tags[hctx_idx]);
2114                 set->tags[hctx_idx] = NULL;
2115         }
2116 }
2117
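/*
 * (Re)build the software to hardware queue mapping for all present CPUs,
 * allocating tag maps for hw queues that gained a mapping and, apart from
 * hctx 0 which is kept as a fallback, freeing the tags of hw queues that
 * no longer have any software queues mapped to them.
 */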
2118 static void blk_mq_map_swqueue(struct request_queue *q)
2119 {
2120         unsigned int i, hctx_idx;
2121         struct blk_mq_hw_ctx *hctx;
2122         struct blk_mq_ctx *ctx;
2123         struct blk_mq_tag_set *set = q->tag_set;
2124
2125         /*
2126          * Avoid others reading an incomplete hctx->cpumask through sysfs
2127          */
2128         mutex_lock(&q->sysfs_lock);
2129
2130         queue_for_each_hw_ctx(q, hctx, i) {
2131                 cpumask_clear(hctx->cpumask);
2132                 hctx->nr_ctx = 0;
2133         }
2134
2135         /*
2136          * Map software to hardware queues.
2137          *
2138          * If the cpu isn't present, it is mapped to the first hctx.
2139          */
2140         for_each_present_cpu(i) {
2141                 hctx_idx = q->mq_map[i];
2142                 /* unmapped hw queue can be remapped after CPU topo changed */
2143                 if (!set->tags[hctx_idx] &&
2144                     !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2145                         /*
2146                          * If tags initialization fails for some hctx,
2147                          * that hctx won't be brought online.  In this
2148                          * case, remap the current ctx to hctx[0], which
2149                          * is guaranteed to always have tags allocated.
2150                          */
2151                         q->mq_map[i] = 0;
2152                 }
2153
2154                 ctx = per_cpu_ptr(q->queue_ctx, i);
2155                 hctx = blk_mq_map_queue(q, i);
2156
2157                 cpumask_set_cpu(i, hctx->cpumask);
2158                 ctx->index_hw = hctx->nr_ctx;
2159                 hctx->ctxs[hctx->nr_ctx++] = ctx;
2160         }
2161
2162         mutex_unlock(&q->sysfs_lock);
2163
2164         queue_for_each_hw_ctx(q, hctx, i) {
2165                 /*
2166                  * If no software queues are mapped to this hardware queue,
2167                  * disable it and free the request entries.
2168                  */
2169                 if (!hctx->nr_ctx) {
2170                         /* Never unmap queue 0.  We need it as a
2171                          * fallback in case a new remap fails to
2172                          * allocate tags.
2173                          */
2174                         if (i && set->tags[i])
2175                                 blk_mq_free_map_and_requests(set, i);
2176
2177                         hctx->tags = NULL;
2178                         continue;
2179                 }
2180
2181                 hctx->tags = set->tags[i];
2182                 WARN_ON(!hctx->tags);
2183
2184                 /*
2185                  * Set the map size to the number of mapped software queues.
2186                  * This is more accurate and more efficient than looping
2187                  * over all possibly mapped software queues.
2188                  */
2189                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
2190
2191                 /*
2192                  * Initialize batch roundrobin counts
2193                  */
2194                 hctx->next_cpu = cpumask_first(hctx->cpumask);
2195                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2196         }
2197 }
2198
2199 /*
2200  * Caller needs to ensure that we're either frozen/quiesced, or that
2201  * the queue isn't live yet.
2202  */
2203 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
2204 {
2205         struct blk_mq_hw_ctx *hctx;
2206         int i;
2207
2208         queue_for_each_hw_ctx(q, hctx, i) {
2209                 if (shared) {
2210                         if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2211                                 atomic_inc(&q->shared_hctx_restart);
2212                         hctx->flags |= BLK_MQ_F_TAG_SHARED;
2213                 } else {
2214                         if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2215                                 atomic_dec(&q->shared_hctx_restart);
2216                         hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
2217                 }
2218         }
2219 }
2220
2221 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2222                                         bool shared)
2223 {
2224         struct request_queue *q;
2225
2226         lockdep_assert_held(&set->tag_list_lock);
2227
2228         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2229                 blk_mq_freeze_queue(q);
2230                 queue_set_hctx_shared(q, shared);
2231                 blk_mq_unfreeze_queue(q);
2232         }
2233 }
2234
2235 static void blk_mq_del_queue_tag_set(struct request_queue *q)
2236 {
2237         struct blk_mq_tag_set *set = q->tag_set;
2238
2239         mutex_lock(&set->tag_list_lock);
2240         list_del_rcu(&q->tag_set_list);
2241         INIT_LIST_HEAD(&q->tag_set_list);
2242         if (list_is_singular(&set->tag_list)) {
2243                 /* just transitioned to unshared */
2244                 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2245                 /* update existing queue */
2246                 blk_mq_update_tag_set_depth(set, false);
2247         }
2248         mutex_unlock(&set->tag_list_lock);
2249
2250         synchronize_rcu();
2251 }
2252
2253 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2254                                      struct request_queue *q)
2255 {
2256         q->tag_set = set;
2257
2258         mutex_lock(&set->tag_list_lock);
2259
2260         /* Check to see if we're transitioning to shared (from 1 to 2 queues). */
2261         if (!list_empty(&set->tag_list) && !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2262                 set->flags |= BLK_MQ_F_TAG_SHARED;
2263                 /* update existing queue */
2264                 blk_mq_update_tag_set_depth(set, true);
2265         }
2266         if (set->flags & BLK_MQ_F_TAG_SHARED)
2267                 queue_set_hctx_shared(q, true);
2268         list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2269
2270         mutex_unlock(&set->tag_list_lock);
2271 }
2272
2273 /*
2274  * This is the actual release handler for mq, but we do it from the
2275  * request queue's release handler to avoid a use-after-free. It is a
2276  * headache because q->mq_kobj shouldn't have been introduced, but we
2277  * can't group the ctx/hctx kobjects without it.
2278  */
2279 void blk_mq_release(struct request_queue *q)
2280 {
2281         struct blk_mq_hw_ctx *hctx;
2282         unsigned int i;
2283
2284         /* hctx kobj stays in hctx */
2285         queue_for_each_hw_ctx(q, hctx, i) {
2286                 if (!hctx)
2287                         continue;
2288                 kobject_put(&hctx->kobj);
2289         }
2290
2291         q->mq_map = NULL;
2292
2293         kfree(q->queue_hw_ctx);
2294
2295         /*
2296          * Release .mq_kobj and the sw queues' kobjects now because
2297          * both share their lifetime with the request queue.
2298          */
2299         blk_mq_sysfs_deinit(q);
2300
2301         free_percpu(q->queue_ctx);
2302 }
2303
2304 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
2305 {
2306         struct request_queue *uninit_q, *q;
2307
2308         uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
2309         if (!uninit_q)
2310                 return ERR_PTR(-ENOMEM);
2311
2312         q = blk_mq_init_allocated_queue(set, uninit_q);
2313         if (IS_ERR(q))
2314                 blk_cleanup_queue(uninit_q);
2315
2316         return q;
2317 }
2318 EXPORT_SYMBOL(blk_mq_init_queue);
2319
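/*
 * For BLK_MQ_F_BLOCKING drivers the srcu_struct lives in a flexible tail
 * behind struct blk_mq_hw_ctx, so queue_rq_srcu must be the very last
 * member of the structure; the BUILD_BUG_ON() below enforces that.
 */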
2320 static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2321 {
2322         int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2323
2324         BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, queue_rq_srcu),
2325                            __alignof__(struct blk_mq_hw_ctx)) !=
2326                      sizeof(struct blk_mq_hw_ctx));
2327
2328         if (tag_set->flags & BLK_MQ_F_BLOCKING)
2329                 hw_ctx_size += sizeof(struct srcu_struct);
2330
2331         return hw_ctx_size;
2332 }
2333
2334 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2335                                                 struct request_queue *q)
2336 {
2337         int i, j;
2338         struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
2339
2340         blk_mq_sysfs_unregister(q);
2341         for (i = 0; i < set->nr_hw_queues; i++) {
2342                 int node;
2343
2344                 if (hctxs[i])
2345                         continue;
2346
2347                 node = blk_mq_hw_queue_to_node(q->mq_map, i);
2348                 hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
2349                                         GFP_KERNEL, node);
2350                 if (!hctxs[i])
2351                         break;
2352
2353                 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
2354                                                 node)) {
2355                         kfree(hctxs[i]);
2356                         hctxs[i] = NULL;
2357                         break;
2358                 }
2359
2360                 atomic_set(&hctxs[i]->nr_active, 0);
2361                 hctxs[i]->numa_node = node;
2362                 hctxs[i]->queue_num = i;
2363
2364                 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2365                         free_cpumask_var(hctxs[i]->cpumask);
2366                         kfree(hctxs[i]);
2367                         hctxs[i] = NULL;
2368                         break;
2369                 }
2370                 blk_mq_hctx_kobj_init(hctxs[i]);
2371         }
2372         for (j = i; j < q->nr_hw_queues; j++) {
2373                 struct blk_mq_hw_ctx *hctx = hctxs[j];
2374
2375                 if (hctx) {
2376                         if (hctx->tags)
2377                                 blk_mq_free_map_and_requests(set, j);
2378                         blk_mq_exit_hctx(q, set, hctx, j);
2379                         kobject_put(&hctx->kobj);
2380                         hctxs[j] = NULL;
2381
2382                 }
2383         }
2384         q->nr_hw_queues = i;
2385         blk_mq_sysfs_register(q);
2386 }
2387
2388 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2389                                                   struct request_queue *q)
2390 {
2391         /* mark the queue as mq asap */
2392         q->mq_ops = set->ops;
2393
2394         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
2395                                              blk_mq_poll_stats_bkt,
2396                                              BLK_MQ_POLL_STATS_BKTS, q);
2397         if (!q->poll_cb)
2398                 goto err_exit;
2399
2400         q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2401         if (!q->queue_ctx)
2402                 goto err_exit;
2403
2404         /* init q->mq_kobj and sw queues' kobjects */
2405         blk_mq_sysfs_init(q);
2406
2407         q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2408                                                 GFP_KERNEL, set->numa_node);
2409         if (!q->queue_hw_ctx)
2410                 goto err_percpu;
2411
2412         q->mq_map = set->mq_map;
2413
2414         blk_mq_realloc_hw_ctxs(set, q);
2415         if (!q->nr_hw_queues)
2416                 goto err_hctxs;
2417
2418         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
2419         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
2420
2421         q->nr_queues = nr_cpu_ids;
2422
2423         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
2424
2425         if (!(set->flags & BLK_MQ_F_SG_MERGE))
2426                 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2427
2428         q->sg_reserved_size = INT_MAX;
2429
2430         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
2431         INIT_LIST_HEAD(&q->requeue_list);
2432         spin_lock_init(&q->requeue_lock);
2433
2434         blk_queue_make_request(q, blk_mq_make_request);
2435         if (q->mq_ops->poll)
2436                 q->poll_fn = blk_mq_poll;
2437
2438         /*
2439          * Do this after blk_queue_make_request() overrides it...
2440          */
2441         q->nr_requests = set->queue_depth;
2442
2443         /*
2444          * Default to classic polling
2445          */
2446         q->poll_nsec = -1;
2447
2448         if (set->ops->complete)
2449                 blk_queue_softirq_done(q, set->ops->complete);
2450
2451         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2452         blk_mq_add_queue_tag_set(set, q);
2453         blk_mq_map_swqueue(q);
2454
2455         if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2456                 int ret;
2457
2458                 ret = blk_mq_sched_init(q);
2459                 if (ret)
2460                         return ERR_PTR(ret);
2461         }
2462
2463         return q;
2464
2465 err_hctxs:
2466         kfree(q->queue_hw_ctx);
2467 err_percpu:
2468         free_percpu(q->queue_ctx);
2469 err_exit:
2470         q->mq_ops = NULL;
2471         return ERR_PTR(-ENOMEM);
2472 }
2473 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2474
2475 void blk_mq_free_queue(struct request_queue *q)
2476 {
2477         struct blk_mq_tag_set   *set = q->tag_set;
2478
2479         blk_mq_del_queue_tag_set(q);
2480         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2481 }
2482
2483 /* Basically redo blk_mq_init_queue with queue frozen */
2484 static void blk_mq_queue_reinit(struct request_queue *q)
2485 {
2486         WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
2487
2488         blk_mq_debugfs_unregister_hctxs(q);
2489         blk_mq_sysfs_unregister(q);
2490
2491         /*
2492          * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
2493          * we should change hctx numa_node according to the new topology (this
2494          * involves freeing and re-allocating memory, worth doing?)
2495          */
2496
2497         blk_mq_map_swqueue(q);
2498
2499         blk_mq_sysfs_register(q);
2500         blk_mq_debugfs_register_hctxs(q);
2501 }
2502
2503 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2504 {
2505         int i;
2506
2507         for (i = 0; i < set->nr_hw_queues; i++)
2508                 if (!__blk_mq_alloc_rq_map(set, i))
2509                         goto out_unwind;
2510
2511         return 0;
2512
2513 out_unwind:
2514         while (--i >= 0)
2515                 blk_mq_free_rq_map(set->tags[i]);
2516
2517         return -ENOMEM;
2518 }
2519
2520 /*
2521  * Allocate the request maps associated with this tag_set. Note that this
2522  * may reduce the depth asked for, if memory is tight. set->queue_depth
2523  * will be updated to reflect the allocated depth.
2524  */
2525 static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2526 {
2527         unsigned int depth;
2528         int err;
2529
2530         depth = set->queue_depth;
2531         do {
2532                 err = __blk_mq_alloc_rq_maps(set);
2533                 if (!err)
2534                         break;
2535
2536                 set->queue_depth >>= 1;
2537                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2538                         err = -ENOMEM;
2539                         break;
2540                 }
2541         } while (set->queue_depth);
2542
2543         if (!set->queue_depth || err) {
2544                 pr_err("blk-mq: failed to allocate request map\n");
2545                 return -ENOMEM;
2546         }
2547
2548         if (depth != set->queue_depth)
2549                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2550                                                 depth, set->queue_depth);
2551
2552         return 0;
2553 }
2554
2555 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2556 {
2557         if (set->ops->map_queues)
2558                 return set->ops->map_queues(set);
2559         else
2560                 return blk_mq_map_queues(set);
2561 }
2562
2563 /*
2564  * Alloc a tag set to be associated with one or more request queues.
2565  * May fail with EINVAL for various error conditions. May adjust the
2566  * requested depth down, if it is too large. In that case, the adjusted
2567  * value will be stored in set->queue_depth.
2568  */
2569 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2570 {
2571         int ret;
2572
2573         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2574
2575         if (!set->nr_hw_queues)
2576                 return -EINVAL;
2577         if (!set->queue_depth)
2578                 return -EINVAL;
2579         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2580                 return -EINVAL;
2581
2582         if (!set->ops->queue_rq)
2583                 return -EINVAL;
2584
2585         if (!set->ops->get_budget ^ !set->ops->put_budget)
2586                 return -EINVAL;
2587
2588         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2589                 pr_info("blk-mq: reduced tag depth to %u\n",
2590                         BLK_MQ_MAX_DEPTH);
2591                 set->queue_depth = BLK_MQ_MAX_DEPTH;
2592         }
2593
2594         /*
2595          * If a crashdump is active, then we are potentially in a very
2596          * memory constrained environment. Limit us to 1 queue and
2597          * 64 tags to prevent using too much memory.
2598          */
2599         if (is_kdump_kernel()) {
2600                 set->nr_hw_queues = 1;
2601                 set->queue_depth = min(64U, set->queue_depth);
2602         }
2603         /*
2604          * There is no use for more h/w queues than cpus.
2605          */
2606         if (set->nr_hw_queues > nr_cpu_ids)
2607                 set->nr_hw_queues = nr_cpu_ids;
2608
2609         set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
2610                                  GFP_KERNEL, set->numa_node);
2611         if (!set->tags)
2612                 return -ENOMEM;
2613
2614         ret = -ENOMEM;
2615         set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2616                         GFP_KERNEL, set->numa_node);
2617         if (!set->mq_map)
2618                 goto out_free_tags;
2619
2620         ret = blk_mq_update_queue_map(set);
2621         if (ret)
2622                 goto out_free_mq_map;
2623
2624         ret = blk_mq_alloc_rq_maps(set);
2625         if (ret)
2626                 goto out_free_mq_map;
2627
2628         mutex_init(&set->tag_list_lock);
2629         INIT_LIST_HEAD(&set->tag_list);
2630
2631         return 0;
2632
2633 out_free_mq_map:
2634         kfree(set->mq_map);
2635         set->mq_map = NULL;
2636 out_free_tags:
2637         kfree(set->tags);
2638         set->tags = NULL;
2639         return ret;
2640 }
2641 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
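
/*
 * Illustrative sketch (not part of this file): a minimal driver would
 * typically fill in a tag set and create a queue from it roughly as shown
 * below. my_mq_ops, my_queue_rq and struct my_cmd are hypothetical driver
 * definitions, not anything provided by blk-mq.
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *	};
 *
 *	set->ops		= &my_mq_ops;
 *	set->nr_hw_queues	= 1;
 *	set->queue_depth	= 64;
 *	set->numa_node		= NUMA_NO_NODE;
 *	set->cmd_size		= sizeof(struct my_cmd);
 *	set->flags		= BLK_MQ_F_SHOULD_MERGE;
 *
 *	err = blk_mq_alloc_tag_set(set);
 *	if (!err)
 *		q = blk_mq_init_queue(set);
 */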
2642
2643 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2644 {
2645         int i;
2646
2647         for (i = 0; i < nr_cpu_ids; i++)
2648                 blk_mq_free_map_and_requests(set, i);
2649
2650         kfree(set->mq_map);
2651         set->mq_map = NULL;
2652
2653         kfree(set->tags);
2654         set->tags = NULL;
2655 }
2656 EXPORT_SYMBOL(blk_mq_free_tag_set);
2657
2658 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2659 {
2660         struct blk_mq_tag_set *set = q->tag_set;
2661         struct blk_mq_hw_ctx *hctx;
2662         int i, ret;
2663
2664         if (!set)
2665                 return -EINVAL;
2666
2667         blk_mq_freeze_queue(q);
2668
2669         ret = 0;
2670         queue_for_each_hw_ctx(q, hctx, i) {
2671                 if (!hctx->tags)
2672                         continue;
2673                 /*
2674                  * If we're using an MQ scheduler, just update the scheduler
2675                  * queue depth. This is similar to what the old code would do.
2676                  */
2677                 if (!hctx->sched_tags) {
2678                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
2679                                                         false);
2680                 } else {
2681                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2682                                                         nr, true);
2683                 }
2684                 if (ret)
2685                         break;
2686         }
2687
2688         if (!ret)
2689                 q->nr_requests = nr;
2690
2691         blk_mq_unfreeze_queue(q);
2692
2693         return ret;
2694 }
2695
2696 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2697                                                         int nr_hw_queues)
2698 {
2699         struct request_queue *q;
2700
2701         lockdep_assert_held(&set->tag_list_lock);
2702
2703         if (nr_hw_queues > nr_cpu_ids)
2704                 nr_hw_queues = nr_cpu_ids;
2705         if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2706                 return;
2707
2708         list_for_each_entry(q, &set->tag_list, tag_set_list)
2709                 blk_mq_freeze_queue(q);
2710
2711         set->nr_hw_queues = nr_hw_queues;
2712         blk_mq_update_queue_map(set);
2713         list_for_each_entry(q, &set->tag_list, tag_set_list) {
2714                 blk_mq_realloc_hw_ctxs(set, q);
2715                 blk_mq_queue_reinit(q);
2716         }
2717
2718         list_for_each_entry(q, &set->tag_list, tag_set_list)
2719                 blk_mq_unfreeze_queue(q);
2720 }
2721
2722 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2723 {
2724         mutex_lock(&set->tag_list_lock);
2725         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
2726         mutex_unlock(&set->tag_list_lock);
2727 }
2728 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2729
2730 /* Enable polling stats and return whether they were already enabled. */
2731 static bool blk_poll_stats_enable(struct request_queue *q)
2732 {
2733         if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2734             test_and_set_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
2735                 return true;
2736         blk_stat_add_callback(q, q->poll_cb);
2737         return false;
2738 }
2739
2740 static void blk_mq_poll_stats_start(struct request_queue *q)
2741 {
2742         /*
2743          * We don't arm the callback if polling stats are not enabled or the
2744          * callback is already active.
2745          */
2746         if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
2747             blk_stat_is_active(q->poll_cb))
2748                 return;
2749
2750         blk_stat_activate_msecs(q->poll_cb, 100);
2751 }
2752
2753 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
2754 {
2755         struct request_queue *q = cb->data;
2756         int bucket;
2757
2758         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
2759                 if (cb->stat[bucket].nr_samples)
2760                         q->poll_stat[bucket] = cb->stat[bucket];
2761         }
2762 }
2763
2764 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
2765                                        struct blk_mq_hw_ctx *hctx,
2766                                        struct request *rq)
2767 {
2768         unsigned long ret = 0;
2769         int bucket;
2770
2771         /*
2772          * If stats collection isn't on, don't sleep but turn it on for
2773          * future users
2774          */
2775         if (!blk_poll_stats_enable(q))
2776                 return 0;
2777
2778         /*
2779          * As an optimistic guess, use half of the mean service time
2780          * for this type of request. We can (and should) make this smarter.
2781          * For instance, if the completion latencies are tight, we can
2782          * get closer than just half the mean. This is especially
2783          * important on devices where the completion latencies are longer
2784          * than ~10 usec. We do use the stats for the relevant IO size
2785          * if available which does lead to better estimates.
2786          */
2787         bucket = blk_mq_poll_stats_bkt(rq);
2788         if (bucket < 0)
2789                 return ret;
2790
2791         if (q->poll_stat[bucket].nr_samples)
2792                 ret = (q->poll_stat[bucket].mean + 1) / 2;
2793
2794         return ret;
2795 }
2796
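/*
 * Hybrid polling: rather than busy-polling for the entire expected I/O
 * latency, sleep for an estimate of the completion time first (by default
 * half of the mean completion time seen for this request size), then let
 * __blk_mq_poll() fall into the busy poll loop.
 */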
2797 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
2798                                      struct blk_mq_hw_ctx *hctx,
2799                                      struct request *rq)
2800 {
2801         struct hrtimer_sleeper hs;
2802         enum hrtimer_mode mode;
2803         unsigned int nsecs;
2804         ktime_t kt;
2805
2806         if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
2807                 return false;
2808
2809         /*
2810          * poll_nsec can be:
2811          *
2812          * -1:  don't ever hybrid sleep
2813          *  0:  use half of prev avg
2814          * >0:  use this specific value
2815          */
2816         if (q->poll_nsec == -1)
2817                 return false;
2818         else if (q->poll_nsec > 0)
2819                 nsecs = q->poll_nsec;
2820         else
2821                 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
2822
2823         if (!nsecs)
2824                 return false;
2825
2826         set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
2827
2828         /*
2829          * This will be replaced with the stats tracking code, using
2830          * 'avg_completion_time / 2' as the pre-sleep target.
2831          */
2832         kt = nsecs;
2833
2834         mode = HRTIMER_MODE_REL;
2835         hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
2836         hrtimer_set_expires(&hs.timer, kt);
2837
2838         hrtimer_init_sleeper(&hs, current);
2839         do {
2840                 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
2841                         break;
2842                 set_current_state(TASK_UNINTERRUPTIBLE);
2843                 hrtimer_start_expires(&hs.timer, mode);
2844                 if (hs.task)
2845                         io_schedule();
2846                 hrtimer_cancel(&hs.timer);
2847                 mode = HRTIMER_MODE_ABS;
2848         } while (hs.task && !signal_pending(current));
2849
2850         __set_current_state(TASK_RUNNING);
2851         destroy_hrtimer_on_stack(&hs.timer);
2852         return true;
2853 }
2854
2855 static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
2856 {
2857         struct request_queue *q = hctx->queue;
2858         long state;
2859
2860         /*
2861          * If we sleep, have the caller restart the poll loop to reset
2862          * the state. Like for the other success return cases, the
2863          * caller is responsible for checking if the IO completed. If
2864          * the IO isn't complete, we'll get called again and will go
2865          * straight to the busy poll loop.
2866          */
2867         if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
2868                 return true;
2869
2870         hctx->poll_considered++;
2871
2872         state = current->state;
2873         while (!need_resched()) {
2874                 int ret;
2875
2876                 hctx->poll_invoked++;
2877
2878                 ret = q->mq_ops->poll(hctx, rq->tag);
2879                 if (ret > 0) {
2880                         hctx->poll_success++;
2881                         set_current_state(TASK_RUNNING);
2882                         return true;
2883                 }
2884
2885                 if (signal_pending_state(state, current))
2886                         set_current_state(TASK_RUNNING);
2887
2888                 if (current->state == TASK_RUNNING)
2889                         return true;
2890                 if (ret < 0)
2891                         break;
2892                 cpu_relax();
2893         }
2894
2895         return false;
2896 }
2897
2898 static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
2899 {
2900         struct blk_mq_hw_ctx *hctx;
2901         struct request *rq;
2902
2903         if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
2904                 return false;
2905
2906         hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
2907         if (!blk_qc_t_is_internal(cookie))
2908                 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
2909         else {
2910                 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
2911                 /*
2912                  * With scheduling, if the request has completed, we'll
2913                  * get a NULL return here, as we clear the sched tag when
2914                  * that happens. The request still remains valid, like always,
2915                  * so we should be safe with just the NULL check.
2916                  */
2917                 if (!rq)
2918                         return false;
2919         }
2920
2921         return __blk_mq_poll(hctx, rq);
2922 }
2923
2924 static int __init blk_mq_init(void)
2925 {
2926         /*
2927          * See comment in block/blk.h rq_atomic_flags enum
2928          */
2929         BUILD_BUG_ON((REQ_ATOM_STARTED / BITS_PER_BYTE) !=
2930                         (REQ_ATOM_COMPLETE / BITS_PER_BYTE));
2931
2932         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
2933                                 blk_mq_hctx_notify_dead);
2934         return 0;
2935 }
2936 subsys_initcall(blk_mq_init);