block/blk-mq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Block multiqueue core code
4  *
5  * Copyright (C) 2013-2014 Jens Axboe
6  * Copyright (C) 2013-2014 Christoph Hellwig
7  */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/backing-dev.h>
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/blk-integrity.h>
14 #include <linux/kmemleak.h>
15 #include <linux/mm.h>
16 #include <linux/init.h>
17 #include <linux/slab.h>
18 #include <linux/workqueue.h>
19 #include <linux/smp.h>
20 #include <linux/interrupt.h>
21 #include <linux/llist.h>
22 #include <linux/cpu.h>
23 #include <linux/cache.h>
24 #include <linux/sched/sysctl.h>
25 #include <linux/sched/topology.h>
26 #include <linux/sched/signal.h>
27 #include <linux/delay.h>
28 #include <linux/crash_dump.h>
29 #include <linux/prefetch.h>
30 #include <linux/blk-crypto.h>
31 #include <linux/part_stat.h>
32
33 #include <trace/events/block.h>
34
35 #include <linux/blk-mq.h>
36 #include <linux/t10-pi.h>
37 #include "blk.h"
38 #include "blk-mq.h"
39 #include "blk-mq-debugfs.h"
40 #include "blk-mq-tag.h"
41 #include "blk-pm.h"
42 #include "blk-stat.h"
43 #include "blk-mq-sched.h"
44 #include "blk-rq-qos.h"
45 #include "blk-ioprio.h"
46
47 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
48
49 static void blk_mq_poll_stats_start(struct request_queue *q);
50 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
51
52 static int blk_mq_poll_stats_bkt(const struct request *rq)
53 {
54         int ddir, sectors, bucket;
55
56         ddir = rq_data_dir(rq);
57         sectors = blk_rq_stats_sectors(rq);
58
59         bucket = ddir + 2 * ilog2(sectors);
60
61         if (bucket < 0)
62                 return -1;
63         else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
64                 return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
65
66         return bucket;
67 }
68
69 #define BLK_QC_T_SHIFT          16
70 #define BLK_QC_T_INTERNAL       (1U << 31)
71
72 static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
73                 blk_qc_t qc)
74 {
75         return xa_load(&q->hctx_table,
76                         (qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT);
77 }
78
79 static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
80                 blk_qc_t qc)
81 {
82         unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1);
83
84         if (qc & BLK_QC_T_INTERNAL)
85                 return blk_mq_tag_to_rq(hctx->sched_tags, tag);
86         return blk_mq_tag_to_rq(hctx->tags, tag);
87 }
88
89 static inline blk_qc_t blk_rq_to_qc(struct request *rq)
90 {
91         return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
92                 (rq->tag != -1 ?
93                  rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
94 }
95
96 /*
97  * Check if any of the ctx, dispatch list or elevator
98  * have pending work in this hardware queue.
99  */
100 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
101 {
102         return !list_empty_careful(&hctx->dispatch) ||
103                 sbitmap_any_bit_set(&hctx->ctx_map) ||
104                         blk_mq_sched_has_work(hctx);
105 }
106
107 /*
108  * Mark this ctx as having pending work in this hardware queue
109  */
110 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
111                                      struct blk_mq_ctx *ctx)
112 {
113         const int bit = ctx->index_hw[hctx->type];
114
115         if (!sbitmap_test_bit(&hctx->ctx_map, bit))
116                 sbitmap_set_bit(&hctx->ctx_map, bit);
117 }
118
119 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
120                                       struct blk_mq_ctx *ctx)
121 {
122         const int bit = ctx->index_hw[hctx->type];
123
124         sbitmap_clear_bit(&hctx->ctx_map, bit);
125 }
126
127 struct mq_inflight {
128         struct block_device *part;
129         unsigned int inflight[2];
130 };
131
132 static bool blk_mq_check_inflight(struct request *rq, void *priv)
133 {
134         struct mq_inflight *mi = priv;
135
136         if (rq->part && blk_do_io_stat(rq) &&
137             (!mi->part->bd_partno || rq->part == mi->part) &&
138             blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
139                 mi->inflight[rq_data_dir(rq)]++;
140
141         return true;
142 }
143
144 unsigned int blk_mq_in_flight(struct request_queue *q,
145                 struct block_device *part)
146 {
147         struct mq_inflight mi = { .part = part };
148
149         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
150
151         return mi.inflight[0] + mi.inflight[1];
152 }
153
154 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
155                 unsigned int inflight[2])
156 {
157         struct mq_inflight mi = { .part = part };
158
159         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
160         inflight[0] = mi.inflight[0];
161         inflight[1] = mi.inflight[1];
162 }
163
164 void blk_freeze_queue_start(struct request_queue *q)
165 {
166         mutex_lock(&q->mq_freeze_lock);
167         if (++q->mq_freeze_depth == 1) {
168                 percpu_ref_kill(&q->q_usage_counter);
169                 mutex_unlock(&q->mq_freeze_lock);
170                 if (queue_is_mq(q))
171                         blk_mq_run_hw_queues(q, false);
172         } else {
173                 mutex_unlock(&q->mq_freeze_lock);
174         }
175 }
176 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
177
178 void blk_mq_freeze_queue_wait(struct request_queue *q)
179 {
180         wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
181 }
182 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
183
184 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
185                                      unsigned long timeout)
186 {
187         return wait_event_timeout(q->mq_freeze_wq,
188                                         percpu_ref_is_zero(&q->q_usage_counter),
189                                         timeout);
190 }
191 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);
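/*
 * Illustrative sketch (documentation only, not part of blk-mq.c): when a
 * driver has to freeze several queues, e.g. one per namespace of a
 * controller, starting all the freezes first and only then waiting lets the
 * drain phases overlap instead of being serialized.  The "queues" array and
 * "nr_queues" count below are hypothetical driver state:
 *
 *	for (i = 0; i < nr_queues; i++)
 *		blk_freeze_queue_start(queues[i]);
 *	for (i = 0; i < nr_queues; i++)
 *		blk_mq_freeze_queue_wait(queues[i]);
 */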
192
193 /*
194  * Guarantee no request is in use, so we can change any data structure of
195  * the queue afterward.
196  */
197 void blk_freeze_queue(struct request_queue *q)
198 {
199         /*
200          * In the !blk_mq case we are only calling this to kill the
201          * q_usage_counter, otherwise this increases the freeze depth
202          * and waits for it to return to zero.  For this reason there is
203          * no blk_unfreeze_queue(), and blk_freeze_queue() is not
204          * exported to drivers as the only user for unfreeze is blk_mq.
205          */
206         blk_freeze_queue_start(q);
207         blk_mq_freeze_queue_wait(q);
208 }
209
210 void blk_mq_freeze_queue(struct request_queue *q)
211 {
212         /*
213          * ...just an alias to keep freeze and unfreeze actions balanced
214          * in the blk_mq_* namespace
215          */
216         blk_freeze_queue(q);
217 }
218 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
219
220 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
221 {
222         mutex_lock(&q->mq_freeze_lock);
223         if (force_atomic)
224                 q->q_usage_counter.data->force_atomic = true;
225         q->mq_freeze_depth--;
226         WARN_ON_ONCE(q->mq_freeze_depth < 0);
227         if (!q->mq_freeze_depth) {
228                 percpu_ref_resurrect(&q->q_usage_counter);
229                 wake_up_all(&q->mq_freeze_wq);
230         }
231         mutex_unlock(&q->mq_freeze_lock);
232 }
233
234 void blk_mq_unfreeze_queue(struct request_queue *q)
235 {
236         __blk_mq_unfreeze_queue(q, false);
237 }
238 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
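/*
 * Illustrative sketch (documentation only, not part of blk-mq.c): a driver
 * that must have no requests in flight while it changes state typically
 * brackets the change with a freeze/unfreeze pair; while frozen, new
 * submitters block in blk_queue_enter() until q_usage_counter is
 * resurrected.  my_drv_update_limits() is a hypothetical helper:
 *
 *	blk_mq_freeze_queue(q);
 *	my_drv_update_limits(q);	// no requests are in flight here
 *	blk_mq_unfreeze_queue(q);
 */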
239
240 /*
241  * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
242  * mpt3sas driver such that this function can be removed.
243  */
244 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
245 {
246         unsigned long flags;
247
248         spin_lock_irqsave(&q->queue_lock, flags);
249         if (!q->quiesce_depth++)
250                 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
251         spin_unlock_irqrestore(&q->queue_lock, flags);
252 }
253 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
254
255 /**
256  * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
257  * @q: request queue.
258  *
259  * Note: it is the driver's responsibility to make sure that quiesce has
260  * been started.
261  */
262 void blk_mq_wait_quiesce_done(struct request_queue *q)
263 {
264         if (blk_queue_has_srcu(q))
265                 synchronize_srcu(q->srcu);
266         else
267                 synchronize_rcu();
268 }
269 EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);
270
271 /**
272  * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
273  * @q: request queue.
274  *
275  * Note: this function does not prevent the struct request end_io()
276  * callback from being invoked. Once this function returns, it is
277  * guaranteed that no dispatch can happen until the queue is unquiesced
278  * via blk_mq_unquiesce_queue().
279  */
280 void blk_mq_quiesce_queue(struct request_queue *q)
281 {
282         blk_mq_quiesce_queue_nowait(q);
283         blk_mq_wait_quiesce_done(q);
284 }
285 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
286
287 /*
288  * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
289  * @q: request queue.
290  *
291  * This function restores the queue to the state it was in before it
292  * was quiesced by blk_mq_quiesce_queue().
293  */
294 void blk_mq_unquiesce_queue(struct request_queue *q)
295 {
296         unsigned long flags;
297         bool run_queue = false;
298
299         spin_lock_irqsave(&q->queue_lock, flags);
300         if (WARN_ON_ONCE(q->quiesce_depth <= 0)) {
301                 ;
302         } else if (!--q->quiesce_depth) {
303                 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
304                 run_queue = true;
305         }
306         spin_unlock_irqrestore(&q->queue_lock, flags);
307
308         /* dispatch requests which are inserted during quiescing */
309         if (run_queue)
310                 blk_mq_run_hw_queues(q, true);
311 }
312 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
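/*
 * Illustrative sketch (documentation only, not part of blk-mq.c): unlike a
 * freeze, quiescing only stops further ->queue_rq() dispatch; requests that
 * were already issued may still complete.  A driver pausing dispatch around
 * a recovery step (my_drv_reset_hw() is hypothetical) might do:
 *
 *	blk_mq_quiesce_queue(q);	// waits for in-flight dispatch calls
 *	my_drv_reset_hw(q);
 *	blk_mq_unquiesce_queue(q);	// reruns the hardware queues
 */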
313
314 void blk_mq_wake_waiters(struct request_queue *q)
315 {
316         struct blk_mq_hw_ctx *hctx;
317         unsigned long i;
318
319         queue_for_each_hw_ctx(q, hctx, i)
320                 if (blk_mq_hw_queue_mapped(hctx))
321                         blk_mq_tag_wakeup_all(hctx->tags, true);
322 }
323
324 void blk_rq_init(struct request_queue *q, struct request *rq)
325 {
326         memset(rq, 0, sizeof(*rq));
327
328         INIT_LIST_HEAD(&rq->queuelist);
329         rq->q = q;
330         rq->__sector = (sector_t) -1;
331         INIT_HLIST_NODE(&rq->hash);
332         RB_CLEAR_NODE(&rq->rb_node);
333         rq->tag = BLK_MQ_NO_TAG;
334         rq->internal_tag = BLK_MQ_NO_TAG;
335         rq->start_time_ns = ktime_get_ns();
336         rq->part = NULL;
337         blk_crypto_rq_set_defaults(rq);
338 }
339 EXPORT_SYMBOL(blk_rq_init);
340
341 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
342                 struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
343 {
344         struct blk_mq_ctx *ctx = data->ctx;
345         struct blk_mq_hw_ctx *hctx = data->hctx;
346         struct request_queue *q = data->q;
347         struct request *rq = tags->static_rqs[tag];
348
349         rq->q = q;
350         rq->mq_ctx = ctx;
351         rq->mq_hctx = hctx;
352         rq->cmd_flags = data->cmd_flags;
353
354         if (data->flags & BLK_MQ_REQ_PM)
355                 data->rq_flags |= RQF_PM;
356         if (blk_queue_io_stat(q))
357                 data->rq_flags |= RQF_IO_STAT;
358         rq->rq_flags = data->rq_flags;
359
360         if (!(data->rq_flags & RQF_ELV)) {
361                 rq->tag = tag;
362                 rq->internal_tag = BLK_MQ_NO_TAG;
363         } else {
364                 rq->tag = BLK_MQ_NO_TAG;
365                 rq->internal_tag = tag;
366         }
367         rq->timeout = 0;
368
369         if (blk_mq_need_time_stamp(rq))
370                 rq->start_time_ns = ktime_get_ns();
371         else
372                 rq->start_time_ns = 0;
373         rq->part = NULL;
374 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
375         rq->alloc_time_ns = alloc_time_ns;
376 #endif
377         rq->io_start_time_ns = 0;
378         rq->stats_sectors = 0;
379         rq->nr_phys_segments = 0;
380 #if defined(CONFIG_BLK_DEV_INTEGRITY)
381         rq->nr_integrity_segments = 0;
382 #endif
383         rq->end_io = NULL;
384         rq->end_io_data = NULL;
385
386         blk_crypto_rq_set_defaults(rq);
387         INIT_LIST_HEAD(&rq->queuelist);
388         /* tag was already set */
389         WRITE_ONCE(rq->deadline, 0);
390         req_ref_set(rq, 1);
391
392         if (rq->rq_flags & RQF_ELV) {
393                 struct elevator_queue *e = data->q->elevator;
394
395                 INIT_HLIST_NODE(&rq->hash);
396                 RB_CLEAR_NODE(&rq->rb_node);
397
398                 if (!op_is_flush(data->cmd_flags) &&
399                     e->type->ops.prepare_request) {
400                         e->type->ops.prepare_request(rq);
401                         rq->rq_flags |= RQF_ELVPRIV;
402                 }
403         }
404
405         return rq;
406 }
407
408 static inline struct request *
409 __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
410                 u64 alloc_time_ns)
411 {
412         unsigned int tag, tag_offset;
413         struct blk_mq_tags *tags;
414         struct request *rq;
415         unsigned long tag_mask;
416         int i, nr = 0;
417
418         tag_mask = blk_mq_get_tags(data, data->nr_tags, &tag_offset);
419         if (unlikely(!tag_mask))
420                 return NULL;
421
422         tags = blk_mq_tags_from_data(data);
423         for (i = 0; tag_mask; i++) {
424                 if (!(tag_mask & (1UL << i)))
425                         continue;
426                 tag = tag_offset + i;
427                 prefetch(tags->static_rqs[tag]);
428                 tag_mask &= ~(1UL << i);
429                 rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
430                 rq_list_add(data->cached_rq, rq);
431                 nr++;
432         }
433         /* caller already holds a reference, add for remainder */
434         percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
435         data->nr_tags -= nr;
436
437         return rq_list_pop(data->cached_rq);
438 }
439
440 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
441 {
442         struct request_queue *q = data->q;
443         u64 alloc_time_ns = 0;
444         struct request *rq;
445         unsigned int tag;
446
447         /* alloc_time includes depth and tag waits */
448         if (blk_queue_rq_alloc_time(q))
449                 alloc_time_ns = ktime_get_ns();
450
451         if (data->cmd_flags & REQ_NOWAIT)
452                 data->flags |= BLK_MQ_REQ_NOWAIT;
453
454         if (q->elevator) {
455                 struct elevator_queue *e = q->elevator;
456
457                 data->rq_flags |= RQF_ELV;
458
459                 /*
460                  * Flush/passthrough requests are special and go directly to the
461                  * dispatch list. Don't include reserved tags in the
462                  * limiting, as it isn't useful.
463                  */
464                 if (!op_is_flush(data->cmd_flags) &&
465                     !blk_op_is_passthrough(data->cmd_flags) &&
466                     e->type->ops.limit_depth &&
467                     !(data->flags & BLK_MQ_REQ_RESERVED))
468                         e->type->ops.limit_depth(data->cmd_flags, data);
469         }
470
471 retry:
472         data->ctx = blk_mq_get_ctx(q);
473         data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
474         if (!(data->rq_flags & RQF_ELV))
475                 blk_mq_tag_busy(data->hctx);
476
477         if (data->flags & BLK_MQ_REQ_RESERVED)
478                 data->rq_flags |= RQF_RESV;
479
480         /*
481          * Try batched alloc if we want more than 1 tag.
482          */
483         if (data->nr_tags > 1) {
484                 rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
485                 if (rq)
486                         return rq;
487                 data->nr_tags = 1;
488         }
489
490         /*
491          * Waiting allocations only fail because of an inactive hctx.  In that
492          * case just retry the hctx assignment and tag allocation as CPU hotplug
493          * should have migrated us to an online CPU by now.
494          */
495         tag = blk_mq_get_tag(data);
496         if (tag == BLK_MQ_NO_TAG) {
497                 if (data->flags & BLK_MQ_REQ_NOWAIT)
498                         return NULL;
499                 /*
500                  * Give up the CPU and sleep for a short time to ensure
501                  * that threads using a realtime scheduling class are
502                  * migrated off the CPU, and thus off the hctx that is
503                  * going away.
504                  */
505                 msleep(3);
506                 goto retry;
507         }
508
509         return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
510                                         alloc_time_ns);
511 }
512
513 static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
514                                             struct blk_plug *plug,
515                                             blk_opf_t opf,
516                                             blk_mq_req_flags_t flags)
517 {
518         struct blk_mq_alloc_data data = {
519                 .q              = q,
520                 .flags          = flags,
521                 .cmd_flags      = opf,
522                 .nr_tags        = plug->nr_ios,
523                 .cached_rq      = &plug->cached_rq,
524         };
525         struct request *rq;
526
527         if (blk_queue_enter(q, flags))
528                 return NULL;
529
530         plug->nr_ios = 1;
531
532         rq = __blk_mq_alloc_requests(&data);
533         if (unlikely(!rq))
534                 blk_queue_exit(q);
535         return rq;
536 }
537
538 static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
539                                                    blk_opf_t opf,
540                                                    blk_mq_req_flags_t flags)
541 {
542         struct blk_plug *plug = current->plug;
543         struct request *rq;
544
545         if (!plug)
546                 return NULL;
547         if (rq_list_empty(plug->cached_rq)) {
548                 if (plug->nr_ios == 1)
549                         return NULL;
550                 rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
551                 if (rq)
552                         goto got_it;
553                 return NULL;
554         }
555         rq = rq_list_peek(&plug->cached_rq);
556         if (!rq || rq->q != q)
557                 return NULL;
558
559         if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
560                 return NULL;
561         if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
562                 return NULL;
563
564         plug->cached_rq = rq_list_next(rq);
565 got_it:
566         rq->cmd_flags = opf;
567         INIT_LIST_HEAD(&rq->queuelist);
568         return rq;
569 }
570
571 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
572                 blk_mq_req_flags_t flags)
573 {
574         struct request *rq;
575
576         rq = blk_mq_alloc_cached_request(q, opf, flags);
577         if (!rq) {
578                 struct blk_mq_alloc_data data = {
579                         .q              = q,
580                         .flags          = flags,
581                         .cmd_flags      = opf,
582                         .nr_tags        = 1,
583                 };
584                 int ret;
585
586                 ret = blk_queue_enter(q, flags);
587                 if (ret)
588                         return ERR_PTR(ret);
589
590                 rq = __blk_mq_alloc_requests(&data);
591                 if (!rq)
592                         goto out_queue_exit;
593         }
594         rq->__data_len = 0;
595         rq->__sector = (sector_t) -1;
596         rq->bio = rq->biotail = NULL;
597         return rq;
598 out_queue_exit:
599         blk_queue_exit(q);
600         return ERR_PTR(-EWOULDBLOCK);
601 }
602 EXPORT_SYMBOL(blk_mq_alloc_request);
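/*
 * Illustrative sketch (documentation only, not part of blk-mq.c): a
 * passthrough path allocates a request, prepares it and frees it again,
 * following the ERR_PTR() convention of blk_mq_alloc_request():
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ... fill in the driver-specific command/payload here ...
 *	blk_mq_free_request(rq);
 */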
603
604 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
605         blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)
606 {
607         struct blk_mq_alloc_data data = {
608                 .q              = q,
609                 .flags          = flags,
610                 .cmd_flags      = opf,
611                 .nr_tags        = 1,
612         };
613         u64 alloc_time_ns = 0;
614         struct request *rq;
615         unsigned int cpu;
616         unsigned int tag;
617         int ret;
618
619         /* alloc_time includes depth and tag waits */
620         if (blk_queue_rq_alloc_time(q))
621                 alloc_time_ns = ktime_get_ns();
622
623         /*
624          * If the tag allocator sleeps we could get an allocation for a
625          * different hardware context.  No need to complicate the low level
626          * allocator for this for the rare use case of a command tied to
627          * a specific queue.
628          */
629         if (WARN_ON_ONCE(!(flags & (BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED))))
630                 return ERR_PTR(-EINVAL);
631
632         if (hctx_idx >= q->nr_hw_queues)
633                 return ERR_PTR(-EIO);
634
635         ret = blk_queue_enter(q, flags);
636         if (ret)
637                 return ERR_PTR(ret);
638
639         /*
640          * Check if the hardware context is actually mapped to anything.
641          * If not tell the caller that it should skip this queue.
642          */
643         ret = -EXDEV;
644         data.hctx = xa_load(&q->hctx_table, hctx_idx);
645         if (!blk_mq_hw_queue_mapped(data.hctx))
646                 goto out_queue_exit;
647         cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
648         if (cpu >= nr_cpu_ids)
649                 goto out_queue_exit;
650         data.ctx = __blk_mq_get_ctx(q, cpu);
651
652         if (!q->elevator)
653                 blk_mq_tag_busy(data.hctx);
654         else
655                 data.rq_flags |= RQF_ELV;
656
657         if (flags & BLK_MQ_REQ_RESERVED)
658                 data.rq_flags |= RQF_RESV;
659
660         ret = -EWOULDBLOCK;
661         tag = blk_mq_get_tag(&data);
662         if (tag == BLK_MQ_NO_TAG)
663                 goto out_queue_exit;
664         rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
665                                         alloc_time_ns);
666         rq->__data_len = 0;
667         rq->__sector = (sector_t) -1;
668         rq->bio = rq->biotail = NULL;
669         return rq;
670
671 out_queue_exit:
672         blk_queue_exit(q);
673         return ERR_PTR(ret);
674 }
675 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
676
677 static void __blk_mq_free_request(struct request *rq)
678 {
679         struct request_queue *q = rq->q;
680         struct blk_mq_ctx *ctx = rq->mq_ctx;
681         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
682         const int sched_tag = rq->internal_tag;
683
684         blk_crypto_free_request(rq);
685         blk_pm_mark_last_busy(rq);
686         rq->mq_hctx = NULL;
687         if (rq->tag != BLK_MQ_NO_TAG)
688                 blk_mq_put_tag(hctx->tags, ctx, rq->tag);
689         if (sched_tag != BLK_MQ_NO_TAG)
690                 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
691         blk_mq_sched_restart(hctx);
692         blk_queue_exit(q);
693 }
694
695 void blk_mq_free_request(struct request *rq)
696 {
697         struct request_queue *q = rq->q;
698         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
699
700         if ((rq->rq_flags & RQF_ELVPRIV) &&
701             q->elevator->type->ops.finish_request)
702                 q->elevator->type->ops.finish_request(rq);
703
704         if (rq->rq_flags & RQF_MQ_INFLIGHT)
705                 __blk_mq_dec_active_requests(hctx);
706
707         if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
708                 laptop_io_completion(q->disk->bdi);
709
710         rq_qos_done(q, rq);
711
712         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
713         if (req_ref_put_and_test(rq))
714                 __blk_mq_free_request(rq);
715 }
716 EXPORT_SYMBOL_GPL(blk_mq_free_request);
717
718 void blk_mq_free_plug_rqs(struct blk_plug *plug)
719 {
720         struct request *rq;
721
722         while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
723                 blk_mq_free_request(rq);
724 }
725
726 void blk_dump_rq_flags(struct request *rq, char *msg)
727 {
728         printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
729                 rq->q->disk ? rq->q->disk->disk_name : "?",
730                 (__force unsigned long long) rq->cmd_flags);
731
732         printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
733                (unsigned long long)blk_rq_pos(rq),
734                blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
735         printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
736                rq->bio, rq->biotail, blk_rq_bytes(rq));
737 }
738 EXPORT_SYMBOL(blk_dump_rq_flags);
739
740 static void req_bio_endio(struct request *rq, struct bio *bio,
741                           unsigned int nbytes, blk_status_t error)
742 {
743         if (unlikely(error)) {
744                 bio->bi_status = error;
745         } else if (req_op(rq) == REQ_OP_ZONE_APPEND) {
746                 /*
747                  * Partial zone append completions cannot be supported as the
748                  * BIO fragments may end up not being written sequentially.
749                  */
750                 if (bio->bi_iter.bi_size != nbytes)
751                         bio->bi_status = BLK_STS_IOERR;
752                 else
753                         bio->bi_iter.bi_sector = rq->__sector;
754         }
755
756         bio_advance(bio, nbytes);
757
758         if (unlikely(rq->rq_flags & RQF_QUIET))
759                 bio_set_flag(bio, BIO_QUIET);
760         /* don't actually finish bio if it's part of flush sequence */
761         if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
762                 bio_endio(bio);
763 }
764
765 static void blk_account_io_completion(struct request *req, unsigned int bytes)
766 {
767         if (req->part && blk_do_io_stat(req)) {
768                 const int sgrp = op_stat_group(req_op(req));
769
770                 part_stat_lock();
771                 part_stat_add(req->part, sectors[sgrp], bytes >> 9);
772                 part_stat_unlock();
773         }
774 }
775
776 static void blk_print_req_error(struct request *req, blk_status_t status)
777 {
778         printk_ratelimited(KERN_ERR
779                 "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
780                 "phys_seg %u prio class %u\n",
781                 blk_status_to_str(status),
782                 req->q->disk ? req->q->disk->disk_name : "?",
783                 blk_rq_pos(req), (__force u32)req_op(req),
784                 blk_op_str(req_op(req)),
785                 (__force u32)(req->cmd_flags & ~REQ_OP_MASK),
786                 req->nr_phys_segments,
787                 IOPRIO_PRIO_CLASS(req->ioprio));
788 }
789
790 /*
791  * Fully end IO on a request. Does not support partial completions, or
792  * errors.
793  */
794 static void blk_complete_request(struct request *req)
795 {
796         const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
797         int total_bytes = blk_rq_bytes(req);
798         struct bio *bio = req->bio;
799
800         trace_block_rq_complete(req, BLK_STS_OK, total_bytes);
801
802         if (!bio)
803                 return;
804
805 #ifdef CONFIG_BLK_DEV_INTEGRITY
806         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
807                 req->q->integrity.profile->complete_fn(req, total_bytes);
808 #endif
809
810         blk_account_io_completion(req, total_bytes);
811
812         do {
813                 struct bio *next = bio->bi_next;
814
815                 /* Completion has already been traced */
816                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
817
818                 if (req_op(req) == REQ_OP_ZONE_APPEND)
819                         bio->bi_iter.bi_sector = req->__sector;
820
821                 if (!is_flush)
822                         bio_endio(bio);
823                 bio = next;
824         } while (bio);
825
826         /*
827          * Reset counters so that the request stacking driver
828          * can find how many bytes remain in the request
829          * later.
830          */
831         if (!req->end_io) {
832                 req->bio = NULL;
833                 req->__data_len = 0;
834         }
835 }
836
837 /**
838  * blk_update_request - Complete multiple bytes without completing the request
839  * @req:      the request being processed
840  * @error:    block status code
841  * @nr_bytes: number of bytes to complete for @req
842  *
843  * Description:
844  *     Ends I/O on a number of bytes attached to @req, but doesn't complete
845  *     the request structure even if @req has no bytes left over.
846  *     If @req has bytes left over, it is set up for the next segment range.
847  *
848  *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
849  *     %false return from this function.
850  *
851  * Note:
852  *      The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function
853  *      except in the consistency check at the end of this function.
854  *
855  * Return:
856  *     %false - this request doesn't have any more data
857  *     %true  - this request has more data
858  **/
859 bool blk_update_request(struct request *req, blk_status_t error,
860                 unsigned int nr_bytes)
861 {
862         int total_bytes;
863
864         trace_block_rq_complete(req, error, nr_bytes);
865
866         if (!req->bio)
867                 return false;
868
869 #ifdef CONFIG_BLK_DEV_INTEGRITY
870         if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
871             error == BLK_STS_OK)
872                 req->q->integrity.profile->complete_fn(req, nr_bytes);
873 #endif
874
875         if (unlikely(error && !blk_rq_is_passthrough(req) &&
876                      !(req->rq_flags & RQF_QUIET)) &&
877                      !test_bit(GD_DEAD, &req->q->disk->state)) {
878                 blk_print_req_error(req, error);
879                 trace_block_rq_error(req, error, nr_bytes);
880         }
881
882         blk_account_io_completion(req, nr_bytes);
883
884         total_bytes = 0;
885         while (req->bio) {
886                 struct bio *bio = req->bio;
887                 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
888
889                 if (bio_bytes == bio->bi_iter.bi_size)
890                         req->bio = bio->bi_next;
891
892                 /* Completion has already been traced */
893                 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
894                 req_bio_endio(req, bio, bio_bytes, error);
895
896                 total_bytes += bio_bytes;
897                 nr_bytes -= bio_bytes;
898
899                 if (!nr_bytes)
900                         break;
901         }
902
903         /*
904          * completely done
905          */
906         if (!req->bio) {
907                 /*
908                  * Reset counters so that the request stacking driver
909                  * can find how many bytes remain in the request
910                  * later.
911                  */
912                 req->__data_len = 0;
913                 return false;
914         }
915
916         req->__data_len -= total_bytes;
917
918         /* update sector only for requests with clear definition of sector */
919         if (!blk_rq_is_passthrough(req))
920                 req->__sector += total_bytes >> 9;
921
922         /* mixed attributes always follow the first bio */
923         if (req->rq_flags & RQF_MIXED_MERGE) {
924                 req->cmd_flags &= ~REQ_FAILFAST_MASK;
925                 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
926         }
927
928         if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
929                 /*
930                  * If total number of sectors is less than the first segment
931                  * size, something has gone terribly wrong.
932                  */
933                 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
934                         blk_dump_rq_flags(req, "request botched");
935                         req->__data_len = blk_rq_cur_bytes(req);
936                 }
937
938                 /* recalculate the number of segments */
939                 req->nr_phys_segments = blk_recalc_rq_segments(req);
940         }
941
942         return true;
943 }
944 EXPORT_SYMBOL_GPL(blk_update_request);
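/*
 * Illustrative sketch (documentation only, not part of blk-mq.c): a driver
 * completing a request piecewise uses the return value to decide whether
 * more data remains.  "bytes_done" is a hypothetical per-completion byte
 * count reported by the hardware:
 *
 *	if (blk_update_request(rq, BLK_STS_OK, bytes_done))
 *		return;				// more segments outstanding
 *	__blk_mq_end_request(rq, BLK_STS_OK);	// request fully completed
 */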
945
946 static void __blk_account_io_done(struct request *req, u64 now)
947 {
948         const int sgrp = op_stat_group(req_op(req));
949
950         part_stat_lock();
951         update_io_ticks(req->part, jiffies, true);
952         part_stat_inc(req->part, ios[sgrp]);
953         part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
954         part_stat_unlock();
955 }
956
957 static inline void blk_account_io_done(struct request *req, u64 now)
958 {
959         /*
960          * Account IO completion.  flush_rq isn't accounted as a
961          * normal IO on queueing nor completion.  Accounting the
962          * containing request is enough.
963          */
964         if (blk_do_io_stat(req) && req->part &&
965             !(req->rq_flags & RQF_FLUSH_SEQ))
966                 __blk_account_io_done(req, now);
967 }
968
969 static void __blk_account_io_start(struct request *rq)
970 {
971         /*
972          * All non-passthrough requests are created from a bio with one
973          * exception: when a flush command that is part of a flush sequence
974          * generated by the state machine in blk-flush.c is cloned onto the
975          * lower device by dm-multipath we can get here without a bio.
976          */
977         if (rq->bio)
978                 rq->part = rq->bio->bi_bdev;
979         else
980                 rq->part = rq->q->disk->part0;
981
982         part_stat_lock();
983         update_io_ticks(rq->part, jiffies, false);
984         part_stat_unlock();
985 }
986
987 static inline void blk_account_io_start(struct request *req)
988 {
989         if (blk_do_io_stat(req))
990                 __blk_account_io_start(req);
991 }
992
993 static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
994 {
995         if (rq->rq_flags & RQF_STATS) {
996                 blk_mq_poll_stats_start(rq->q);
997                 blk_stat_add(rq, now);
998         }
999
1000         blk_mq_sched_completed_request(rq, now);
1001         blk_account_io_done(rq, now);
1002 }
1003
1004 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
1005 {
1006         if (blk_mq_need_time_stamp(rq))
1007                 __blk_mq_end_request_acct(rq, ktime_get_ns());
1008
1009         if (rq->end_io) {
1010                 rq_qos_done(rq->q, rq);
1011                 if (rq->end_io(rq, error) == RQ_END_IO_FREE)
1012                         blk_mq_free_request(rq);
1013         } else {
1014                 blk_mq_free_request(rq);
1015         }
1016 }
1017 EXPORT_SYMBOL(__blk_mq_end_request);
1018
1019 void blk_mq_end_request(struct request *rq, blk_status_t error)
1020 {
1021         if (blk_update_request(rq, error, blk_rq_bytes(rq)))
1022                 BUG();
1023         __blk_mq_end_request(rq, error);
1024 }
1025 EXPORT_SYMBOL(blk_mq_end_request);
1026
1027 #define TAG_COMP_BATCH          32
1028
1029 static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
1030                                           int *tag_array, int nr_tags)
1031 {
1032         struct request_queue *q = hctx->queue;
1033
1034         /*
1035          * All requests should have been marked as RQF_MQ_INFLIGHT, so
1036          * update hctx->nr_active in batch
1037          */
1038         if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
1039                 __blk_mq_sub_active_requests(hctx, nr_tags);
1040
1041         blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
1042         percpu_ref_put_many(&q->q_usage_counter, nr_tags);
1043 }
1044
1045 void blk_mq_end_request_batch(struct io_comp_batch *iob)
1046 {
1047         int tags[TAG_COMP_BATCH], nr_tags = 0;
1048         struct blk_mq_hw_ctx *cur_hctx = NULL;
1049         struct request *rq;
1050         u64 now = 0;
1051
1052         if (iob->need_ts)
1053                 now = ktime_get_ns();
1054
1055         while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
1056                 prefetch(rq->bio);
1057                 prefetch(rq->rq_next);
1058
1059                 blk_complete_request(rq);
1060                 if (iob->need_ts)
1061                         __blk_mq_end_request_acct(rq, now);
1062
1063                 rq_qos_done(rq->q, rq);
1064
1065                 /*
1066                  * If end_io handler returns NONE, then it still has
1067                  * ownership of the request.
1068                  */
1069                 if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
1070                         continue;
1071
1072                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1073                 if (!req_ref_put_and_test(rq))
1074                         continue;
1075
1076                 blk_crypto_free_request(rq);
1077                 blk_pm_mark_last_busy(rq);
1078
1079                 if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
1080                         if (cur_hctx)
1081                                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1082                         nr_tags = 0;
1083                         cur_hctx = rq->mq_hctx;
1084                 }
1085                 tags[nr_tags++] = rq->tag;
1086         }
1087
1088         if (nr_tags)
1089                 blk_mq_flush_tag_batch(cur_hctx, tags, nr_tags);
1090 }
1091 EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);
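/*
 * Illustrative sketch (documentation only, not part of blk-mq.c): drivers on
 * the polled path usually collect completions into an io_comp_batch instead
 * of ending each request individually.  blk_mq_add_to_batch() from
 * <linux/blk-mq.h> is assumed here; my_drv_complete_batch() is hypothetical:
 *
 *	static void my_drv_complete_batch(struct io_comp_batch *iob)
 *	{
 *		// per-request driver teardown could be done here
 *		blk_mq_end_request_batch(iob);
 *	}
 *
 *	// in the ->poll() loop, for each completed request:
 *	if (!blk_mq_add_to_batch(rq, iob, 0, my_drv_complete_batch))
 *		blk_mq_complete_request(rq);
 */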
1092
1093 static void blk_complete_reqs(struct llist_head *list)
1094 {
1095         struct llist_node *entry = llist_reverse_order(llist_del_all(list));
1096         struct request *rq, *next;
1097
1098         llist_for_each_entry_safe(rq, next, entry, ipi_list)
1099                 rq->q->mq_ops->complete(rq);
1100 }
1101
1102 static __latent_entropy void blk_done_softirq(struct softirq_action *h)
1103 {
1104         blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
1105 }
1106
1107 static int blk_softirq_cpu_dead(unsigned int cpu)
1108 {
1109         blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
1110         return 0;
1111 }
1112
1113 static void __blk_mq_complete_request_remote(void *data)
1114 {
1115         __raise_softirq_irqoff(BLOCK_SOFTIRQ);
1116 }
1117
1118 static inline bool blk_mq_complete_need_ipi(struct request *rq)
1119 {
1120         int cpu = raw_smp_processor_id();
1121
1122         if (!IS_ENABLED(CONFIG_SMP) ||
1123             !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags))
1124                 return false;
1125         /*
1126          * With force threaded interrupts enabled, raising softirq from an SMP
1127          * function call will always result in waking the ksoftirqd thread.
1128          * This is probably worse than completing the request on a different
1129          * cache domain.
1130          */
1131         if (force_irqthreads())
1132                 return false;
1133
1134         /* same CPU or cache domain?  Complete locally */
1135         if (cpu == rq->mq_ctx->cpu ||
1136             (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) &&
1137              cpus_share_cache(cpu, rq->mq_ctx->cpu)))
1138                 return false;
1139
1140         /* don't try to IPI to an offline CPU */
1141         return cpu_online(rq->mq_ctx->cpu);
1142 }
1143
1144 static void blk_mq_complete_send_ipi(struct request *rq)
1145 {
1146         struct llist_head *list;
1147         unsigned int cpu;
1148
1149         cpu = rq->mq_ctx->cpu;
1150         list = &per_cpu(blk_cpu_done, cpu);
1151         if (llist_add(&rq->ipi_list, list)) {
1152                 INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
1153                 smp_call_function_single_async(cpu, &rq->csd);
1154         }
1155 }
1156
1157 static void blk_mq_raise_softirq(struct request *rq)
1158 {
1159         struct llist_head *list;
1160
1161         preempt_disable();
1162         list = this_cpu_ptr(&blk_cpu_done);
1163         if (llist_add(&rq->ipi_list, list))
1164                 raise_softirq(BLOCK_SOFTIRQ);
1165         preempt_enable();
1166 }
1167
1168 bool blk_mq_complete_request_remote(struct request *rq)
1169 {
1170         WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
1171
1172         /*
1173          * For a request whose hctx has only one ctx mapping,
1174          * or for a polled request, always complete locally;
1175          * it's pointless to redirect the completion.
1176          */
1177         if (rq->mq_hctx->nr_ctx == 1 ||
1178                 rq->cmd_flags & REQ_POLLED)
1179                 return false;
1180
1181         if (blk_mq_complete_need_ipi(rq)) {
1182                 blk_mq_complete_send_ipi(rq);
1183                 return true;
1184         }
1185
1186         if (rq->q->nr_hw_queues == 1) {
1187                 blk_mq_raise_softirq(rq);
1188                 return true;
1189         }
1190         return false;
1191 }
1192 EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
1193
1194 /**
1195  * blk_mq_complete_request - end I/O on a request
1196  * @rq:         the request being processed
1197  *
1198  * Description:
1199  *      Complete a request by scheduling the ->complete_rq operation.
1200  **/
1201 void blk_mq_complete_request(struct request *rq)
1202 {
1203         if (!blk_mq_complete_request_remote(rq))
1204                 rq->q->mq_ops->complete(rq);
1205 }
1206 EXPORT_SYMBOL(blk_mq_complete_request);
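/*
 * Illustrative sketch (documentation only, not part of blk-mq.c): an
 * interrupt handler normally only marks the request as complete; whether the
 * ->complete() callback then runs locally, via IPI or via softirq is decided
 * by blk_mq_complete_request_remote() above.  The helpers prefixed with
 * my_drv_ are hypothetical:
 *
 *	static irqreturn_t my_drv_irq(int irq, void *data)
 *	{
 *		struct request *rq = my_drv_pop_completed(data);
 *
 *		blk_mq_complete_request(rq);
 *		return IRQ_HANDLED;
 *	}
 */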
1207
1208 /**
1209  * blk_mq_start_request - Start processing a request
1210  * @rq: Pointer to request to be started
1211  *
1212  * Function used by device drivers to notify the block layer that a request
1213  * is going to be processed now, so the block layer can do proper
1214  * initialization such as starting the timeout timer.
1215  */
1216 void blk_mq_start_request(struct request *rq)
1217 {
1218         struct request_queue *q = rq->q;
1219
1220         trace_block_rq_issue(rq);
1221
1222         if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
1223                 rq->io_start_time_ns = ktime_get_ns();
1224                 rq->stats_sectors = blk_rq_sectors(rq);
1225                 rq->rq_flags |= RQF_STATS;
1226                 rq_qos_issue(q, rq);
1227         }
1228
1229         WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);
1230
1231         blk_add_timer(rq);
1232         WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT);
1233
1234 #ifdef CONFIG_BLK_DEV_INTEGRITY
1235         if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
1236                 q->integrity.profile->prepare_fn(rq);
1237 #endif
1238         if (rq->bio && rq->bio->bi_opf & REQ_POLLED)
1239                 WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq));
1240 }
1241 EXPORT_SYMBOL(blk_mq_start_request);
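/*
 * Illustrative sketch (documentation only, not part of blk-mq.c): a typical
 * ->queue_rq() implementation starts the request before handing it to the
 * hardware so that the timer added above covers the whole issue path.
 * my_drv_issue() is hypothetical:
 *
 *	static blk_status_t my_drv_queue_rq(struct blk_mq_hw_ctx *hctx,
 *					    const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (my_drv_issue(rq))
 *			return BLK_STS_RESOURCE;	// core will retry later
 *		return BLK_STS_OK;
 *	}
 */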
1242
1243 /*
1244  * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
1245  * queues. This is important for md arrays to benefit from merging
1246  * requests.
1247  */
1248 static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
1249 {
1250         if (plug->multiple_queues)
1251                 return BLK_MAX_REQUEST_COUNT * 2;
1252         return BLK_MAX_REQUEST_COUNT;
1253 }
1254
1255 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
1256 {
1257         struct request *last = rq_list_peek(&plug->mq_list);
1258
1259         if (!plug->rq_count) {
1260                 trace_block_plug(rq->q);
1261         } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
1262                    (!blk_queue_nomerges(rq->q) &&
1263                     blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
1264                 blk_mq_flush_plug_list(plug, false);
1265                 trace_block_plug(rq->q);
1266         }
1267
1268         if (!plug->multiple_queues && last && last->q != rq->q)
1269                 plug->multiple_queues = true;
1270         if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
1271                 plug->has_elevator = true;
1272         rq->rq_next = NULL;
1273         rq_list_add(&plug->mq_list, rq);
1274         plug->rq_count++;
1275 }
1276
1277 /**
1278  * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
1279  * @rq:         request to insert
1280  * @at_head:    insert request at head or tail of queue
1281  *
1282  * Description:
1283  *    Insert a fully prepared request at the back of the I/O scheduler queue
1284  *    for execution.  Don't wait for completion.
1285  *
1286  * Note:
1287  *    This function will invoke @done directly if the queue is dead.
1288  *    This function will invoke rq->end_io directly if the queue is dead.
1289 void blk_execute_rq_nowait(struct request *rq, bool at_head)
1290 {
1291         WARN_ON(irqs_disabled());
1292         WARN_ON(!blk_rq_is_passthrough(rq));
1293
1294         blk_account_io_start(rq);
1295
1296         /*
1297          * As plugging can be enabled for passthrough requests on a zoned
1298          * device, directly accessing the plug instead of using blk_mq_plug()
1299          * should not have any consequences.
1300          */
1301         if (current->plug)
1302                 blk_add_rq_to_plug(current->plug, rq);
1303         else
1304                 blk_mq_sched_insert_request(rq, at_head, true, false);
1305 }
1306 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
1307
1308 struct blk_rq_wait {
1309         struct completion done;
1310         blk_status_t ret;
1311 };
1312
1313 static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
1314 {
1315         struct blk_rq_wait *wait = rq->end_io_data;
1316
1317         wait->ret = ret;
1318         complete(&wait->done);
1319         return RQ_END_IO_NONE;
1320 }
1321
1322 bool blk_rq_is_poll(struct request *rq)
1323 {
1324         if (!rq->mq_hctx)
1325                 return false;
1326         if (rq->mq_hctx->type != HCTX_TYPE_POLL)
1327                 return false;
1328         if (WARN_ON_ONCE(!rq->bio))
1329                 return false;
1330         return true;
1331 }
1332 EXPORT_SYMBOL_GPL(blk_rq_is_poll);
1333
1334 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
1335 {
1336         do {
1337                 bio_poll(rq->bio, NULL, 0);
1338                 cond_resched();
1339         } while (!completion_done(wait));
1340 }
1341
1342 /**
1343  * blk_execute_rq - insert a request into queue for execution
1344  * @rq:         request to insert
1345  * @at_head:    insert request at head or tail of queue
1346  *
1347  * Description:
1348  *    Insert a fully prepared request at the back of the I/O scheduler queue
1349  *    for execution and wait for completion.
1350  * Return: The blk_status_t result provided to blk_mq_end_request().
1351  */
1352 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
1353 {
1354         struct blk_rq_wait wait = {
1355                 .done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
1356         };
1357
1358         WARN_ON(irqs_disabled());
1359         WARN_ON(!blk_rq_is_passthrough(rq));
1360
1361         rq->end_io_data = &wait;
1362         rq->end_io = blk_end_sync_rq;
1363
1364         blk_account_io_start(rq);
1365         blk_mq_sched_insert_request(rq, at_head, true, false);
1366
1367         if (blk_rq_is_poll(rq)) {
1368                 blk_rq_poll_completion(rq, &wait.done);
1369         } else {
1370                 /*
1371                  * Prevent hang_check timer from firing at us during very long
1372                  * I/O
1373                  */
1374                 unsigned long hang_check = sysctl_hung_task_timeout_secs;
1375
1376                 if (hang_check)
1377                         while (!wait_for_completion_io_timeout(&wait.done,
1378                                         hang_check * (HZ/2)))
1379                                 ;
1380                 else
1381                         wait_for_completion_io(&wait.done);
1382         }
1383
1384         return wait.ret;
1385 }
1386 EXPORT_SYMBOL(blk_execute_rq);
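/*
 * Illustrative sketch (documentation only, not part of blk-mq.c): a
 * synchronous passthrough submission combines the allocation and execution
 * helpers; the request must be fully prepared before it is inserted:
 *
 *	struct request *rq;
 *	blk_status_t status;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	// ... attach data and the driver-specific command here ...
 *	status = blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 *	return blk_status_to_errno(status);
 */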
1387
1388 static void __blk_mq_requeue_request(struct request *rq)
1389 {
1390         struct request_queue *q = rq->q;
1391
1392         blk_mq_put_driver_tag(rq);
1393
1394         trace_block_rq_requeue(rq);
1395         rq_qos_requeue(q, rq);
1396
1397         if (blk_mq_request_started(rq)) {
1398                 WRITE_ONCE(rq->state, MQ_RQ_IDLE);
1399                 rq->rq_flags &= ~RQF_TIMED_OUT;
1400         }
1401 }
1402
1403 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
1404 {
1405         __blk_mq_requeue_request(rq);
1406
1407         /* this request will be re-inserted to io scheduler queue */
1408         blk_mq_sched_requeue_request(rq);
1409
1410         blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
1411 }
1412 EXPORT_SYMBOL(blk_mq_requeue_request);
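/*
 * Illustrative sketch (documentation only, not part of blk-mq.c): a driver
 * that already started a request but then hit a transient resource shortage
 * can hand it back for a later retry; passing true also kicks the requeue
 * work immediately:
 *
 *	blk_mq_requeue_request(rq, true);
 *
 * If the retry should be delayed instead, the driver can pass false and use
 * blk_mq_delay_kick_requeue_list(q, msecs) when it is ready.
 */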
1413
1414 static void blk_mq_requeue_work(struct work_struct *work)
1415 {
1416         struct request_queue *q =
1417                 container_of(work, struct request_queue, requeue_work.work);
1418         LIST_HEAD(rq_list);
1419         struct request *rq, *next;
1420
1421         spin_lock_irq(&q->requeue_lock);
1422         list_splice_init(&q->requeue_list, &rq_list);
1423         spin_unlock_irq(&q->requeue_lock);
1424
1425         list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
1426                 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
1427                         continue;
1428
1429                 rq->rq_flags &= ~RQF_SOFTBARRIER;
1430                 list_del_init(&rq->queuelist);
1431                 /*
1432                  * If RQF_DONTPREP is set, rq contains driver-specific
1433                  * data, so insert it into the hctx dispatch list to
1434                  * avoid any merge.
1435                  */
1436                 if (rq->rq_flags & RQF_DONTPREP)
1437                         blk_mq_request_bypass_insert(rq, false, false);
1438                 else
1439                         blk_mq_sched_insert_request(rq, true, false, false);
1440         }
1441
1442         while (!list_empty(&rq_list)) {
1443                 rq = list_entry(rq_list.next, struct request, queuelist);
1444                 list_del_init(&rq->queuelist);
1445                 blk_mq_sched_insert_request(rq, false, false, false);
1446         }
1447
1448         blk_mq_run_hw_queues(q, false);
1449 }
1450
1451 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
1452                                 bool kick_requeue_list)
1453 {
1454         struct request_queue *q = rq->q;
1455         unsigned long flags;
1456
1457         /*
1458          * We abuse this flag that is otherwise used by the I/O scheduler to
1459          * request head insertion from the workqueue.
1460          */
1461         BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);
1462
1463         spin_lock_irqsave(&q->requeue_lock, flags);
1464         if (at_head) {
1465                 rq->rq_flags |= RQF_SOFTBARRIER;
1466                 list_add(&rq->queuelist, &q->requeue_list);
1467         } else {
1468                 list_add_tail(&rq->queuelist, &q->requeue_list);
1469         }
1470         spin_unlock_irqrestore(&q->requeue_lock, flags);
1471
1472         if (kick_requeue_list)
1473                 blk_mq_kick_requeue_list(q);
1474 }
1475
1476 void blk_mq_kick_requeue_list(struct request_queue *q)
1477 {
1478         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
1479 }
1480 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
1481
1482 void blk_mq_delay_kick_requeue_list(struct request_queue *q,
1483                                     unsigned long msecs)
1484 {
1485         kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
1486                                     msecs_to_jiffies(msecs));
1487 }
1488 EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
1489
1490 static bool blk_mq_rq_inflight(struct request *rq, void *priv)
1491 {
1492         /*
1493          * If we find a request that isn't idle we know the queue is busy
1494          * as it's checked in the iter.
1495          * Return false to stop the iteration.
1496          */
1497         if (blk_mq_request_started(rq)) {
1498                 bool *busy = priv;
1499
1500                 *busy = true;
1501                 return false;
1502         }
1503
1504         return true;
1505 }
1506
1507 bool blk_mq_queue_inflight(struct request_queue *q)
1508 {
1509         bool busy = false;
1510
1511         blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
1512         return busy;
1513 }
1514 EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
1515
1516 static void blk_mq_rq_timed_out(struct request *req)
1517 {
1518         req->rq_flags |= RQF_TIMED_OUT;
1519         if (req->q->mq_ops->timeout) {
1520                 enum blk_eh_timer_return ret;
1521
1522                 ret = req->q->mq_ops->timeout(req);
1523                 if (ret == BLK_EH_DONE)
1524                         return;
1525                 WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
1526         }
1527
1528         blk_add_timer(req);
1529 }
1530
1531 static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
1532 {
1533         unsigned long deadline;
1534
1535         if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
1536                 return false;
1537         if (rq->rq_flags & RQF_TIMED_OUT)
1538                 return false;
1539
1540         deadline = READ_ONCE(rq->deadline);
1541         if (time_after_eq(jiffies, deadline))
1542                 return true;
1543
1544         if (*next == 0)
1545                 *next = deadline;
1546         else if (time_after(*next, deadline))
1547                 *next = deadline;
1548         return false;
1549 }
1550
1551 void blk_mq_put_rq_ref(struct request *rq)
1552 {
1553         if (is_flush_rq(rq)) {
1554                 if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
1555                         blk_mq_free_request(rq);
1556         } else if (req_ref_put_and_test(rq)) {
1557                 __blk_mq_free_request(rq);
1558         }
1559 }
1560
1561 static bool blk_mq_check_expired(struct request *rq, void *priv)
1562 {
1563         unsigned long *next = priv;
1564
1565         /*
1566          * blk_mq_queue_tag_busy_iter() has locked the request, so it cannot
1567          * be reallocated underneath the timeout handler's processing, so
1568          * the expire check is reliable. If the request is not expired, then
1569          * it was completed and reallocated as a new request after returning
1570          * from blk_mq_check_expired().
1571          */
1572         if (blk_mq_req_expired(rq, next))
1573                 blk_mq_rq_timed_out(rq);
1574         return true;
1575 }
1576
1577 static void blk_mq_timeout_work(struct work_struct *work)
1578 {
1579         struct request_queue *q =
1580                 container_of(work, struct request_queue, timeout_work);
1581         unsigned long next = 0;
1582         struct blk_mq_hw_ctx *hctx;
1583         unsigned long i;
1584
1585         /* A deadlock might occur if a request is stuck requiring a
1586          * timeout at the same time a queue freeze is waiting for
1587          * completion, since the timeout code would not be able to
1588          * acquire the queue reference here.
1589          *
1590          * That's why we don't use blk_queue_enter here; instead, we use
1591          * percpu_ref_tryget directly, because we need to be able to
1592          * obtain a reference even in the short window between the queue
1593          * starting to freeze, by dropping the first reference in
1594          * blk_freeze_queue_start, and the moment the last request is
1595          * consumed, marked by the instant q_usage_counter reaches
1596          * zero.
1597          */
1598         if (!percpu_ref_tryget(&q->q_usage_counter))
1599                 return;
1600
1601         blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next);
1602
1603         if (next != 0) {
1604                 mod_timer(&q->timeout, next);
1605         } else {
1606                 /*
1607                  * Request timeouts are handled as a forward rolling timer. If
1608                  * we end up here it means that no requests are pending and
1609                  * also that no request has been pending for a while. Mark
1610                  * each hctx as idle.
1611                  */
1612                 queue_for_each_hw_ctx(q, hctx, i) {
1613                         /* the hctx may be unmapped, so check it here */
1614                         if (blk_mq_hw_queue_mapped(hctx))
1615                                 blk_mq_tag_idle(hctx);
1616                 }
1617         }
1618         blk_queue_exit(q);
1619 }
1620
1621 struct flush_busy_ctx_data {
1622         struct blk_mq_hw_ctx *hctx;
1623         struct list_head *list;
1624 };
1625
1626 static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
1627 {
1628         struct flush_busy_ctx_data *flush_data = data;
1629         struct blk_mq_hw_ctx *hctx = flush_data->hctx;
1630         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1631         enum hctx_type type = hctx->type;
1632
1633         spin_lock(&ctx->lock);
1634         list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
1635         sbitmap_clear_bit(sb, bitnr);
1636         spin_unlock(&ctx->lock);
1637         return true;
1638 }
1639
1640 /*
1641  * Process software queues that have been marked busy, splicing them
1642  * to the for-dispatch list.
1643  */
1644 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
1645 {
1646         struct flush_busy_ctx_data data = {
1647                 .hctx = hctx,
1648                 .list = list,
1649         };
1650
1651         sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
1652 }
1653 EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);
1654
1655 struct dispatch_rq_data {
1656         struct blk_mq_hw_ctx *hctx;
1657         struct request *rq;
1658 };
1659
1660 static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
1661                 void *data)
1662 {
1663         struct dispatch_rq_data *dispatch_data = data;
1664         struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
1665         struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
1666         enum hctx_type type = hctx->type;
1667
1668         spin_lock(&ctx->lock);
1669         if (!list_empty(&ctx->rq_lists[type])) {
1670                 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
1671                 list_del_init(&dispatch_data->rq->queuelist);
1672                 if (list_empty(&ctx->rq_lists[type]))
1673                         sbitmap_clear_bit(sb, bitnr);
1674         }
1675         spin_unlock(&ctx->lock);
1676
1677         return !dispatch_data->rq;
1678 }
1679
1680 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
1681                                         struct blk_mq_ctx *start)
1682 {
1683         unsigned off = start ? start->index_hw[hctx->type] : 0;
1684         struct dispatch_rq_data data = {
1685                 .hctx = hctx,
1686                 .rq   = NULL,
1687         };
1688
1689         __sbitmap_for_each_set(&hctx->ctx_map, off,
1690                                dispatch_rq_from_ctx, &data);
1691
1692         return data.rq;
1693 }
1694
1695 static bool __blk_mq_alloc_driver_tag(struct request *rq)
1696 {
1697         struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
1698         unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
1699         int tag;
1700
1701         blk_mq_tag_busy(rq->mq_hctx);
1702
1703         if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
1704                 bt = &rq->mq_hctx->tags->breserved_tags;
1705                 tag_offset = 0;
1706         } else {
1707                 if (!hctx_may_queue(rq->mq_hctx, bt))
1708                         return false;
1709         }
1710
1711         tag = __sbitmap_queue_get(bt);
1712         if (tag == BLK_MQ_NO_TAG)
1713                 return false;
1714
1715         rq->tag = tag + tag_offset;
1716         return true;
1717 }
1718
1719 bool __blk_mq_get_driver_tag(struct blk_mq_hw_ctx *hctx, struct request *rq)
1720 {
1721         if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_alloc_driver_tag(rq))
1722                 return false;
1723
1724         if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) &&
1725                         !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
1726                 rq->rq_flags |= RQF_MQ_INFLIGHT;
1727                 __blk_mq_inc_active_requests(hctx);
1728         }
1729         hctx->tags->rqs[rq->tag] = rq;
1730         return true;
1731 }
1732
1733 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1734                                 int flags, void *key)
1735 {
1736         struct blk_mq_hw_ctx *hctx;
1737
1738         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1739
1740         spin_lock(&hctx->dispatch_wait_lock);
1741         if (!list_empty(&wait->entry)) {
1742                 struct sbitmap_queue *sbq;
1743
1744                 list_del_init(&wait->entry);
1745                 sbq = &hctx->tags->bitmap_tags;
1746                 atomic_dec(&sbq->ws_active);
1747         }
1748         spin_unlock(&hctx->dispatch_wait_lock);
1749
1750         blk_mq_run_hw_queue(hctx, true);
1751         return 1;
1752 }
1753
1754 /*
1755  * Mark us waiting for a tag. For shared tags, this involves hooking us into
1756  * the tag wakeups. For non-shared tags, we can simply mark ourselves as
1757  * needing a restart. In both cases, take care to check the condition again after
1758  * marking us as waiting.
1759  */
1760 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1761                                  struct request *rq)
1762 {
1763         struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
1764         struct wait_queue_head *wq;
1765         wait_queue_entry_t *wait;
1766         bool ret;
1767
1768         if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
1769                 blk_mq_sched_mark_restart_hctx(hctx);
1770
1771                 /*
1772                  * It's possible that a tag was freed in the window between the
1773                  * allocation failure and adding the hardware queue to the wait
1774                  * queue.
1775                  *
1776                  * Don't clear RESTART here, someone else could have set it.
1777                  * At most this will cost an extra queue run.
1778                  */
1779                 return blk_mq_get_driver_tag(rq);
1780         }
1781
1782         wait = &hctx->dispatch_wait;
1783         if (!list_empty_careful(&wait->entry))
1784                 return false;
1785
1786         wq = &bt_wait_ptr(sbq, hctx)->wait;
1787
1788         spin_lock_irq(&wq->lock);
1789         spin_lock(&hctx->dispatch_wait_lock);
1790         if (!list_empty(&wait->entry)) {
1791                 spin_unlock(&hctx->dispatch_wait_lock);
1792                 spin_unlock_irq(&wq->lock);
1793                 return false;
1794         }
1795
1796         atomic_inc(&sbq->ws_active);
1797         wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1798         __add_wait_queue(wq, wait);
1799
1800         /*
1801          * It's possible that a tag was freed in the window between the
1802          * allocation failure and adding the hardware queue to the wait
1803          * queue.
1804          */
1805         ret = blk_mq_get_driver_tag(rq);
1806         if (!ret) {
1807                 spin_unlock(&hctx->dispatch_wait_lock);
1808                 spin_unlock_irq(&wq->lock);
1809                 return false;
1810         }
1811
1812         /*
1813          * We got a tag, remove ourselves from the wait queue to ensure
1814          * someone else gets the wakeup.
1815          */
1816         list_del_init(&wait->entry);
1817         atomic_dec(&sbq->ws_active);
1818         spin_unlock(&hctx->dispatch_wait_lock);
1819         spin_unlock_irq(&wq->lock);
1820
1821         return true;
1822 }
1823
1824 #define BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT  8
1825 #define BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR  4
1826 /*
1827  * Update dispatch busy with the Exponential Weighted Moving Average (EWMA):
1828  * - EWMA is a simple way to compute a running average value
1829  * - a weight (7/8 and 1/8) is applied so that it decreases exponentially
1830  * - a factor of 4 is used to avoid results that are too small (i.e. 0); the
1831  *   exact factor doesn't matter because EWMA decreases exponentially
1832  */
1833 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1834 {
1835         unsigned int ewma;
1836
1837         ewma = hctx->dispatch_busy;
1838
1839         if (!ewma && !busy)
1840                 return;
1841
1842         ewma *= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT - 1;
1843         if (busy)
1844                 ewma += 1 << BLK_MQ_DISPATCH_BUSY_EWMA_FACTOR;
1845         ewma /= BLK_MQ_DISPATCH_BUSY_EWMA_WEIGHT;
1846
1847         hctx->dispatch_busy = ewma;
1848 }
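/*
 * Worked example (added for illustration, not part of the original source):
 * with WEIGHT == 8 and FACTOR == 4, the update above computes
 *   busy:  ewma = (ewma * 7 + 16) / 8
 *   idle:  ewma = (ewma * 7) / 8
 * Starting from ewma == 0, one busy update gives (0 * 7 + 16) / 8 == 2;
 * from ewma == 8, one idle update gives (8 * 7) / 8 == 7, so the value rises
 * while dispatches keep failing and decays once they succeed again.
 */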
1849
1850 #define BLK_MQ_RESOURCE_DELAY   3               /* ms units */
1851
1852 static void blk_mq_handle_dev_resource(struct request *rq,
1853                                        struct list_head *list)
1854 {
1855         struct request *next =
1856                 list_first_entry_or_null(list, struct request, queuelist);
1857
1858         /*
1859          * If an I/O scheduler has been configured and we got a driver tag for
1860          * the next request already, free it.
1861          */
1862         if (next)
1863                 blk_mq_put_driver_tag(next);
1864
1865         list_add(&rq->queuelist, list);
1866         __blk_mq_requeue_request(rq);
1867 }
1868
1869 static void blk_mq_handle_zone_resource(struct request *rq,
1870                                         struct list_head *zone_list)
1871 {
1872         /*
1873          * If we end up here it is because we cannot dispatch a request to a
1874          * specific zone due to LLD level zone-write locking or other zone
1875          * related resource not being available. In this case, set the request
1876          * aside in zone_list for retrying it later.
1877          */
1878         list_add(&rq->queuelist, zone_list);
1879         __blk_mq_requeue_request(rq);
1880 }
1881
1882 enum prep_dispatch {
1883         PREP_DISPATCH_OK,
1884         PREP_DISPATCH_NO_TAG,
1885         PREP_DISPATCH_NO_BUDGET,
1886 };
1887
1888 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
1889                                                   bool need_budget)
1890 {
1891         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1892         int budget_token = -1;
1893
1894         if (need_budget) {
1895                 budget_token = blk_mq_get_dispatch_budget(rq->q);
1896                 if (budget_token < 0) {
1897                         blk_mq_put_driver_tag(rq);
1898                         return PREP_DISPATCH_NO_BUDGET;
1899                 }
1900                 blk_mq_set_rq_budget_token(rq, budget_token);
1901         }
1902
1903         if (!blk_mq_get_driver_tag(rq)) {
1904                 /*
1905                  * The initial allocation attempt failed, so we need to
1906                  * rerun the hardware queue when a tag is freed. The
1907                  * waitqueue takes care of that. If the queue is run
1908                  * before we add this entry back on the dispatch list,
1909                  * we'll re-run it below.
1910                  */
1911                 if (!blk_mq_mark_tag_wait(hctx, rq)) {
1912                         /*
1913                          * Any budget not obtained in this function will be
1914                          * released together when handling the partial dispatch
1915                          */
1916                         if (need_budget)
1917                                 blk_mq_put_dispatch_budget(rq->q, budget_token);
1918                         return PREP_DISPATCH_NO_TAG;
1919                 }
1920         }
1921
1922         return PREP_DISPATCH_OK;
1923 }
1924
1925 /* release all allocated budgets before calling to blk_mq_dispatch_rq_list */
1926 static void blk_mq_release_budgets(struct request_queue *q,
1927                 struct list_head *list)
1928 {
1929         struct request *rq;
1930
1931         list_for_each_entry(rq, list, queuelist) {
1932                 int budget_token = blk_mq_get_rq_budget_token(rq);
1933
1934                 if (budget_token >= 0)
1935                         blk_mq_put_dispatch_budget(q, budget_token);
1936         }
1937 }
1938
1939 /*
1940  * Returns true if we did some work AND can potentially do more.
1941  */
1942 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
1943                              unsigned int nr_budgets)
1944 {
1945         enum prep_dispatch prep;
1946         struct request_queue *q = hctx->queue;
1947         struct request *rq, *nxt;
1948         int errors, queued;
1949         blk_status_t ret = BLK_STS_OK;
1950         LIST_HEAD(zone_list);
1951         bool needs_resource = false;
1952
1953         if (list_empty(list))
1954                 return false;
1955
1956         /*
1957          * Now process all the entries, sending them to the driver.
1958          */
1959         errors = queued = 0;
1960         do {
1961                 struct blk_mq_queue_data bd;
1962
1963                 rq = list_first_entry(list, struct request, queuelist);
1964
1965                 WARN_ON_ONCE(hctx != rq->mq_hctx);
1966                 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets);
1967                 if (prep != PREP_DISPATCH_OK)
1968                         break;
1969
1970                 list_del_init(&rq->queuelist);
1971
1972                 bd.rq = rq;
1973
1974                 /*
1975                  * Flag last if we have no more requests, or if we have more
1976                  * but can't assign a driver tag to it.
1977                  */
1978                 if (list_empty(list))
1979                         bd.last = true;
1980                 else {
1981                         nxt = list_first_entry(list, struct request, queuelist);
1982                         bd.last = !blk_mq_get_driver_tag(nxt);
1983                 }
1984
1985                 /*
1986                  * Once the request is queued to the LLD, there is no need to
1987                  * account for the budget any more
1988                  */
1989                 if (nr_budgets)
1990                         nr_budgets--;
1991                 ret = q->mq_ops->queue_rq(hctx, &bd);
1992                 switch (ret) {
1993                 case BLK_STS_OK:
1994                         queued++;
1995                         break;
1996                 case BLK_STS_RESOURCE:
1997                         needs_resource = true;
1998                         fallthrough;
1999                 case BLK_STS_DEV_RESOURCE:
2000                         blk_mq_handle_dev_resource(rq, list);
2001                         goto out;
2002                 case BLK_STS_ZONE_RESOURCE:
2003                         /*
2004                          * Move the request to zone_list and keep going through
2005                          * the dispatch list to find more requests the drive can
2006                          * accept.
2007                          */
2008                         blk_mq_handle_zone_resource(rq, &zone_list);
2009                         needs_resource = true;
2010                         break;
2011                 default:
2012                         errors++;
2013                         blk_mq_end_request(rq, ret);
2014                 }
2015         } while (!list_empty(list));
2016 out:
2017         if (!list_empty(&zone_list))
2018                 list_splice_tail_init(&zone_list, list);
2019
2020         /* If we didn't flush the entire list, we could have told the driver
2021          * there was more coming, but that turned out to be a lie.
2022          */
2023         if ((!list_empty(list) || errors || needs_resource ||
2024              ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued)
2025                 q->mq_ops->commit_rqs(hctx);
2026         /*
2027          * Any items that need requeuing? Stuff them into hctx->dispatch,
2028          * that is where we will continue on next queue run.
2029          */
2030         if (!list_empty(list)) {
2031                 bool needs_restart;
2032                 /* For non-shared tags, the RESTART check will suffice */
2033                 bool no_tag = prep == PREP_DISPATCH_NO_TAG &&
2034                         (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED);
2035
2036                 if (nr_budgets)
2037                         blk_mq_release_budgets(q, list);
2038
2039                 spin_lock(&hctx->lock);
2040                 list_splice_tail_init(list, &hctx->dispatch);
2041                 spin_unlock(&hctx->lock);
2042
2043                 /*
2044                  * Order adding requests to hctx->dispatch and checking
2045                  * SCHED_RESTART flag. The pair of this smp_mb() is the one
2046                  * in blk_mq_sched_restart(). This prevents the restart code
2047                  * path from missing the newly added requests on hctx->dispatch
2048                  * while SCHED_RESTART is observed here.
2049                  */
2050                 smp_mb();
2051
2052                 /*
2053                  * If SCHED_RESTART was set by the caller of this function and
2054                  * it is no longer set that means that it was cleared by another
2055                  * thread and hence that a queue rerun is needed.
2056                  *
2057                  * If 'no_tag' is set, that means that we failed getting
2058                  * a driver tag with an I/O scheduler attached. If our dispatch
2059                  * waitqueue is no longer active, ensure that we run the queue
2060                  * AFTER adding our entries back to the list.
2061                  *
2062                  * If no I/O scheduler has been configured it is possible that
2063                  * the hardware queue got stopped and restarted before requests
2064                  * were pushed back onto the dispatch list. Rerun the queue to
2065                  * avoid starvation. Notes:
2066                  * - blk_mq_run_hw_queue() checks whether or not a queue has
2067                  *   been stopped before rerunning a queue.
2068                  * - Some but not all block drivers stop a queue before
2069                  *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
2070                  *   and dm-rq.
2071                  *
2072                  * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
2073                  * bit is set, run queue after a delay to avoid IO stalls
2074                  * that could otherwise occur if the queue is idle.  We'll do
2075                  * similar if we couldn't get budget or couldn't lock a zone
2076                  * and SCHED_RESTART is set.
2077                  */
2078                 needs_restart = blk_mq_sched_needs_restart(hctx);
2079                 if (prep == PREP_DISPATCH_NO_BUDGET)
2080                         needs_resource = true;
2081                 if (!needs_restart ||
2082                     (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2083                         blk_mq_run_hw_queue(hctx, true);
2084                 else if (needs_resource)
2085                         blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
2086
2087                 blk_mq_update_dispatch_busy(hctx, true);
2088                 return false;
2089         } else
2090                 blk_mq_update_dispatch_busy(hctx, false);
2091
2092         return (queued + errors) != 0;
2093 }
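/*
 * Illustrative sketch, not part of blk-mq: a minimal ->queue_rq showing the
 * blk_status_t contract consumed by the dispatch loop above. my_queue_rq()
 * and my_hw_submit() are hypothetical; a real driver maps the request to its
 * own command format before submitting.
 */
static int my_hw_submit(struct request *rq)
{
	/* Hypothetical: post a command to hardware, 0 on success. */
	return 0;
}

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	switch (my_hw_submit(rq)) {
	case 0:
		/* Accepted: completion will arrive later via an IRQ path. */
		return BLK_STS_OK;
	case -ENOMEM:
		/*
		 * Out of a transient resource: blk_mq_dispatch_rq_list()
		 * requeues the request and reruns the queue, possibly after
		 * BLK_MQ_RESOURCE_DELAY.
		 */
		return BLK_STS_RESOURCE;
	default:
		/* Terminal failure: the core ends the request with this status. */
		return BLK_STS_IOERR;
	}
}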
2094
2095 /**
2096  * __blk_mq_run_hw_queue - Run a hardware queue.
2097  * @hctx: Pointer to the hardware queue to run.
2098  *
2099  * Send pending requests to the hardware.
2100  */
2101 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
2102 {
2103         /*
2104          * We can't run the queue inline with ints disabled. Ensure that
2105          * we catch bad users of this early.
2106          */
2107         WARN_ON_ONCE(in_interrupt());
2108
2109         blk_mq_run_dispatch_ops(hctx->queue,
2110                         blk_mq_sched_dispatch_requests(hctx));
2111 }
2112
2113 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
2114 {
2115         int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
2116
2117         if (cpu >= nr_cpu_ids)
2118                 cpu = cpumask_first(hctx->cpumask);
2119         return cpu;
2120 }
2121
2122 /*
2123  * It'd be great if the workqueue API had a way to pass
2124  * in a mask and had some smarts for more clever placement.
2125  * For now we just round-robin here, switching for every
2126  * BLK_MQ_CPU_WORK_BATCH queued items.
2127  */
2128 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
2129 {
2130         bool tried = false;
2131         int next_cpu = hctx->next_cpu;
2132
2133         if (hctx->queue->nr_hw_queues == 1)
2134                 return WORK_CPU_UNBOUND;
2135
2136         if (--hctx->next_cpu_batch <= 0) {
2137 select_cpu:
2138                 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
2139                                 cpu_online_mask);
2140                 if (next_cpu >= nr_cpu_ids)
2141                         next_cpu = blk_mq_first_mapped_cpu(hctx);
2142                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2143         }
2144
2145         /*
2146          * Do an unbound schedule if we can't find an online CPU for this hctx;
2147          * this should only happen in the CPU DEAD handling path.
2148          */
2149         if (!cpu_online(next_cpu)) {
2150                 if (!tried) {
2151                         tried = true;
2152                         goto select_cpu;
2153                 }
2154
2155                 /*
2156                  * Make sure to re-select the CPU next time once CPUs in
2157                  * hctx->cpumask become online again.
2158                  */
2159                 hctx->next_cpu = next_cpu;
2160                 hctx->next_cpu_batch = 1;
2161                 return WORK_CPU_UNBOUND;
2162         }
2163
2164         hctx->next_cpu = next_cpu;
2165         return next_cpu;
2166 }
2167
2168 /**
2169  * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
2170  * @hctx: Pointer to the hardware queue to run.
2171  * @async: If we want to run the queue asynchronously.
2172  * @msecs: Milliseconds of delay to wait before running the queue.
2173  *
2174  * If !@async, try to run the queue now. Else, run the queue asynchronously and
2175  * with a delay of @msecs.
2176  */
2177 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
2178                                         unsigned long msecs)
2179 {
2180         if (unlikely(blk_mq_hctx_stopped(hctx)))
2181                 return;
2182
2183         if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
2184                 if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
2185                         __blk_mq_run_hw_queue(hctx);
2186                         return;
2187                 }
2188         }
2189
2190         kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
2191                                     msecs_to_jiffies(msecs));
2192 }
2193
2194 /**
2195  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
2196  * @hctx: Pointer to the hardware queue to run.
2197  * @msecs: Milliseconds of delay to wait before running the queue.
2198  *
2199  * Run a hardware queue asynchronously with a delay of @msecs.
2200  */
2201 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
2202 {
2203         __blk_mq_delay_run_hw_queue(hctx, true, msecs);
2204 }
2205 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
2206
2207 /**
2208  * blk_mq_run_hw_queue - Start to run a hardware queue.
2209  * @hctx: Pointer to the hardware queue to run.
2210  * @async: If we want to run the queue asynchronously.
2211  *
2212  * Check if the request queue is not in a quiesced state and if there are
2213  * pending requests to be sent. If this is true, run the queue to send requests
2214  * to hardware.
2215  */
2216 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2217 {
2218         bool need_run;
2219
2220         /*
2221          * When the queue is quiesced, we may be switching the io scheduler,
2222          * updating nr_hw_queues, or doing other things, and we can't run the
2223          * queue any more; even __blk_mq_hctx_has_pending() can't be called safely.
2224          *
2225          * The queue will be rerun in blk_mq_unquiesce_queue() if it is
2226          * quiesced.
2227          */
2228         __blk_mq_run_dispatch_ops(hctx->queue, false,
2229                 need_run = !blk_queue_quiesced(hctx->queue) &&
2230                 blk_mq_hctx_has_pending(hctx));
2231
2232         if (need_run)
2233                 __blk_mq_delay_run_hw_queue(hctx, async, 0);
2234 }
2235 EXPORT_SYMBOL(blk_mq_run_hw_queue);
2236
2237 /*
2238  * Return the preferred queue to dispatch from (if any) for a non-mq aware IO
2239  * scheduler.
2240  */
2241 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
2242 {
2243         struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
2244         /*
2245          * If the IO scheduler does not respect hardware queues when
2246          * dispatching, we just don't bother with multiple HW queues and
2247          * dispatch from hctx for the current CPU since running multiple queues
2248          * just causes lock contention inside the scheduler and pointless cache
2249          * bouncing.
2250          */
2251         struct blk_mq_hw_ctx *hctx = ctx->hctxs[HCTX_TYPE_DEFAULT];
2252
2253         if (!blk_mq_hctx_stopped(hctx))
2254                 return hctx;
2255         return NULL;
2256 }
2257
2258 /**
2259  * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
2260  * @q: Pointer to the request queue to run.
2261  * @async: If we want to run the queue asynchronously.
2262  */
2263 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
2264 {
2265         struct blk_mq_hw_ctx *hctx, *sq_hctx;
2266         unsigned long i;
2267
2268         sq_hctx = NULL;
2269         if (blk_queue_sq_sched(q))
2270                 sq_hctx = blk_mq_get_sq_hctx(q);
2271         queue_for_each_hw_ctx(q, hctx, i) {
2272                 if (blk_mq_hctx_stopped(hctx))
2273                         continue;
2274                 /*
2275                  * Dispatch from this hctx either if there's no hctx preferred
2276                  * by IO scheduler or if it has requests that bypass the
2277                  * scheduler.
2278                  */
2279                 if (!sq_hctx || sq_hctx == hctx ||
2280                     !list_empty_careful(&hctx->dispatch))
2281                         blk_mq_run_hw_queue(hctx, async);
2282         }
2283 }
2284 EXPORT_SYMBOL(blk_mq_run_hw_queues);
2285
2286 /**
2287  * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
2288  * @q: Pointer to the request queue to run.
2289  * @msecs: Milliseconds of delay to wait before running the queues.
2290  */
2291 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
2292 {
2293         struct blk_mq_hw_ctx *hctx, *sq_hctx;
2294         unsigned long i;
2295
2296         sq_hctx = NULL;
2297         if (blk_queue_sq_sched(q))
2298                 sq_hctx = blk_mq_get_sq_hctx(q);
2299         queue_for_each_hw_ctx(q, hctx, i) {
2300                 if (blk_mq_hctx_stopped(hctx))
2301                         continue;
2302                 /*
2303                  * If there is already a run_work pending, leave the
2304                  * pending delay untouched. Otherwise, a hctx can stall
2305                  * if another hctx is re-delaying the other's work
2306                  * before the work executes.
2307                  */
2308                 if (delayed_work_pending(&hctx->run_work))
2309                         continue;
2310                 /*
2311                  * Dispatch from this hctx either if there's no hctx preferred
2312                  * by IO scheduler or if it has requests that bypass the
2313                  * scheduler.
2314                  */
2315                 if (!sq_hctx || sq_hctx == hctx ||
2316                     !list_empty_careful(&hctx->dispatch))
2317                         blk_mq_delay_run_hw_queue(hctx, msecs);
2318         }
2319 }
2320 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
2321
2322 /*
2323  * This function is often used by a driver to pause .queue_rq() when there
2324  * aren't enough resources or some condition isn't satisfied, and
2325  * BLK_STS_RESOURCE is usually returned.
2326  *
2327  * We do not guarantee that dispatch can be drained or blocked
2328  * after blk_mq_stop_hw_queue() returns. Please use
2329  * blk_mq_quiesce_queue() for that requirement.
2330  */
2331 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
2332 {
2333         cancel_delayed_work(&hctx->run_work);
2334
2335         set_bit(BLK_MQ_S_STOPPED, &hctx->state);
2336 }
2337 EXPORT_SYMBOL(blk_mq_stop_hw_queue);
2338
2339 /*
2340  * This function is often used by a driver to pause .queue_rq() when there
2341  * aren't enough resources or some condition isn't satisfied, and
2342  * BLK_STS_RESOURCE is usually returned.
2343  *
2344  * We do not guarantee that dispatch can be drained or blocked
2345  * after blk_mq_stop_hw_queues() returns. Please use
2346  * blk_mq_quiesce_queue() for that requirement.
2347  */
2348 void blk_mq_stop_hw_queues(struct request_queue *q)
2349 {
2350         struct blk_mq_hw_ctx *hctx;
2351         unsigned long i;
2352
2353         queue_for_each_hw_ctx(q, hctx, i)
2354                 blk_mq_stop_hw_queue(hctx);
2355 }
2356 EXPORT_SYMBOL(blk_mq_stop_hw_queues);
2357
2358 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
2359 {
2360         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2361
2362         blk_mq_run_hw_queue(hctx, false);
2363 }
2364 EXPORT_SYMBOL(blk_mq_start_hw_queue);
2365
2366 void blk_mq_start_hw_queues(struct request_queue *q)
2367 {
2368         struct blk_mq_hw_ctx *hctx;
2369         unsigned long i;
2370
2371         queue_for_each_hw_ctx(q, hctx, i)
2372                 blk_mq_start_hw_queue(hctx);
2373 }
2374 EXPORT_SYMBOL(blk_mq_start_hw_queues);
2375
2376 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
2377 {
2378         if (!blk_mq_hctx_stopped(hctx))
2379                 return;
2380
2381         clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
2382         blk_mq_run_hw_queue(hctx, async);
2383 }
2384 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
2385
2386 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
2387 {
2388         struct blk_mq_hw_ctx *hctx;
2389         unsigned long i;
2390
2391         queue_for_each_hw_ctx(q, hctx, i)
2392                 blk_mq_start_stopped_hw_queue(hctx, async);
2393 }
2394 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
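/*
 * Illustrative sketch, not part of blk-mq: the stop/restart pairing the
 * comments above describe. The driver stops its hardware queue from
 * ->queue_rq() when it runs out of a device resource and restarts the
 * stopped queues from its completion path once the resource is back.
 * struct my_dev and its fields are hypothetical, and the sketch is heavily
 * simplified (e.g. it ignores the race between the failed slot allocation
 * and a concurrent completion).
 */
struct my_dev {
	struct request_queue	*queue;
	atomic_t		free_slots;
};

static blk_status_t my_stopping_queue_rq(struct blk_mq_hw_ctx *hctx,
					 const struct blk_mq_queue_data *bd)
{
	struct my_dev *dev = hctx->queue->queuedata;

	if (atomic_dec_if_positive(&dev->free_slots) < 0) {
		/* No slots left: pause dispatch until a completion frees one. */
		blk_mq_stop_hw_queue(hctx);
		return BLK_STS_DEV_RESOURCE;
	}

	blk_mq_start_request(bd->rq);
	/* ... hand bd->rq to the hardware ... */
	return BLK_STS_OK;
}

static void my_dev_complete_one(struct my_dev *dev, struct request *rq)
{
	blk_mq_end_request(rq, BLK_STS_OK);
	atomic_inc(&dev->free_slots);
	/* A slot freed up: restart any stopped hardware queues. */
	blk_mq_start_stopped_hw_queues(dev->queue, true);
}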
2395
2396 static void blk_mq_run_work_fn(struct work_struct *work)
2397 {
2398         struct blk_mq_hw_ctx *hctx;
2399
2400         hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
2401
2402         /*
2403          * If we are stopped, don't run the queue.
2404          */
2405         if (blk_mq_hctx_stopped(hctx))
2406                 return;
2407
2408         __blk_mq_run_hw_queue(hctx);
2409 }
2410
2411 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
2412                                             struct request *rq,
2413                                             bool at_head)
2414 {
2415         struct blk_mq_ctx *ctx = rq->mq_ctx;
2416         enum hctx_type type = hctx->type;
2417
2418         lockdep_assert_held(&ctx->lock);
2419
2420         trace_block_rq_insert(rq);
2421
2422         if (at_head)
2423                 list_add(&rq->queuelist, &ctx->rq_lists[type]);
2424         else
2425                 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
2426 }
2427
2428 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
2429                              bool at_head)
2430 {
2431         struct blk_mq_ctx *ctx = rq->mq_ctx;
2432
2433         lockdep_assert_held(&ctx->lock);
2434
2435         __blk_mq_insert_req_list(hctx, rq, at_head);
2436         blk_mq_hctx_mark_pending(hctx, ctx);
2437 }
2438
2439 /**
2440  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
2441  * @rq: Pointer to request to be inserted.
2442  * @at_head: true if the request should be inserted at the head of the list.
2443  * @run_queue: If we should run the hardware queue after inserting the request.
2444  *
2445  * Should only be used carefully, when the caller knows we want to
2446  * bypass a potential IO scheduler on the target device.
2447  */
2448 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
2449                                   bool run_queue)
2450 {
2451         struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
2452
2453         spin_lock(&hctx->lock);
2454         if (at_head)
2455                 list_add(&rq->queuelist, &hctx->dispatch);
2456         else
2457                 list_add_tail(&rq->queuelist, &hctx->dispatch);
2458         spin_unlock(&hctx->lock);
2459
2460         if (run_queue)
2461                 blk_mq_run_hw_queue(hctx, false);
2462 }
2463
2464 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
2465                             struct list_head *list)
2466
2467 {
2468         struct request *rq;
2469         enum hctx_type type = hctx->type;
2470
2471         /*
2472          * Preemption doesn't flush the plug list, so it's possible that
2473          * ctx->cpu is offline now
2474          */
2475         list_for_each_entry(rq, list, queuelist) {
2476                 BUG_ON(rq->mq_ctx != ctx);
2477                 trace_block_rq_insert(rq);
2478         }
2479
2480         spin_lock(&ctx->lock);
2481         list_splice_tail_init(list, &ctx->rq_lists[type]);
2482         blk_mq_hctx_mark_pending(hctx, ctx);
2483         spin_unlock(&ctx->lock);
2484 }
2485
2486 static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
2487                               bool from_schedule)
2488 {
2489         if (hctx->queue->mq_ops->commit_rqs) {
2490                 trace_block_unplug(hctx->queue, *queued, !from_schedule);
2491                 hctx->queue->mq_ops->commit_rqs(hctx);
2492         }
2493         *queued = 0;
2494 }
2495
2496 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
2497                 unsigned int nr_segs)
2498 {
2499         int err;
2500
2501         if (bio->bi_opf & REQ_RAHEAD)
2502                 rq->cmd_flags |= REQ_FAILFAST_MASK;
2503
2504         rq->__sector = bio->bi_iter.bi_sector;
2505         blk_rq_bio_prep(rq, bio, nr_segs);
2506
2507         /* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
2508         err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
2509         WARN_ON_ONCE(err);
2510
2511         blk_account_io_start(rq);
2512 }
2513
2514 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
2515                                             struct request *rq, bool last)
2516 {
2517         struct request_queue *q = rq->q;
2518         struct blk_mq_queue_data bd = {
2519                 .rq = rq,
2520                 .last = last,
2521         };
2522         blk_status_t ret;
2523
2524         /*
2525          * If the queue accepts the request we are done. On error, the caller
2526          * may kill it. For any other (busy) status, just add it back to our
2527          * list as we previously would have done.
2528          */
2529         ret = q->mq_ops->queue_rq(hctx, &bd);
2530         switch (ret) {
2531         case BLK_STS_OK:
2532                 blk_mq_update_dispatch_busy(hctx, false);
2533                 break;
2534         case BLK_STS_RESOURCE:
2535         case BLK_STS_DEV_RESOURCE:
2536                 blk_mq_update_dispatch_busy(hctx, true);
2537                 __blk_mq_requeue_request(rq);
2538                 break;
2539         default:
2540                 blk_mq_update_dispatch_busy(hctx, false);
2541                 break;
2542         }
2543
2544         return ret;
2545 }
2546
2547 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2548                                                 struct request *rq,
2549                                                 bool bypass_insert, bool last)
2550 {
2551         struct request_queue *q = rq->q;
2552         bool run_queue = true;
2553         int budget_token;
2554
2555         /*
2556          * An RCU or SRCU read lock is needed before checking the quiesced flag.
2557          *
2558          * When the queue is stopped or quiesced, ignore 'bypass_insert' from
2559          * blk_mq_request_issue_directly() and return BLK_STS_OK to the caller,
2560          * so the driver doesn't try to dispatch again.
2561          */
2562         if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
2563                 run_queue = false;
2564                 bypass_insert = false;
2565                 goto insert;
2566         }
2567
2568         if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
2569                 goto insert;
2570
2571         budget_token = blk_mq_get_dispatch_budget(q);
2572         if (budget_token < 0)
2573                 goto insert;
2574
2575         blk_mq_set_rq_budget_token(rq, budget_token);
2576
2577         if (!blk_mq_get_driver_tag(rq)) {
2578                 blk_mq_put_dispatch_budget(q, budget_token);
2579                 goto insert;
2580         }
2581
2582         return __blk_mq_issue_directly(hctx, rq, last);
2583 insert:
2584         if (bypass_insert)
2585                 return BLK_STS_RESOURCE;
2586
2587         blk_mq_sched_insert_request(rq, false, run_queue, false);
2588
2589         return BLK_STS_OK;
2590 }
2591
2592 /**
2593  * blk_mq_try_issue_directly - Try to send a request directly to device driver.
2594  * @hctx: Pointer of the associated hardware queue.
2595  * @rq: Pointer to request to be sent.
2596  *
2597  * If the device has enough resources to accept a new request now, send the
2598  * request directly to the device driver. Else, insert it into the hctx->dispatch
2599  * queue, so we can try to send it again in the future. Requests inserted into
2600  * this queue have higher priority.
2601  */
2602 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
2603                 struct request *rq)
2604 {
2605         blk_status_t ret =
2606                 __blk_mq_try_issue_directly(hctx, rq, false, true);
2607
2608         if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
2609                 blk_mq_request_bypass_insert(rq, false, true);
2610         else if (ret != BLK_STS_OK)
2611                 blk_mq_end_request(rq, ret);
2612 }
2613
2614 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
2615 {
2616         return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
2617 }
2618
2619 static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
2620 {
2621         struct blk_mq_hw_ctx *hctx = NULL;
2622         struct request *rq;
2623         int queued = 0;
2624         int errors = 0;
2625
2626         while ((rq = rq_list_pop(&plug->mq_list))) {
2627                 bool last = rq_list_empty(plug->mq_list);
2628                 blk_status_t ret;
2629
2630                 if (hctx != rq->mq_hctx) {
2631                         if (hctx)
2632                                 blk_mq_commit_rqs(hctx, &queued, from_schedule);
2633                         hctx = rq->mq_hctx;
2634                 }
2635
2636                 ret = blk_mq_request_issue_directly(rq, last);
2637                 switch (ret) {
2638                 case BLK_STS_OK:
2639                         queued++;
2640                         break;
2641                 case BLK_STS_RESOURCE:
2642                 case BLK_STS_DEV_RESOURCE:
2643                         blk_mq_request_bypass_insert(rq, false, true);
2644                         blk_mq_commit_rqs(hctx, &queued, from_schedule);
2645                         return;
2646                 default:
2647                         blk_mq_end_request(rq, ret);
2648                         errors++;
2649                         break;
2650                 }
2651         }
2652
2653         /*
2654          * If we didn't flush the entire list, we could have told the driver
2655          * there was more coming, but that turned out to be a lie.
2656          */
2657         if (errors)
2658                 blk_mq_commit_rqs(hctx, &queued, from_schedule);
2659 }
2660
2661 static void __blk_mq_flush_plug_list(struct request_queue *q,
2662                                      struct blk_plug *plug)
2663 {
2664         if (blk_queue_quiesced(q))
2665                 return;
2666         q->mq_ops->queue_rqs(&plug->mq_list);
2667 }
2668
2669 static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
2670 {
2671         struct blk_mq_hw_ctx *this_hctx = NULL;
2672         struct blk_mq_ctx *this_ctx = NULL;
2673         struct request *requeue_list = NULL;
2674         unsigned int depth = 0;
2675         LIST_HEAD(list);
2676
2677         do {
2678                 struct request *rq = rq_list_pop(&plug->mq_list);
2679
2680                 if (!this_hctx) {
2681                         this_hctx = rq->mq_hctx;
2682                         this_ctx = rq->mq_ctx;
2683                 } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
2684                         rq_list_add(&requeue_list, rq);
2685                         continue;
2686                 }
2687                 list_add_tail(&rq->queuelist, &list);
2688                 depth++;
2689         } while (!rq_list_empty(plug->mq_list));
2690
2691         plug->mq_list = requeue_list;
2692         trace_block_unplug(this_hctx->queue, depth, !from_sched);
2693         blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
2694 }
2695
2696 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2697 {
2698         struct request *rq;
2699
2700         if (rq_list_empty(plug->mq_list))
2701                 return;
2702         plug->rq_count = 0;
2703
2704         if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
2705                 struct request_queue *q;
2706
2707                 rq = rq_list_peek(&plug->mq_list);
2708                 q = rq->q;
2709
2710                 /*
2711                  * Peek first request and see if we have a ->queue_rqs() hook.
2712                  * If we do, we can dispatch the whole plug list in one go. We
2713                  * already know at this point that all requests belong to the
2714                  * same queue, caller must ensure that's the case.
2715                  *
2716                  * Since we pass off the full list to the driver at this point,
2717                  * we do not increment the active request count for the queue.
2718                  * Bypass shared tags for now because of that.
2719                  */
2720                 if (q->mq_ops->queue_rqs &&
2721                     !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
2722                         blk_mq_run_dispatch_ops(q,
2723                                 __blk_mq_flush_plug_list(q, plug));
2724                         if (rq_list_empty(plug->mq_list))
2725                                 return;
2726                 }
2727
2728                 blk_mq_run_dispatch_ops(q,
2729                                 blk_mq_plug_issue_direct(plug, false));
2730                 if (rq_list_empty(plug->mq_list))
2731                         return;
2732         }
2733
2734         do {
2735                 blk_mq_dispatch_plug_list(plug, from_schedule);
2736         } while (!rq_list_empty(plug->mq_list));
2737 }
2738
2739 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
2740                 struct list_head *list)
2741 {
2742         int queued = 0;
2743         int errors = 0;
2744
2745         while (!list_empty(list)) {
2746                 blk_status_t ret;
2747                 struct request *rq = list_first_entry(list, struct request,
2748                                 queuelist);
2749
2750                 list_del_init(&rq->queuelist);
2751                 ret = blk_mq_request_issue_directly(rq, list_empty(list));
2752                 if (ret != BLK_STS_OK) {
2753                         errors++;
2754                         if (ret == BLK_STS_RESOURCE ||
2755                                         ret == BLK_STS_DEV_RESOURCE) {
2756                                 blk_mq_request_bypass_insert(rq, false,
2757                                                         list_empty(list));
2758                                 break;
2759                         }
2760                         blk_mq_end_request(rq, ret);
2761                 } else
2762                         queued++;
2763         }
2764
2765         /*
2766          * If we didn't flush the entire list, we could have told
2767          * the driver there was more coming, but that turned out to
2768          * be a lie.
2769          */
2770         if ((!list_empty(list) || errors) &&
2771              hctx->queue->mq_ops->commit_rqs && queued)
2772                 hctx->queue->mq_ops->commit_rqs(hctx);
2773 }
2774
2775 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
2776                                      struct bio *bio, unsigned int nr_segs)
2777 {
2778         if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
2779                 if (blk_attempt_plug_merge(q, bio, nr_segs))
2780                         return true;
2781                 if (blk_mq_sched_bio_merge(q, bio, nr_segs))
2782                         return true;
2783         }
2784         return false;
2785 }
2786
2787 static struct request *blk_mq_get_new_requests(struct request_queue *q,
2788                                                struct blk_plug *plug,
2789                                                struct bio *bio,
2790                                                unsigned int nsegs)
2791 {
2792         struct blk_mq_alloc_data data = {
2793                 .q              = q,
2794                 .nr_tags        = 1,
2795                 .cmd_flags      = bio->bi_opf,
2796         };
2797         struct request *rq;
2798
2799         if (unlikely(bio_queue_enter(bio)))
2800                 return NULL;
2801
2802         if (blk_mq_attempt_bio_merge(q, bio, nsegs))
2803                 goto queue_exit;
2804
2805         rq_qos_throttle(q, bio);
2806
2807         if (plug) {
2808                 data.nr_tags = plug->nr_ios;
2809                 plug->nr_ios = 1;
2810                 data.cached_rq = &plug->cached_rq;
2811         }
2812
2813         rq = __blk_mq_alloc_requests(&data);
2814         if (rq)
2815                 return rq;
2816         rq_qos_cleanup(q, bio);
2817         if (bio->bi_opf & REQ_NOWAIT)
2818                 bio_wouldblock_error(bio);
2819 queue_exit:
2820         blk_queue_exit(q);
2821         return NULL;
2822 }
2823
2824 static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
2825                 struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
2826 {
2827         struct request *rq;
2828
2829         if (!plug)
2830                 return NULL;
2831         rq = rq_list_peek(&plug->cached_rq);
2832         if (!rq || rq->q != q)
2833                 return NULL;
2834
2835         if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
2836                 *bio = NULL;
2837                 return NULL;
2838         }
2839
2840         if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
2841                 return NULL;
2842         if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
2843                 return NULL;
2844
2845         /*
2846          * If any qos ->throttle() ends up blocking, we will have flushed the
2847          * plug and hence killed the cached_rq list as well. Pop this entry
2848          * before we throttle.
2849          */
2850         plug->cached_rq = rq_list_next(rq);
2851         rq_qos_throttle(q, *bio);
2852
2853         rq->cmd_flags = (*bio)->bi_opf;
2854         INIT_LIST_HEAD(&rq->queuelist);
2855         return rq;
2856 }
2857
2858 static void bio_set_ioprio(struct bio *bio)
2859 {
2860         /* Nobody set ioprio so far? Initialize it based on task's nice value */
2861         if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
2862                 bio->bi_ioprio = get_current_ioprio();
2863         blkcg_set_ioprio(bio);
2864 }
2865
2866 /**
2867  * blk_mq_submit_bio - Create and send a request to a block device.
2868  * @bio: Bio pointer.
2869  *
2870  * Builds up a request structure from @q and @bio and sends it to the device.
2871  * The request may not be queued directly to hardware if:
2872  * * This request can be merged with another one
2873  * * We want to place the request in the plug queue for possible future merging
2874  * * There is an IO scheduler active at this queue
2875  *
2876  * It will not queue the request if there is an error with the bio or at
2877  * request creation.
2878  */
2879 void blk_mq_submit_bio(struct bio *bio)
2880 {
2881         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2882         struct blk_plug *plug = blk_mq_plug(bio);
2883         const int is_sync = op_is_sync(bio->bi_opf);
2884         struct request *rq;
2885         unsigned int nr_segs = 1;
2886         blk_status_t ret;
2887
2888         bio = blk_queue_bounce(bio, q);
2889         if (bio_may_exceed_limits(bio, &q->limits))
2890                 bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
2891
2892         if (!bio_integrity_prep(bio))
2893                 return;
2894
2895         bio_set_ioprio(bio);
2896
2897         rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
2898         if (!rq) {
2899                 if (!bio)
2900                         return;
2901                 rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
2902                 if (unlikely(!rq))
2903                         return;
2904         }
2905
2906         trace_block_getrq(bio);
2907
2908         rq_qos_track(q, rq, bio);
2909
2910         blk_mq_bio_to_request(rq, bio, nr_segs);
2911
2912         ret = blk_crypto_init_request(rq);
2913         if (ret != BLK_STS_OK) {
2914                 bio->bi_status = ret;
2915                 bio_endio(bio);
2916                 blk_mq_free_request(rq);
2917                 return;
2918         }
2919
2920         if (op_is_flush(bio->bi_opf)) {
2921                 blk_insert_flush(rq);
2922                 return;
2923         }
2924
2925         if (plug)
2926                 blk_add_rq_to_plug(plug, rq);
2927         else if ((rq->rq_flags & RQF_ELV) ||
2928                  (rq->mq_hctx->dispatch_busy &&
2929                   (q->nr_hw_queues == 1 || !is_sync)))
2930                 blk_mq_sched_insert_request(rq, false, true, true);
2931         else
2932                 blk_mq_run_dispatch_ops(rq->q,
2933                                 blk_mq_try_issue_directly(rq->mq_hctx, rq));
2934 }
2935
2936 #ifdef CONFIG_BLK_MQ_STACKING
2937 /**
2938  * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2939  * @rq: the request being queued
2940  */
2941 blk_status_t blk_insert_cloned_request(struct request *rq)
2942 {
2943         struct request_queue *q = rq->q;
2944         unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
2945         blk_status_t ret;
2946
2947         if (blk_rq_sectors(rq) > max_sectors) {
2948                 /*
2949                  * A SCSI device does not have a good way to report whether
2950                  * Write Same/Zero is actually supported. If a device rejects
2951                  * a non-read/write command (discard, write same, etc.), the
2952                  * low-level device driver will set the relevant queue limit to
2953                  * 0 to prevent blk-lib from issuing more of the offending
2954                  * operations. Commands queued prior to the queue limit being
2955                  * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
2956                  * errors being propagated to upper layers.
2957                  */
2958                 if (max_sectors == 0)
2959                         return BLK_STS_NOTSUPP;
2960
2961                 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
2962                         __func__, blk_rq_sectors(rq), max_sectors);
2963                 return BLK_STS_IOERR;
2964         }
2965
2966         /*
2967          * The queue settings related to segment counting may differ from the
2968          * original queue.
2969          */
2970         rq->nr_phys_segments = blk_recalc_rq_segments(rq);
2971         if (rq->nr_phys_segments > queue_max_segments(q)) {
2972                 printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
2973                         __func__, rq->nr_phys_segments, queue_max_segments(q));
2974                 return BLK_STS_IOERR;
2975         }
2976
2977         if (q->disk && should_fail_request(q->disk->part0, blk_rq_bytes(rq)))
2978                 return BLK_STS_IOERR;
2979
2980         if (blk_crypto_insert_cloned_request(rq))
2981                 return BLK_STS_IOERR;
2982
2983         blk_account_io_start(rq);
2984
2985         /*
2986          * Since we have a scheduler attached on the top device,
2987          * bypass a potential scheduler on the bottom device for
2988          * insert.
2989          */
2990         blk_mq_run_dispatch_ops(q,
2991                         ret = blk_mq_request_issue_directly(rq, true));
2992         if (ret)
2993                 blk_account_io_done(rq, ktime_get_ns());
2994         return ret;
2995 }
2996 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
2997
2998 /**
2999  * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3000  * @rq: the clone request to be cleaned up
3001  *
3002  * Description:
3003  *     Free all bios in @rq for a cloned request.
3004  */
3005 void blk_rq_unprep_clone(struct request *rq)
3006 {
3007         struct bio *bio;
3008
3009         while ((bio = rq->bio) != NULL) {
3010                 rq->bio = bio->bi_next;
3011
3012                 bio_put(bio);
3013         }
3014 }
3015 EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3016
3017 /**
3018  * blk_rq_prep_clone - Helper function to setup clone request
3019  * @rq: the request to be setup
3020  * @rq_src: original request to be cloned
3021  * @bs: bio_set that bios for clone are allocated from
3022  * @gfp_mask: memory allocation mask for bio
3023  * @bio_ctr: setup function to be called for each clone bio.
3024  *           Returns %0 for success, non %0 for failure.
3025  * @data: private data to be passed to @bio_ctr
3026  *
3027  * Description:
3028  *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3029  *     Also, the pages that the original bios point to are not copied;
3030  *     the cloned bios just point to the same pages.
3031  *     So cloned bios must be completed before original bios, which means
3032  *     the caller must complete @rq before @rq_src.
3033  */
3034 int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3035                       struct bio_set *bs, gfp_t gfp_mask,
3036                       int (*bio_ctr)(struct bio *, struct bio *, void *),
3037                       void *data)
3038 {
3039         struct bio *bio, *bio_src;
3040
3041         if (!bs)
3042                 bs = &fs_bio_set;
3043
3044         __rq_for_each_bio(bio_src, rq_src) {
3045                 bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3046                                       bs);
3047                 if (!bio)
3048                         goto free_and_out;
3049
3050                 if (bio_ctr && bio_ctr(bio, bio_src, data))
3051                         goto free_and_out;
3052
3053                 if (rq->bio) {
3054                         rq->biotail->bi_next = bio;
3055                         rq->biotail = bio;
3056                 } else {
3057                         rq->bio = rq->biotail = bio;
3058                 }
3059                 bio = NULL;
3060         }
3061
3062         /* Copy attributes of the original request to the clone request. */
3063         rq->__sector = blk_rq_pos(rq_src);
3064         rq->__data_len = blk_rq_bytes(rq_src);
3065         if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
3066                 rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
3067                 rq->special_vec = rq_src->special_vec;
3068         }
3069         rq->nr_phys_segments = rq_src->nr_phys_segments;
3070         rq->ioprio = rq_src->ioprio;
3071
3072         if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
3073                 goto free_and_out;
3074
3075         return 0;
3076
3077 free_and_out:
3078         if (bio)
3079                 bio_put(bio);
3080         blk_rq_unprep_clone(rq);
3081
3082         return -ENOMEM;
3083 }
3084 EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
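
/*
 * Hypothetical usage sketch (not part of this file): how a request-based
 * stacking driver might pair blk_rq_prep_clone(), blk_insert_cloned_request()
 * and blk_rq_unprep_clone().  The example_issue_clone() name and the choice
 * of allocating the clone with blk_mq_alloc_request() on the bottom queue are
 * illustrative assumptions, not an excerpt from an existing driver.
 */
static blk_status_t example_issue_clone(struct request *rq_src,
                                        struct request_queue *bottom_q)
{
        struct request *clone;
        blk_status_t ret;

        /* Allocate a request on the bottom (underlying) queue. */
        clone = blk_mq_alloc_request(bottom_q, rq_src->cmd_flags,
                                     BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(clone))
                return BLK_STS_RESOURCE;

        /* Clone the bios of @rq_src into @clone: default bio_set, no ctor. */
        if (blk_rq_prep_clone(clone, rq_src, NULL, GFP_NOIO, NULL, NULL)) {
                blk_mq_free_request(clone);
                return BLK_STS_RESOURCE;
        }

        /* Issue the clone, bypassing any scheduler on the bottom queue. */
        ret = blk_insert_cloned_request(clone);
        if (ret != BLK_STS_OK) {
                blk_rq_unprep_clone(clone);
                blk_mq_free_request(clone);
        }
        return ret;
}
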
3085 #endif /* CONFIG_BLK_MQ_STACKING */
3086
3087 /*
3088  * Steal bios from a request and add them to a bio list.
3089  * The request must not have been partially completed before.
3090  */
3091 void blk_steal_bios(struct bio_list *list, struct request *rq)
3092 {
3093         if (rq->bio) {
3094                 if (list->tail)
3095                         list->tail->bi_next = rq->bio;
3096                 else
3097                         list->head = rq->bio;
3098                 list->tail = rq->biotail;
3099
3100                 rq->bio = NULL;
3101                 rq->biotail = NULL;
3102         }
3103
3104         rq->__data_len = 0;
3105 }
3106 EXPORT_SYMBOL_GPL(blk_steal_bios);
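
/*
 * Hypothetical usage sketch (not part of this file): taking over the bios of
 * a request that the lower device can no longer service and resubmitting them
 * through the normal submission path, e.g. from a multipath-style failover
 * handler.  example_requeue_bios() is an illustrative name, not an existing
 * helper.
 */
static void example_requeue_bios(struct request *rq)
{
        struct bio_list list;
        struct bio *bio;

        bio_list_init(&list);

        /* Detach all bios from @rq; @rq must not be partially completed. */
        blk_steal_bios(&list, rq);

        /* Resubmit each stolen bio. */
        while ((bio = bio_list_pop(&list)))
                submit_bio_noacct(bio);
}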
3107
3108 static size_t order_to_size(unsigned int order)
3109 {
3110         return (size_t)PAGE_SIZE << order;
3111 }
3112
3113 /* called before freeing request pool in @tags */
3114 static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
3115                                     struct blk_mq_tags *tags)
3116 {
3117         struct page *page;
3118         unsigned long flags;
3119
3120         /*
3121          * There is no need to clear the mapping if the driver tags are not
3122          * initialized or the mapping belongs to the driver tags.
3123          */
3124         if (!drv_tags || drv_tags == tags)
3125                 return;
3126
3127         list_for_each_entry(page, &tags->page_list, lru) {
3128                 unsigned long start = (unsigned long)page_address(page);
3129                 unsigned long end = start + order_to_size(page->private);
3130                 int i;
3131
3132                 for (i = 0; i < drv_tags->nr_tags; i++) {
3133                         struct request *rq = drv_tags->rqs[i];
3134                         unsigned long rq_addr = (unsigned long)rq;
3135
3136                         if (rq_addr >= start && rq_addr < end) {
3137                                 WARN_ON_ONCE(req_ref_read(rq) != 0);
3138                                 cmpxchg(&drv_tags->rqs[i], rq, NULL);
3139                         }
3140                 }
3141         }
3142
3143         /*
3144          * Wait until all pending iterations are done.
3145          *
3146          * The request references are cleared, and that is guaranteed to be
3147          * observed after the ->lock is released.
3148          */
3149         spin_lock_irqsave(&drv_tags->lock, flags);
3150         spin_unlock_irqrestore(&drv_tags->lock, flags);
3151 }
3152
3153 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
3154                      unsigned int hctx_idx)
3155 {
3156         struct blk_mq_tags *drv_tags;
3157         struct page *page;
3158
3159         if (list_empty(&tags->page_list))
3160                 return;
3161
3162         if (blk_mq_is_shared_tags(set->flags))
3163                 drv_tags = set->shared_tags;
3164         else
3165                 drv_tags = set->tags[hctx_idx];
3166
3167         if (tags->static_rqs && set->ops->exit_request) {
3168                 int i;
3169
3170                 for (i = 0; i < tags->nr_tags; i++) {
3171                         struct request *rq = tags->static_rqs[i];
3172
3173                         if (!rq)
3174                                 continue;
3175                         set->ops->exit_request(set, rq, hctx_idx);
3176                         tags->static_rqs[i] = NULL;
3177                 }
3178         }
3179
3180         blk_mq_clear_rq_mapping(drv_tags, tags);
3181
3182         while (!list_empty(&tags->page_list)) {
3183                 page = list_first_entry(&tags->page_list, struct page, lru);
3184                 list_del_init(&page->lru);
3185                 /*
3186                  * Remove kmemleak object previously allocated in
3187                  * blk_mq_alloc_rqs().
3188                  */
3189                 kmemleak_free(page_address(page));
3190                 __free_pages(page, page->private);
3191         }
3192 }
3193
3194 void blk_mq_free_rq_map(struct blk_mq_tags *tags)
3195 {
3196         kfree(tags->rqs);
3197         tags->rqs = NULL;
3198         kfree(tags->static_rqs);
3199         tags->static_rqs = NULL;
3200
3201         blk_mq_free_tags(tags);
3202 }
3203
3204 static enum hctx_type hctx_idx_to_type(struct blk_mq_tag_set *set,
3205                 unsigned int hctx_idx)
3206 {
3207         int i;
3208
3209         for (i = 0; i < set->nr_maps; i++) {
3210                 unsigned int start = set->map[i].queue_offset;
3211                 unsigned int end = start + set->map[i].nr_queues;
3212
3213                 if (hctx_idx >= start && hctx_idx < end)
3214                         break;
3215         }
3216
3217         if (i >= set->nr_maps)
3218                 i = HCTX_TYPE_DEFAULT;
3219
3220         return i;
3221 }
3222
3223 static int blk_mq_get_hctx_node(struct blk_mq_tag_set *set,
3224                 unsigned int hctx_idx)
3225 {
3226         enum hctx_type type = hctx_idx_to_type(set, hctx_idx);
3227
3228         return blk_mq_hw_queue_to_node(&set->map[type], hctx_idx);
3229 }
3230
3231 static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
3232                                                unsigned int hctx_idx,
3233                                                unsigned int nr_tags,
3234                                                unsigned int reserved_tags)
3235 {
3236         int node = blk_mq_get_hctx_node(set, hctx_idx);
3237         struct blk_mq_tags *tags;
3238
3239         if (node == NUMA_NO_NODE)
3240                 node = set->numa_node;
3241
3242         tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
3243                                 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
3244         if (!tags)
3245                 return NULL;
3246
3247         tags->rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3248                                  GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3249                                  node);
3250         if (!tags->rqs) {
3251                 blk_mq_free_tags(tags);
3252                 return NULL;
3253         }
3254
3255         tags->static_rqs = kcalloc_node(nr_tags, sizeof(struct request *),
3256                                         GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
3257                                         node);
3258         if (!tags->static_rqs) {
3259                 kfree(tags->rqs);
3260                 blk_mq_free_tags(tags);
3261                 return NULL;
3262         }
3263
3264         return tags;
3265 }
3266
3267 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
3268                                unsigned int hctx_idx, int node)
3269 {
3270         int ret;
3271
3272         if (set->ops->init_request) {
3273                 ret = set->ops->init_request(set, rq, hctx_idx, node);
3274                 if (ret)
3275                         return ret;
3276         }
3277
3278         WRITE_ONCE(rq->state, MQ_RQ_IDLE);
3279         return 0;
3280 }
3281
3282 static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
3283                             struct blk_mq_tags *tags,
3284                             unsigned int hctx_idx, unsigned int depth)
3285 {
3286         unsigned int i, j, entries_per_page, max_order = 4;
3287         int node = blk_mq_get_hctx_node(set, hctx_idx);
3288         size_t rq_size, left;
3289
3290         if (node == NUMA_NO_NODE)
3291                 node = set->numa_node;
3292
3293         INIT_LIST_HEAD(&tags->page_list);
3294
3295         /*
3296          * rq_size is the size of the request plus driver payload, rounded
3297          * to the cacheline size
3298          */
3299         rq_size = round_up(sizeof(struct request) + set->cmd_size,
3300                                 cache_line_size());
3301         left = rq_size * depth;
3302
3303         for (i = 0; i < depth; ) {
3304                 int this_order = max_order;
3305                 struct page *page;
3306                 int to_do;
3307                 void *p;
3308
3309                 while (this_order && left < order_to_size(this_order - 1))
3310                         this_order--;
3311
3312                 do {
3313                         page = alloc_pages_node(node,
3314                                 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
3315                                 this_order);
3316                         if (page)
3317                                 break;
3318                         if (!this_order--)
3319                                 break;
3320                         if (order_to_size(this_order) < rq_size)
3321                                 break;
3322                 } while (1);
3323
3324                 if (!page)
3325                         goto fail;
3326
3327                 page->private = this_order;
3328                 list_add_tail(&page->lru, &tags->page_list);
3329
3330                 p = page_address(page);
3331                 /*
3332                  * Allow kmemleak to scan these pages as they contain pointers
3333                  * to additional allocations, such as those made via ops->init_request().
3334                  */
3335                 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
3336                 entries_per_page = order_to_size(this_order) / rq_size;
3337                 to_do = min(entries_per_page, depth - i);
3338                 left -= to_do * rq_size;
3339                 for (j = 0; j < to_do; j++) {
3340                         struct request *rq = p;
3341
3342                         tags->static_rqs[i] = rq;
3343                         if (blk_mq_init_request(set, rq, hctx_idx, node)) {
3344                                 tags->static_rqs[i] = NULL;
3345                                 goto fail;
3346                         }
3347
3348                         p += rq_size;
3349                         i++;
3350                 }
3351         }
3352         return 0;
3353
3354 fail:
3355         blk_mq_free_rqs(set, tags, hctx_idx);
3356         return -ENOMEM;
3357 }
3358
3359 struct rq_iter_data {
3360         struct blk_mq_hw_ctx *hctx;
3361         bool has_rq;
3362 };
3363
3364 static bool blk_mq_has_request(struct request *rq, void *data)
3365 {
3366         struct rq_iter_data *iter_data = data;
3367
3368         if (rq->mq_hctx != iter_data->hctx)
3369                 return true;
3370         iter_data->has_rq = true;
3371         return false;
3372 }
3373
3374 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
3375 {
3376         struct blk_mq_tags *tags = hctx->sched_tags ?
3377                         hctx->sched_tags : hctx->tags;
3378         struct rq_iter_data data = {
3379                 .hctx   = hctx,
3380         };
3381
3382         blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
3383         return data.has_rq;
3384 }
3385
3386 static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
3387                 struct blk_mq_hw_ctx *hctx)
3388 {
3389         if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
3390                 return false;
3391         if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
3392                 return false;
3393         return true;
3394 }
3395
3396 static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
3397 {
3398         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3399                         struct blk_mq_hw_ctx, cpuhp_online);
3400
3401         if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
3402             !blk_mq_last_cpu_in_hctx(cpu, hctx))
3403                 return 0;
3404
3405         /*
3406          * Prevent new requests from being allocated on the current hctx.
3407          *
3408          * The smp_mb__after_atomic() pairs with the implied barrier in
3409          * test_and_set_bit_lock() in sbitmap_get(), and ensures the inactive
3410          * flag is seen once we return from the tag allocator.
3411          */
3412         set_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3413         smp_mb__after_atomic();
3414
3415         /*
3416          * Try to grab a reference to the queue and wait for any outstanding
3417          * requests.  If we could not grab a reference the queue has been
3418          * frozen and there are no requests.
3419          */
3420         if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) {
3421                 while (blk_mq_hctx_has_requests(hctx))
3422                         msleep(5);
3423                 percpu_ref_put(&hctx->queue->q_usage_counter);
3424         }
3425
3426         return 0;
3427 }
3428
3429 static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
3430 {
3431         struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
3432                         struct blk_mq_hw_ctx, cpuhp_online);
3433
3434         if (cpumask_test_cpu(cpu, hctx->cpumask))
3435                 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
3436         return 0;
3437 }
3438
3439 /*
3440  * 'cpu' is going away. Splice any existing rq_list entries from this
3441  * software queue to the hw queue dispatch list, and ensure that it
3442  * gets run.
3443  */
3444 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
3445 {
3446         struct blk_mq_hw_ctx *hctx;
3447         struct blk_mq_ctx *ctx;
3448         LIST_HEAD(tmp);
3449         enum hctx_type type;
3450
3451         hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
3452         if (!cpumask_test_cpu(cpu, hctx->cpumask))
3453                 return 0;
3454
3455         ctx = __blk_mq_get_ctx(hctx->queue, cpu);
3456         type = hctx->type;
3457
3458         spin_lock(&ctx->lock);
3459         if (!list_empty(&ctx->rq_lists[type])) {
3460                 list_splice_init(&ctx->rq_lists[type], &tmp);
3461                 blk_mq_hctx_clear_pending(hctx, ctx);
3462         }
3463         spin_unlock(&ctx->lock);
3464
3465         if (list_empty(&tmp))
3466                 return 0;
3467
3468         spin_lock(&hctx->lock);
3469         list_splice_tail_init(&tmp, &hctx->dispatch);
3470         spin_unlock(&hctx->lock);
3471
3472         blk_mq_run_hw_queue(hctx, true);
3473         return 0;
3474 }
3475
3476 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
3477 {
3478         if (!(hctx->flags & BLK_MQ_F_STACKING))
3479                 cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3480                                                     &hctx->cpuhp_online);
3481         cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
3482                                             &hctx->cpuhp_dead);
3483 }
3484
3485 /*
3486  * Before freeing the hw queue, clear the flush request reference in
3487  * tags->rqs[] to avoid a potential use-after-free.
3488  */
3489 static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
3490                 unsigned int queue_depth, struct request *flush_rq)
3491 {
3492         int i;
3493         unsigned long flags;
3494
3495         /* The hw queue may not be mapped yet */
3496         if (!tags)
3497                 return;
3498
3499         WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
3500
3501         for (i = 0; i < queue_depth; i++)
3502                 cmpxchg(&tags->rqs[i], flush_rq, NULL);
3503
3504         /*
3505          * Wait until all pending iterations are done.
3506          *
3507          * The request reference is cleared, and that is guaranteed to be
3508          * observed after the ->lock is released.
3509          */
3510         spin_lock_irqsave(&tags->lock, flags);
3511         spin_unlock_irqrestore(&tags->lock, flags);
3512 }
3513
3514 /* hctx->ctxs will be freed in queue's release handler */
3515 static void blk_mq_exit_hctx(struct request_queue *q,
3516                 struct blk_mq_tag_set *set,
3517                 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
3518 {
3519         struct request *flush_rq = hctx->fq->flush_rq;
3520
3521         if (blk_mq_hw_queue_mapped(hctx))
3522                 blk_mq_tag_idle(hctx);
3523
3524         if (blk_queue_init_done(q))
3525                 blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
3526                                 set->queue_depth, flush_rq);
3527         if (set->ops->exit_request)
3528                 set->ops->exit_request(set, flush_rq, hctx_idx);
3529
3530         if (set->ops->exit_hctx)
3531                 set->ops->exit_hctx(hctx, hctx_idx);
3532
3533         blk_mq_remove_cpuhp(hctx);
3534
3535         xa_erase(&q->hctx_table, hctx_idx);
3536
3537         spin_lock(&q->unused_hctx_lock);
3538         list_add(&hctx->hctx_list, &q->unused_hctx_list);
3539         spin_unlock(&q->unused_hctx_lock);
3540 }
3541
3542 static void blk_mq_exit_hw_queues(struct request_queue *q,
3543                 struct blk_mq_tag_set *set, int nr_queue)
3544 {
3545         struct blk_mq_hw_ctx *hctx;
3546         unsigned long i;
3547
3548         queue_for_each_hw_ctx(q, hctx, i) {
3549                 if (i == nr_queue)
3550                         break;
3551                 blk_mq_exit_hctx(q, set, hctx, i);
3552         }
3553 }
3554
3555 static int blk_mq_init_hctx(struct request_queue *q,
3556                 struct blk_mq_tag_set *set,
3557                 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
3558 {
3559         hctx->queue_num = hctx_idx;
3560
3561         if (!(hctx->flags & BLK_MQ_F_STACKING))
3562                 cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
3563                                 &hctx->cpuhp_online);
3564         cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
3565
3566         hctx->tags = set->tags[hctx_idx];
3567
3568         if (set->ops->init_hctx &&
3569             set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
3570                 goto unregister_cpu_notifier;
3571
3572         if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
3573                                 hctx->numa_node))
3574                 goto exit_hctx;
3575
3576         if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
3577                 goto exit_flush_rq;
3578
3579         return 0;
3580
3581  exit_flush_rq:
3582         if (set->ops->exit_request)
3583                 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
3584  exit_hctx:
3585         if (set->ops->exit_hctx)
3586                 set->ops->exit_hctx(hctx, hctx_idx);
3587  unregister_cpu_notifier:
3588         blk_mq_remove_cpuhp(hctx);
3589         return -1;
3590 }
3591
3592 static struct blk_mq_hw_ctx *
3593 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
3594                 int node)
3595 {
3596         struct blk_mq_hw_ctx *hctx;
3597         gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
3598
3599         hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
3600         if (!hctx)
3601                 goto fail_alloc_hctx;
3602
3603         if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
3604                 goto free_hctx;
3605
3606         atomic_set(&hctx->nr_active, 0);
3607         if (node == NUMA_NO_NODE)
3608                 node = set->numa_node;
3609         hctx->numa_node = node;
3610
3611         INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
3612         spin_lock_init(&hctx->lock);
3613         INIT_LIST_HEAD(&hctx->dispatch);
3614         hctx->queue = q;
3615         hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
3616
3617         INIT_LIST_HEAD(&hctx->hctx_list);
3618
3619         /*
3620          * Allocate space for all possible cpus to avoid allocation at
3621          * runtime
3622          */
3623         hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
3624                         gfp, node);
3625         if (!hctx->ctxs)
3626                 goto free_cpumask;
3627
3628         if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
3629                                 gfp, node, false, false))
3630                 goto free_ctxs;
3631         hctx->nr_ctx = 0;
3632
3633         spin_lock_init(&hctx->dispatch_wait_lock);
3634         init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
3635         INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
3636
3637         hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp);
3638         if (!hctx->fq)
3639                 goto free_bitmap;
3640
3641         blk_mq_hctx_kobj_init(hctx);
3642
3643         return hctx;
3644
3645  free_bitmap:
3646         sbitmap_free(&hctx->ctx_map);
3647  free_ctxs:
3648         kfree(hctx->ctxs);
3649  free_cpumask:
3650         free_cpumask_var(hctx->cpumask);
3651  free_hctx:
3652         kfree(hctx);
3653  fail_alloc_hctx:
3654         return NULL;
3655 }
3656
3657 static void blk_mq_init_cpu_queues(struct request_queue *q,
3658                                    unsigned int nr_hw_queues)
3659 {
3660         struct blk_mq_tag_set *set = q->tag_set;
3661         unsigned int i, j;
3662
3663         for_each_possible_cpu(i) {
3664                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
3665                 struct blk_mq_hw_ctx *hctx;
3666                 int k;
3667
3668                 __ctx->cpu = i;
3669                 spin_lock_init(&__ctx->lock);
3670                 for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
3671                         INIT_LIST_HEAD(&__ctx->rq_lists[k]);
3672
3673                 __ctx->queue = q;
3674
3675                 /*
3676                  * Set the local node, IFF we have more than one hw queue. If
3677                  * not, we remain on the home node of the device.
3678                  */
3679                 for (j = 0; j < set->nr_maps; j++) {
3680                         hctx = blk_mq_map_queue_type(q, j, i);
3681                         if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
3682                                 hctx->numa_node = cpu_to_node(i);
3683                 }
3684         }
3685 }
3686
3687 struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3688                                              unsigned int hctx_idx,
3689                                              unsigned int depth)
3690 {
3691         struct blk_mq_tags *tags;
3692         int ret;
3693
3694         tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3695         if (!tags)
3696                 return NULL;
3697
3698         ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
3699         if (ret) {
3700                 blk_mq_free_rq_map(tags);
3701                 return NULL;
3702         }
3703
3704         return tags;
3705 }
3706
3707 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
3708                                        int hctx_idx)
3709 {
3710         if (blk_mq_is_shared_tags(set->flags)) {
3711                 set->tags[hctx_idx] = set->shared_tags;
3712
3713                 return true;
3714         }
3715
3716         set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
3717                                                        set->queue_depth);
3718
3719         return set->tags[hctx_idx];
3720 }
3721
3722 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3723                              struct blk_mq_tags *tags,
3724                              unsigned int hctx_idx)
3725 {
3726         if (tags) {
3727                 blk_mq_free_rqs(set, tags, hctx_idx);
3728                 blk_mq_free_rq_map(tags);
3729         }
3730 }
3731
3732 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
3733                                       unsigned int hctx_idx)
3734 {
3735         if (!blk_mq_is_shared_tags(set->flags))
3736                 blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);
3737
3738         set->tags[hctx_idx] = NULL;
3739 }
3740
3741 static void blk_mq_map_swqueue(struct request_queue *q)
3742 {
3743         unsigned int j, hctx_idx;
3744         unsigned long i;
3745         struct blk_mq_hw_ctx *hctx;
3746         struct blk_mq_ctx *ctx;
3747         struct blk_mq_tag_set *set = q->tag_set;
3748
3749         queue_for_each_hw_ctx(q, hctx, i) {
3750                 cpumask_clear(hctx->cpumask);
3751                 hctx->nr_ctx = 0;
3752                 hctx->dispatch_from = NULL;
3753         }
3754
3755         /*
3756          * Map software to hardware queues.
3757          *
3758          * If the cpu isn't present, the cpu is mapped to the first hctx.
3759          */
3760         for_each_possible_cpu(i) {
3761
3762                 ctx = per_cpu_ptr(q->queue_ctx, i);
3763                 for (j = 0; j < set->nr_maps; j++) {
3764                         if (!set->map[j].nr_queues) {
3765                                 ctx->hctxs[j] = blk_mq_map_queue_type(q,
3766                                                 HCTX_TYPE_DEFAULT, i);
3767                                 continue;
3768                         }
3769                         hctx_idx = set->map[j].mq_map[i];
3770                         /* an unmapped hw queue can be remapped after the CPU topology changes */
3771                         if (!set->tags[hctx_idx] &&
3772                             !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
3773                                 /*
3774                                  * If tags initialization fails for some hctx,
3775                                  * that hctx won't be brought online.  In this
3776                                  * case, remap the current ctx to hctx[0], which
3777                                  * is guaranteed to always have tags allocated.
3778                                  */
3779                                 set->map[j].mq_map[i] = 0;
3780                         }
3781
3782                         hctx = blk_mq_map_queue_type(q, j, i);
3783                         ctx->hctxs[j] = hctx;
3784                         /*
3785                          * If the CPU is already set in the mask, then we've
3786                          * mapped this one already. This can happen if
3787                          * devices share queues across queue maps.
3788                          */
3789                         if (cpumask_test_cpu(i, hctx->cpumask))
3790                                 continue;
3791
3792                         cpumask_set_cpu(i, hctx->cpumask);
3793                         hctx->type = j;
3794                         ctx->index_hw[hctx->type] = hctx->nr_ctx;
3795                         hctx->ctxs[hctx->nr_ctx++] = ctx;
3796
3797                         /*
3798                          * If the nr_ctx type overflows, we have exceeded the
3799                          * amount of sw queues we can support.
3800                          */
3801                         BUG_ON(!hctx->nr_ctx);
3802                 }
3803
3804                 for (; j < HCTX_MAX_TYPES; j++)
3805                         ctx->hctxs[j] = blk_mq_map_queue_type(q,
3806                                         HCTX_TYPE_DEFAULT, i);
3807         }
3808
3809         queue_for_each_hw_ctx(q, hctx, i) {
3810                 /*
3811                  * If no software queues are mapped to this hardware queue,
3812                  * disable it and free the request entries.
3813                  */
3814                 if (!hctx->nr_ctx) {
3815                         /* Never unmap queue 0.  We need it as a
3816                          * fallback in case allocation fails during
3817                          * a new remap
3818                          */
3819                         if (i)
3820                                 __blk_mq_free_map_and_rqs(set, i);
3821
3822                         hctx->tags = NULL;
3823                         continue;
3824                 }
3825
3826                 hctx->tags = set->tags[i];
3827                 WARN_ON(!hctx->tags);
3828
3829                 /*
3830                  * Set the map size to the number of mapped software queues.
3831                  * This is more accurate and more efficient than looping
3832                  * over all possibly mapped software queues.
3833                  */
3834                 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
3835
3836                 /*
3837                  * Initialize batch roundrobin counts
3838                  */
3839                 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
3840                 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
3841         }
3842 }
3843
3844 /*
3845  * Caller needs to ensure that we're either frozen/quiesced, or that
3846  * the queue isn't live yet.
3847  */
3848 static void queue_set_hctx_shared(struct request_queue *q, bool shared)
3849 {
3850         struct blk_mq_hw_ctx *hctx;
3851         unsigned long i;
3852
3853         queue_for_each_hw_ctx(q, hctx, i) {
3854                 if (shared) {
3855                         hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3856                 } else {
3857                         blk_mq_tag_idle(hctx);
3858                         hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3859                 }
3860         }
3861 }
3862
3863 static void blk_mq_update_tag_set_shared(struct blk_mq_tag_set *set,
3864                                          bool shared)
3865 {
3866         struct request_queue *q;
3867
3868         lockdep_assert_held(&set->tag_list_lock);
3869
3870         list_for_each_entry(q, &set->tag_list, tag_set_list) {
3871                 blk_mq_freeze_queue(q);
3872                 queue_set_hctx_shared(q, shared);
3873                 blk_mq_unfreeze_queue(q);
3874         }
3875 }
3876
3877 static void blk_mq_del_queue_tag_set(struct request_queue *q)
3878 {
3879         struct blk_mq_tag_set *set = q->tag_set;
3880
3881         mutex_lock(&set->tag_list_lock);
3882         list_del(&q->tag_set_list);
3883         if (list_is_singular(&set->tag_list)) {
3884                 /* just transitioned to unshared */
3885                 set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED;
3886                 /* update existing queue */
3887                 blk_mq_update_tag_set_shared(set, false);
3888         }
3889         mutex_unlock(&set->tag_list_lock);
3890         INIT_LIST_HEAD(&q->tag_set_list);
3891 }
3892
3893 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
3894                                      struct request_queue *q)
3895 {
3896         mutex_lock(&set->tag_list_lock);
3897
3898         /*
3899          * Check to see if we're transitioning to shared (from 1 to 2 queues).
3900          */
3901         if (!list_empty(&set->tag_list) &&
3902             !(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
3903                 set->flags |= BLK_MQ_F_TAG_QUEUE_SHARED;
3904                 /* update existing queue */
3905                 blk_mq_update_tag_set_shared(set, true);
3906         }
3907         if (set->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
3908                 queue_set_hctx_shared(q, true);
3909         list_add_tail(&q->tag_set_list, &set->tag_list);
3910
3911         mutex_unlock(&set->tag_list_lock);
3912 }
3913
3914 /* All allocations will be freed in release handler of q->mq_kobj */
3915 static int blk_mq_alloc_ctxs(struct request_queue *q)
3916 {
3917         struct blk_mq_ctxs *ctxs;
3918         int cpu;
3919
3920         ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
3921         if (!ctxs)
3922                 return -ENOMEM;
3923
3924         ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
3925         if (!ctxs->queue_ctx)
3926                 goto fail;
3927
3928         for_each_possible_cpu(cpu) {
3929                 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
3930                 ctx->ctxs = ctxs;
3931         }
3932
3933         q->mq_kobj = &ctxs->kobj;
3934         q->queue_ctx = ctxs->queue_ctx;
3935
3936         return 0;
3937  fail:
3938         kfree(ctxs);
3939         return -ENOMEM;
3940 }
3941
3942 /*
3943  * This is the actual release handler for mq, but we do it from the
3944  * request queue's release handler to avoid use-after-free and other
3945  * headaches; q->mq_kobj shouldn't have been introduced, but we can't
3946  * group the ctx/kctx kobjects without it.
3947  */
3948 void blk_mq_release(struct request_queue *q)
3949 {
3950         struct blk_mq_hw_ctx *hctx, *next;
3951         unsigned long i;
3952
3953         queue_for_each_hw_ctx(q, hctx, i)
3954                 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
3955
3956         /* all hctx are in .unused_hctx_list now */
3957         list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
3958                 list_del_init(&hctx->hctx_list);
3959                 kobject_put(&hctx->kobj);
3960         }
3961
3962         xa_destroy(&q->hctx_table);
3963
3964         /*
3965          * release .mq_kobj and sw queue's kobject now because
3966          * both share lifetime with request queue.
3967          */
3968         blk_mq_sysfs_deinit(q);
3969 }
3970
3971 static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
3972                 void *queuedata)
3973 {
3974         struct request_queue *q;
3975         int ret;
3976
3977         q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
3978         if (!q)
3979                 return ERR_PTR(-ENOMEM);
3980         q->queuedata = queuedata;
3981         ret = blk_mq_init_allocated_queue(set, q);
3982         if (ret) {
3983                 blk_put_queue(q);
3984                 return ERR_PTR(ret);
3985         }
3986         return q;
3987 }
3988
3989 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
3990 {
3991         return blk_mq_init_queue_data(set, NULL);
3992 }
3993 EXPORT_SYMBOL(blk_mq_init_queue);
3994
3995 /**
3996  * blk_mq_destroy_queue - shutdown a request queue
3997  * @q: request queue to shutdown
3998  *
3999  * This shuts down a request queue allocated by blk_mq_init_queue() and drops
4000  * the initial reference.  All future requests will fail with -ENODEV.
4001  *
4002  * Context: can sleep
4003  */
4004 void blk_mq_destroy_queue(struct request_queue *q)
4005 {
4006         WARN_ON_ONCE(!queue_is_mq(q));
4007         WARN_ON_ONCE(blk_queue_registered(q));
4008
4009         might_sleep();
4010
4011         blk_queue_flag_set(QUEUE_FLAG_DYING, q);
4012         blk_queue_start_drain(q);
4013         blk_freeze_queue(q);
4014
4015         blk_sync_queue(q);
4016         blk_mq_cancel_work_sync(q);
4017         blk_mq_exit_queue(q);
4018
4019         /* @q is and will stay empty, shutdown and put */
4020         blk_put_queue(q);
4021 }
4022 EXPORT_SYMBOL(blk_mq_destroy_queue);
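
/*
 * Hypothetical usage sketch (not part of this file): lifetime of a
 * driver-private, non-disk queue built on an existing tag set.  Per the
 * description above, blk_mq_destroy_queue() also drops the initial queue
 * reference, so no separate blk_put_queue() is needed here.
 * example_with_private_queue() is an illustrative name.
 */
static int example_with_private_queue(struct blk_mq_tag_set *set)
{
        struct request_queue *q;

        q = blk_mq_init_queue(set);
        if (IS_ERR(q))
                return PTR_ERR(q);

        /* ... allocate and issue passthrough requests on @q ... */

        blk_mq_destroy_queue(q);
        return 0;
}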
4023
4024 struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
4025                 struct lock_class_key *lkclass)
4026 {
4027         struct request_queue *q;
4028         struct gendisk *disk;
4029
4030         q = blk_mq_init_queue_data(set, queuedata);
4031         if (IS_ERR(q))
4032                 return ERR_CAST(q);
4033
4034         disk = __alloc_disk_node(q, set->numa_node, lkclass);
4035         if (!disk) {
4036                 blk_mq_destroy_queue(q);
4037                 return ERR_PTR(-ENOMEM);
4038         }
4039         set_bit(GD_OWNS_QUEUE, &disk->state);
4040         return disk;
4041 }
4042 EXPORT_SYMBOL(__blk_mq_alloc_disk);
4043
4044 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
4045                 struct lock_class_key *lkclass)
4046 {
4047         if (!blk_get_queue(q))
4048                 return NULL;
4049         return __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
4050 }
4051 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
4052
4053 static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
4054                 struct blk_mq_tag_set *set, struct request_queue *q,
4055                 int hctx_idx, int node)
4056 {
4057         struct blk_mq_hw_ctx *hctx = NULL, *tmp;
4058
4059         /* reuse dead hctx first */
4060         spin_lock(&q->unused_hctx_lock);
4061         list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
4062                 if (tmp->numa_node == node) {
4063                         hctx = tmp;
4064                         break;
4065                 }
4066         }
4067         if (hctx)
4068                 list_del_init(&hctx->hctx_list);
4069         spin_unlock(&q->unused_hctx_lock);
4070
4071         if (!hctx)
4072                 hctx = blk_mq_alloc_hctx(q, set, node);
4073         if (!hctx)
4074                 goto fail;
4075
4076         if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
4077                 goto free_hctx;
4078
4079         return hctx;
4080
4081  free_hctx:
4082         kobject_put(&hctx->kobj);
4083  fail:
4084         return NULL;
4085 }
4086
4087 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
4088                                                 struct request_queue *q)
4089 {
4090         struct blk_mq_hw_ctx *hctx;
4091         unsigned long i, j;
4092
4093         /* protect against switching io scheduler  */
4094         mutex_lock(&q->sysfs_lock);
4095         for (i = 0; i < set->nr_hw_queues; i++) {
4096                 int old_node;
4097                 int node = blk_mq_get_hctx_node(set, i);
4098                 struct blk_mq_hw_ctx *old_hctx = xa_load(&q->hctx_table, i);
4099
4100                 if (old_hctx) {
4101                         old_node = old_hctx->numa_node;
4102                         blk_mq_exit_hctx(q, set, old_hctx, i);
4103                 }
4104
4105                 if (!blk_mq_alloc_and_init_hctx(set, q, i, node)) {
4106                         if (!old_hctx)
4107                                 break;
4108                         pr_warn("Allocate new hctx on node %d fails, fallback to previous one on node %d\n",
4109                                         node, old_node);
4110                         hctx = blk_mq_alloc_and_init_hctx(set, q, i, old_node);
4111                         WARN_ON_ONCE(!hctx);
4112                 }
4113         }
4114         /*
4115          * If increasing nr_hw_queues failed, free the newly allocated
4116          * hctxs and keep the previous q->nr_hw_queues.
4117          */
4118         if (i != set->nr_hw_queues) {
4119                 j = q->nr_hw_queues;
4120         } else {
4121                 j = i;
4122                 q->nr_hw_queues = set->nr_hw_queues;
4123         }
4124
4125         xa_for_each_start(&q->hctx_table, j, hctx, j)
4126                 blk_mq_exit_hctx(q, set, hctx, j);
4127         mutex_unlock(&q->sysfs_lock);
4128 }
4129
4130 static void blk_mq_update_poll_flag(struct request_queue *q)
4131 {
4132         struct blk_mq_tag_set *set = q->tag_set;
4133
4134         if (set->nr_maps > HCTX_TYPE_POLL &&
4135             set->map[HCTX_TYPE_POLL].nr_queues)
4136                 blk_queue_flag_set(QUEUE_FLAG_POLL, q);
4137         else
4138                 blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
4139 }
4140
4141 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
4142                 struct request_queue *q)
4143 {
4144         WARN_ON_ONCE(blk_queue_has_srcu(q) !=
4145                         !!(set->flags & BLK_MQ_F_BLOCKING));
4146
4147         /* mark the queue as mq asap */
4148         q->mq_ops = set->ops;
4149
4150         q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
4151                                              blk_mq_poll_stats_bkt,
4152                                              BLK_MQ_POLL_STATS_BKTS, q);
4153         if (!q->poll_cb)
4154                 goto err_exit;
4155
4156         if (blk_mq_alloc_ctxs(q))
4157                 goto err_poll;
4158
4159         /* init q->mq_kobj and sw queues' kobjects */
4160         blk_mq_sysfs_init(q);
4161
4162         INIT_LIST_HEAD(&q->unused_hctx_list);
4163         spin_lock_init(&q->unused_hctx_lock);
4164
4165         xa_init(&q->hctx_table);
4166
4167         blk_mq_realloc_hw_ctxs(set, q);
4168         if (!q->nr_hw_queues)
4169                 goto err_hctxs;
4170
4171         INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
4172         blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
4173
4174         q->tag_set = set;
4175
4176         q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
4177         blk_mq_update_poll_flag(q);
4178
4179         INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
4180         INIT_LIST_HEAD(&q->requeue_list);
4181         spin_lock_init(&q->requeue_lock);
4182
4183         q->nr_requests = set->queue_depth;
4184
4185         /*
4186          * Default to classic polling
4187          */
4188         q->poll_nsec = BLK_MQ_POLL_CLASSIC;
4189
4190         blk_mq_init_cpu_queues(q, set->nr_hw_queues);
4191         blk_mq_add_queue_tag_set(set, q);
4192         blk_mq_map_swqueue(q);
4193         return 0;
4194
4195 err_hctxs:
4196         xa_destroy(&q->hctx_table);
4197         q->nr_hw_queues = 0;
4198         blk_mq_sysfs_deinit(q);
4199 err_poll:
4200         blk_stat_free_callback(q->poll_cb);
4201         q->poll_cb = NULL;
4202 err_exit:
4203         q->mq_ops = NULL;
4204         return -ENOMEM;
4205 }
4206 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
4207
4208 /* tags can _not_ be used after returning from blk_mq_exit_queue */
4209 void blk_mq_exit_queue(struct request_queue *q)
4210 {
4211         struct blk_mq_tag_set *set = q->tag_set;
4212
4213         /* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
4214         blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
4215         /* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
4216         blk_mq_del_queue_tag_set(q);
4217 }
4218
4219 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
4220 {
4221         int i;
4222
4223         if (blk_mq_is_shared_tags(set->flags)) {
4224                 set->shared_tags = blk_mq_alloc_map_and_rqs(set,
4225                                                 BLK_MQ_NO_HCTX_IDX,
4226                                                 set->queue_depth);
4227                 if (!set->shared_tags)
4228                         return -ENOMEM;
4229         }
4230
4231         for (i = 0; i < set->nr_hw_queues; i++) {
4232                 if (!__blk_mq_alloc_map_and_rqs(set, i))
4233                         goto out_unwind;
4234                 cond_resched();
4235         }
4236
4237         return 0;
4238
4239 out_unwind:
4240         while (--i >= 0)
4241                 __blk_mq_free_map_and_rqs(set, i);
4242
4243         if (blk_mq_is_shared_tags(set->flags)) {
4244                 blk_mq_free_map_and_rqs(set, set->shared_tags,
4245                                         BLK_MQ_NO_HCTX_IDX);
4246         }
4247
4248         return -ENOMEM;
4249 }
4250
4251 /*
4252  * Allocate the request maps associated with this tag_set. Note that this
4253  * may reduce the depth asked for, if memory is tight. set->queue_depth
4254  * will be updated to reflect the allocated depth.
4255  */
4256 static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
4257 {
4258         unsigned int depth;
4259         int err;
4260
4261         depth = set->queue_depth;
4262         do {
4263                 err = __blk_mq_alloc_rq_maps(set);
4264                 if (!err)
4265                         break;
4266
4267                 set->queue_depth >>= 1;
4268                 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
4269                         err = -ENOMEM;
4270                         break;
4271                 }
4272         } while (set->queue_depth);
4273
4274         if (!set->queue_depth || err) {
4275                 pr_err("blk-mq: failed to allocate request map\n");
4276                 return -ENOMEM;
4277         }
4278
4279         if (depth != set->queue_depth)
4280                 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4281                                                 depth, set->queue_depth);
4282
4283         return 0;
4284 }
4285
4286 static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
4287 {
4288         /*
4289          * blk_mq_map_queues() and multiple .map_queues() implementations
4290          * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
4291          * number of hardware queues.
4292          */
4293         if (set->nr_maps == 1)
4294                 set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
4295
4296         if (set->ops->map_queues && !is_kdump_kernel()) {
4297                 int i;
4298
4299                 /*
4300                  * transport .map_queues is usually done in the following
4301                  * way:
4302                  *
4303                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
4304                  *      mask = get_cpu_mask(queue)
4305                  *      for_each_cpu(cpu, mask)
4306                  *              set->map[x].mq_map[cpu] = queue;
4307                  * }
4308                  *
4309                  * When we need to remap, the table has to be cleared to
4310                  * kill stale mappings, since a CPU may not be mapped
4311                  * to any hw queue.
4312                  */
4313                 for (i = 0; i < set->nr_maps; i++)
4314                         blk_mq_clear_mq_map(&set->map[i]);
4315
4316                 set->ops->map_queues(set);
4317         } else {
4318                 BUG_ON(set->nr_maps > 1);
4319                 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4320         }
4321 }
4322
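
/*
 * Hypothetical sketch (not part of this file) of the ->map_queues() pattern
 * described in the comment above: a transport that simply spreads its single
 * default map across all CPUs with the generic helper.  Real transports
 * typically use helpers such as blk_mq_pci_map_queues() instead;
 * example_map_queues() is an illustrative name.
 */
static void example_map_queues(struct blk_mq_tag_set *set)
{
        struct blk_mq_queue_map *qmap = &set->map[HCTX_TYPE_DEFAULT];

        /* One map covering all hw queues, starting at offset 0. */
        qmap->nr_queues = set->nr_hw_queues;
        qmap->queue_offset = 0;

        blk_mq_map_queues(qmap);
}
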
4323 static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
4324                                   int cur_nr_hw_queues, int new_nr_hw_queues)
4325 {
4326         struct blk_mq_tags **new_tags;
4327
4328         if (cur_nr_hw_queues >= new_nr_hw_queues)
4329                 return 0;
4330
4331         new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
4332                                 GFP_KERNEL, set->numa_node);
4333         if (!new_tags)
4334                 return -ENOMEM;
4335
4336         if (set->tags)
4337                 memcpy(new_tags, set->tags, cur_nr_hw_queues *
4338                        sizeof(*set->tags));
4339         kfree(set->tags);
4340         set->tags = new_tags;
4341         set->nr_hw_queues = new_nr_hw_queues;
4342
4343         return 0;
4344 }
4345
4346 static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
4347                                 int new_nr_hw_queues)
4348 {
4349         return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
4350 }
4351
4352 /*
4353  * Alloc a tag set to be associated with one or more request queues.
4354  * May fail with EINVAL for various error conditions. May adjust the
4355  * requested depth down, if it's too large. In that case, the set
4356  * value will be stored in set->queue_depth.
4357  */
4358 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
4359 {
4360         int i, ret;
4361
4362         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
4363
4364         if (!set->nr_hw_queues)
4365                 return -EINVAL;
4366         if (!set->queue_depth)
4367                 return -EINVAL;
4368         if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
4369                 return -EINVAL;
4370
4371         if (!set->ops->queue_rq)
4372                 return -EINVAL;
4373
4374         if (!set->ops->get_budget ^ !set->ops->put_budget)
4375                 return -EINVAL;
4376
4377         if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
4378                 pr_info("blk-mq: reduced tag depth to %u\n",
4379                         BLK_MQ_MAX_DEPTH);
4380                 set->queue_depth = BLK_MQ_MAX_DEPTH;
4381         }
4382
4383         if (!set->nr_maps)
4384                 set->nr_maps = 1;
4385         else if (set->nr_maps > HCTX_MAX_TYPES)
4386                 return -EINVAL;
4387
4388         /*
4389          * If a crashdump is active, then we are potentially in a very
4390          * memory constrained environment. Limit us to 1 queue and
4391          * 64 tags to prevent using too much memory.
4392          */
4393         if (is_kdump_kernel()) {
4394                 set->nr_hw_queues = 1;
4395                 set->nr_maps = 1;
4396                 set->queue_depth = min(64U, set->queue_depth);
4397         }
4398         /*
4399          * There is no use for more h/w queues than cpus if we just have
4400          * a single map
4401          */
4402         if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
4403                 set->nr_hw_queues = nr_cpu_ids;
4404
4405         if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
4406                 return -ENOMEM;
4407
4408         ret = -ENOMEM;
4409         for (i = 0; i < set->nr_maps; i++) {
4410                 set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
4411                                                   sizeof(set->map[i].mq_map[0]),
4412                                                   GFP_KERNEL, set->numa_node);
4413                 if (!set->map[i].mq_map)
4414                         goto out_free_mq_map;
4415                 set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
4416         }
4417
4418         blk_mq_update_queue_map(set);
4419
4420         ret = blk_mq_alloc_set_map_and_rqs(set);
4421         if (ret)
4422                 goto out_free_mq_map;
4423
4424         mutex_init(&set->tag_list_lock);
4425         INIT_LIST_HEAD(&set->tag_list);
4426
4427         return 0;
4428
4429 out_free_mq_map:
4430         for (i = 0; i < set->nr_maps; i++) {
4431                 kfree(set->map[i].mq_map);
4432                 set->map[i].mq_map = NULL;
4433         }
4434         kfree(set->tags);
4435         set->tags = NULL;
4436         return ret;
4437 }
4438 EXPORT_SYMBOL(blk_mq_alloc_tag_set);
4439
4440 /* allocate and initialize a tagset for a simple single-queue device */
4441 int blk_mq_alloc_sq_tag_set(struct blk_mq_tag_set *set,
4442                 const struct blk_mq_ops *ops, unsigned int queue_depth,
4443                 unsigned int set_flags)
4444 {
4445         memset(set, 0, sizeof(*set));
4446         set->ops = ops;
4447         set->nr_hw_queues = 1;
4448         set->nr_maps = 1;
4449         set->queue_depth = queue_depth;
4450         set->numa_node = NUMA_NO_NODE;
4451         set->flags = set_flags;
4452         return blk_mq_alloc_tag_set(set);
4453 }
4454 EXPORT_SYMBOL_GPL(blk_mq_alloc_sq_tag_set);
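
/*
 * Hypothetical usage sketch (not part of this file): probe-time pairing of
 * blk_mq_alloc_sq_tag_set() with disk allocation for a simple single-queue
 * driver.  example_probe() and example_ops are illustrative names, and the
 * queue depth of 128 is an arbitrary example value.
 */
static int example_probe(struct blk_mq_tag_set *set,
                         const struct blk_mq_ops *example_ops)
{
        struct gendisk *disk;
        int ret;

        ret = blk_mq_alloc_sq_tag_set(set, example_ops, 128,
                                      BLK_MQ_F_SHOULD_MERGE);
        if (ret)
                return ret;

        /* Allocate the request queue and the gendisk that owns it. */
        disk = blk_mq_alloc_disk(set, NULL);
        if (IS_ERR(disk)) {
                blk_mq_free_tag_set(set);
                return PTR_ERR(disk);
        }

        /* ... set the capacity and disk->fops, then add_disk(disk) ... */
        return 0;
}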
4455
4456 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
4457 {
4458         int i, j;
4459
4460         for (i = 0; i < set->nr_hw_queues; i++)
4461                 __blk_mq_free_map_and_rqs(set, i);
4462
4463         if (blk_mq_is_shared_tags(set->flags)) {
4464                 blk_mq_free_map_and_rqs(set, set->shared_tags,
4465                                         BLK_MQ_NO_HCTX_IDX);
4466         }
4467
4468         for (j = 0; j < set->nr_maps; j++) {
4469                 kfree(set->map[j].mq_map);
4470                 set->map[j].mq_map = NULL;
4471         }
4472
4473         kfree(set->tags);
4474         set->tags = NULL;
4475 }
4476 EXPORT_SYMBOL(blk_mq_free_tag_set);
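
/*
 * Hypothetical usage sketch (not part of this file): remove-path counterpart
 * to a probe sequence like the one sketched above.  Because the gendisk owns
 * its queue (GD_OWNS_QUEUE), put_disk() also releases the queue; the tag set
 * is freed last.  example_remove() is an illustrative name.
 */
static void example_remove(struct gendisk *disk, struct blk_mq_tag_set *set)
{
        del_gendisk(disk);              /* tear down the block device */
        put_disk(disk);                 /* also drops the queue reference */
        blk_mq_free_tag_set(set);
}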
4477
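/*
 * Adjust the number of requests (queue depth) available to @q, e.g. when
 * the queue/nr_requests sysfs attribute is written.  The queue is frozen
 * and quiesced while the tag depths are updated.
 */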
4478 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
4479 {
4480         struct blk_mq_tag_set *set = q->tag_set;
4481         struct blk_mq_hw_ctx *hctx;
4482         int ret;
4483         unsigned long i;
4484
4485         if (!set)
4486                 return -EINVAL;
4487
4488         if (q->nr_requests == nr)
4489                 return 0;
4490
4491         blk_mq_freeze_queue(q);
4492         blk_mq_quiesce_queue(q);
4493
4494         ret = 0;
4495         queue_for_each_hw_ctx(q, hctx, i) {
4496                 if (!hctx->tags)
4497                         continue;
4498                 /*
4499                  * If we're using an MQ scheduler, just update the scheduler
4500                  * queue depth. This is similar to what the old code would do.
4501                  */
4502                 if (hctx->sched_tags) {
4503                         ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
4504                                                       nr, true);
4505                 } else {
4506                         ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
4507                                                       false);
4508                 }
4509                 if (ret)
4510                         break;
4511                 if (q->elevator && q->elevator->type->ops.depth_updated)
4512                         q->elevator->type->ops.depth_updated(hctx);
4513         }
4514         if (!ret) {
4515                 q->nr_requests = nr;
4516                 if (blk_mq_is_shared_tags(set->flags)) {
4517                         if (q->elevator)
4518                                 blk_mq_tag_update_sched_shared_tags(q);
4519                         else
4520                                 blk_mq_tag_resize_shared_tags(set, nr);
4521                 }
4522         }
4523
4524         blk_mq_unquiesce_queue(q);
4525         blk_mq_unfreeze_queue(q);
4526
4527         return ret;
4528 }
4529
4530 /*
4531  * request_queue and elevator_type pair.
4532  * It is just used by __blk_mq_update_nr_hw_queues to cache
4533  * the elevator_type associated with a request_queue.
4534  */
4535 struct blk_mq_qe_pair {
4536         struct list_head node;
4537         struct request_queue *q;
4538         struct elevator_type *type;
4539 };
4540
4541 /*
4542  * Cache the elevator_type in the qe pair list and switch the
4543  * io scheduler to 'none'.
4544  */
4545 static bool blk_mq_elv_switch_none(struct list_head *head,
4546                 struct request_queue *q)
4547 {
4548         struct blk_mq_qe_pair *qe;
4549
4550         if (!q->elevator)
4551                 return true;
4552
4553         qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
4554         if (!qe)
4555                 return false;
4556
4557         /* q->elevator needs protection from ->sysfs_lock */
4558         mutex_lock(&q->sysfs_lock);
4559
4560         INIT_LIST_HEAD(&qe->node);
4561         qe->q = q;
4562         qe->type = q->elevator->type;
4563         list_add(&qe->node, head);
4564
4565         /*
4566          * After elevator_switch, the previous elevator_queue will be
4567          * released by elevator_release. The reference to the io scheduler
4568          * module taken by elevator_get will also be put. So we need to take
4569          * a reference to the io scheduler module here to prevent it from
4570          * being removed.
4571          */
4572         __module_get(qe->type->elevator_owner);
4573         elevator_switch(q, NULL);
4574         mutex_unlock(&q->sysfs_lock);
4575
4576         return true;
4577 }
4578
4579 static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
4580                                                 struct request_queue *q)
4581 {
4582         struct blk_mq_qe_pair *qe;
4583
4584         list_for_each_entry(qe, head, node)
4585                 if (qe->q == q)
4586                         return qe;
4587
4588         return NULL;
4589 }
4590
4591 static void blk_mq_elv_switch_back(struct list_head *head,
4592                                   struct request_queue *q)
4593 {
4594         struct blk_mq_qe_pair *qe;
4595         struct elevator_type *t;
4596
4597         qe = blk_lookup_qe_pair(head, q);
4598         if (!qe)
4599                 return;
4600         t = qe->type;
4601         list_del(&qe->node);
4602         kfree(qe);
4603
4604         mutex_lock(&q->sysfs_lock);
4605         elevator_switch(q, t);
4606         mutex_unlock(&q->sysfs_lock);
4607 }
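
/*
 * Illustrative pairing (sketch only, mirroring the caller below): each
 * request_queue's scheduler is parked with blk_mq_elv_switch_none() before a
 * queue-topology change and restored with blk_mq_elv_switch_back() once the
 * new mappings are in place:
 *
 *	LIST_HEAD(head);
 *
 *	if (!blk_mq_elv_switch_none(&head, q))
 *		goto switch_back;
 *	... reshape hardware queues ...
 *	blk_mq_elv_switch_back(&head, q);
 */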
4608
4609 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
4610                                                         int nr_hw_queues)
4611 {
4612         struct request_queue *q;
4613         LIST_HEAD(head);
4614         int prev_nr_hw_queues;
4615
4616         lockdep_assert_held(&set->tag_list_lock);
4617
4618         if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
4619                 nr_hw_queues = nr_cpu_ids;
4620         if (nr_hw_queues < 1)
4621                 return;
4622         if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
4623                 return;
4624
4625         list_for_each_entry(q, &set->tag_list, tag_set_list)
4626                 blk_mq_freeze_queue(q);
4627         /*
4628          * Switch IO scheduler to 'none', cleaning up the data associated
4629          * with the previous scheduler. We will switch back once we are done
4630          * updating the new sw-to-hw queue mappings.
4631          */
4632         list_for_each_entry(q, &set->tag_list, tag_set_list)
4633                 if (!blk_mq_elv_switch_none(&head, q))
4634                         goto switch_back;
4635
4636         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4637                 blk_mq_debugfs_unregister_hctxs(q);
4638                 blk_mq_sysfs_unregister_hctxs(q);
4639         }
4640
4641         prev_nr_hw_queues = set->nr_hw_queues;
4642         if (blk_mq_realloc_tag_set_tags(set, set->nr_hw_queues, nr_hw_queues) <
4643             0)
4644                 goto reregister;
4645
4646         set->nr_hw_queues = nr_hw_queues;
4647 fallback:
4648         blk_mq_update_queue_map(set);
4649         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4650                 blk_mq_realloc_hw_ctxs(set, q);
4651                 blk_mq_update_poll_flag(q);
4652                 if (q->nr_hw_queues != set->nr_hw_queues) {
4653                         int i = prev_nr_hw_queues;
4654
4655                         pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
4656                                         nr_hw_queues, prev_nr_hw_queues);
4657                         for (; i < set->nr_hw_queues; i++)
4658                                 __blk_mq_free_map_and_rqs(set, i);
4659
4660                         set->nr_hw_queues = prev_nr_hw_queues;
4661                         blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
4662                         goto fallback;
4663                 }
4664                 blk_mq_map_swqueue(q);
4665         }
4666
4667 reregister:
4668         list_for_each_entry(q, &set->tag_list, tag_set_list) {
4669                 blk_mq_sysfs_register_hctxs(q);
4670                 blk_mq_debugfs_register_hctxs(q);
4671         }
4672
4673 switch_back:
4674         list_for_each_entry(q, &set->tag_list, tag_set_list)
4675                 blk_mq_elv_switch_back(&head, q);
4676
4677         list_for_each_entry(q, &set->tag_list, tag_set_list)
4678                 blk_mq_unfreeze_queue(q);
4679 }
4680
4681 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
4682 {
4683         mutex_lock(&set->tag_list_lock);
4684         __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
4685         mutex_unlock(&set->tag_list_lock);
4686 }
4687 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
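
/*
 * Illustrative driver-side call (hypothetical names, sketch only): a driver
 * that gains or loses interrupt vectors at runtime can resize the hardware
 * queue count of an existing tag set, e.g.:
 *
 *	blk_mq_update_nr_hw_queues(&dev->tag_set, new_nr_queues);
 *
 * Every request_queue attached to the tag set is frozen for the duration of
 * the update, and the I/O schedulers are temporarily switched to 'none'.
 */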
4688
4689 /* Enable polling stats and return whether they were already enabled. */
4690 static bool blk_poll_stats_enable(struct request_queue *q)
4691 {
4692         if (q->poll_stat)
4693                 return true;
4694
4695         return blk_stats_alloc_enable(q);
4696 }
4697
4698 static void blk_mq_poll_stats_start(struct request_queue *q)
4699 {
4700         /*
4701          * We don't arm the callback if polling stats are not enabled or the
4702          * callback is already active.
4703          */
4704         if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
4705                 return;
4706
4707         blk_stat_activate_msecs(q->poll_cb, 100);
4708 }
4709
4710 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
4711 {
4712         struct request_queue *q = cb->data;
4713         int bucket;
4714
4715         for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
4716                 if (cb->stat[bucket].nr_samples)
4717                         q->poll_stat[bucket] = cb->stat[bucket];
4718         }
4719 }
4720
4721 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
4722                                        struct request *rq)
4723 {
4724         unsigned long ret = 0;
4725         int bucket;
4726
4727         /*
4728          * If stats collection isn't on, don't sleep but turn it on for
4729          * future users
4730          */
4731         if (!blk_poll_stats_enable(q))
4732                 return 0;
4733
4734         /*
4735          * As an optimistic guess, use half of the mean service time
4736          * for this type of request. We can (and should) make this smarter.
4737          * For instance, if the completion latencies are tight, we can
4738          * get closer than just half the mean. This is especially
4739          * important on devices where the completion latencies are longer
4740          * than ~10 usec. We do use the stats for the relevant I/O size,
4741          * if available, which does lead to better estimates.
4742          */
4743         bucket = blk_mq_poll_stats_bkt(rq);
4744         if (bucket < 0)
4745                 return ret;
4746
4747         if (q->poll_stat[bucket].nr_samples)
4748                 ret = (q->poll_stat[bucket].mean + 1) / 2;
4749
4750         return ret;
4751 }
4752
4753 static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
4754 {
4755         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
4756         struct request *rq = blk_qc_to_rq(hctx, qc);
4757         struct hrtimer_sleeper hs;
4758         enum hrtimer_mode mode;
4759         unsigned int nsecs;
4760         ktime_t kt;
4761
4762         /*
4763          * If a request has completed on a queue that uses an I/O scheduler, we
4764          * won't get back a request from blk_qc_to_rq.
4765          */
4766         if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
4767                 return false;
4768
4769         /*
4770          * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
4771          *
4772          *  0:  use half of the previous average completion time
4773          * >0:  use this specific value
4774          */
4775         if (q->poll_nsec > 0)
4776                 nsecs = q->poll_nsec;
4777         else
4778                 nsecs = blk_mq_poll_nsecs(q, rq);
4779
4780         if (!nsecs)
4781                 return false;
4782
4783         rq->rq_flags |= RQF_MQ_POLL_SLEPT;
4784
4785         /*
4786          * This will be replaced with the stats tracking code, using
4787          * 'avg_completion_time / 2' as the pre-sleep target.
4788          */
4789         kt = nsecs;
4790
4791         mode = HRTIMER_MODE_REL;
4792         hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode);
4793         hrtimer_set_expires(&hs.timer, kt);
4794
4795         do {
4796                 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
4797                         break;
4798                 set_current_state(TASK_UNINTERRUPTIBLE);
4799                 hrtimer_sleeper_start_expires(&hs, mode);
4800                 if (hs.task)
4801                         io_schedule();
4802                 hrtimer_cancel(&hs.timer);
4803                 mode = HRTIMER_MODE_ABS;
4804         } while (hs.task && !signal_pending(current));
4805
4806         __set_current_state(TASK_RUNNING);
4807         destroy_hrtimer_on_stack(&hs.timer);
4808
4809         /*
4810          * If we sleep, have the caller restart the poll loop to reset the
4811          * state.  Like for the other success return cases, the caller is
4812          * responsible for checking if the IO completed.  If the IO isn't
4813          * complete, we'll get called again and will go straight to the busy
4814          * poll loop.
4815          */
4816         return true;
4817 }
4818
4819 static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
4820                                struct io_comp_batch *iob, unsigned int flags)
4821 {
4822         struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
4823         long state = get_current_state();
4824         int ret;
4825
4826         do {
4827                 ret = q->mq_ops->poll(hctx, iob);
4828                 if (ret > 0) {
4829                         __set_current_state(TASK_RUNNING);
4830                         return ret;
4831                 }
4832
4833                 if (signal_pending_state(state, current))
4834                         __set_current_state(TASK_RUNNING);
4835                 if (task_is_running(current))
4836                         return 1;
4837
4838                 if (ret < 0 || (flags & BLK_POLL_ONESHOT))
4839                         break;
4840                 cpu_relax();
4841         } while (!need_resched());
4842
4843         __set_current_state(TASK_RUNNING);
4844         return 0;
4845 }
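
/*
 * Illustrative ->poll callback (hypothetical driver, sketch only): the driver
 * reaps completions for the polled hardware context and returns how many
 * requests it completed (0 if none, negative on error), e.g.:
 *
 *	static int foo_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
 *	{
 *		struct foo_queue *fq = hctx->driver_data;
 *
 *		return foo_reap_completions(fq, iob);
 *	}
 */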
4846
4847 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
4848                 unsigned int flags)
4849 {
4850         if (!(flags & BLK_POLL_NOSLEEP) &&
4851             q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
4852                 if (blk_mq_poll_hybrid(q, cookie))
4853                         return 1;
4854         }
4855         return blk_mq_poll_classic(q, cookie, iob, flags);
4856 }
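
/*
 * Illustrative configuration (sketch, assuming the standard queue sysfs
 * attributes): q->poll_nsec is normally set through io_poll_delay rather than
 * directly, e.g.:
 *
 *	echo -1 > /sys/block/<dev>/queue/io_poll_delay	# classic busy polling
 *	echo  0 > /sys/block/<dev>/queue/io_poll_delay	# hybrid, adaptive sleep
 *	echo 10 > /sys/block/<dev>/queue/io_poll_delay	# hybrid, fixed sleep (usec)
 */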
4857
4858 unsigned int blk_mq_rq_cpu(struct request *rq)
4859 {
4860         return rq->mq_ctx->cpu;
4861 }
4862 EXPORT_SYMBOL(blk_mq_rq_cpu);
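
/*
 * Illustrative use (hypothetical driver fields, sketch only): a driver may
 * use the submitting CPU to steer the completion back to a nearby response
 * queue or interrupt vector, e.g. in its queue_rq path:
 *
 *	cmd->resp_queue = dev->cpu_to_respq[blk_mq_rq_cpu(rq)];
 */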
4863
4864 void blk_mq_cancel_work_sync(struct request_queue *q)
4865 {
4866         if (queue_is_mq(q)) {
4867                 struct blk_mq_hw_ctx *hctx;
4868                 unsigned long i;
4869
4870                 cancel_delayed_work_sync(&q->requeue_work);
4871
4872                 queue_for_each_hw_ctx(q, hctx, i)
4873                         cancel_delayed_work_sync(&hctx->run_work);
4874         }
4875 }
4876
4877 static int __init blk_mq_init(void)
4878 {
4879         int i;
4880
4881         for_each_possible_cpu(i)
4882                 init_llist_head(&per_cpu(blk_cpu_done, i));
4883         open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
4884
4885         cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
4886                                   "block/softirq:dead", NULL,
4887                                   blk_softirq_cpu_dead);
4888         cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
4889                                 blk_mq_hctx_notify_dead);
4890         cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
4891                                 blk_mq_hctx_notify_online,
4892                                 blk_mq_hctx_notify_offline);
4893         return 0;
4894 }
4895 subsys_initcall(blk_mq_init);