block: use ktime_get_ns() instead of sched_clock() for cfq and bfq
[linux-2.6-block.git] / block / blk-mq.c
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/topology.h>
#include <linux/sched/signal.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/prefetch.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"
#include "blk-wbt.h"
#include "blk-mq-sched.h"

static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

static int blk_mq_poll_stats_bkt(const struct request *rq)
{
	int ddir, bytes, bucket;

	ddir = rq_data_dir(rq);
	bytes = blk_rq_bytes(rq);

	bucket = ddir + 2*(ilog2(bytes) - 9);

	if (bucket < 0)
		return -1;
	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}

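/*
 * Worked example for blk_mq_poll_stats_bkt() above (added for illustration,
 * not part of the original file): buckets come in read/write pairs per
 * power-of-two size class starting at 512 bytes. A 4096-byte request has
 * ilog2(4096) - 9 = 3, so a read maps to bucket 0 + 2*3 = 6 and a write to
 * bucket 1 + 2*3 = 7. Requests smaller than 512 bytes return -1 (untracked),
 * and anything whose bucket would reach BLK_MQ_POLL_STATS_BKTS is clamped
 * into the last read/write pair.
 */
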
/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	return !list_empty_careful(&hctx->dispatch) ||
		sbitmap_any_bit_set(&hctx->ctx_map) ||
		blk_mq_sched_has_work(hctx);
}

/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
		sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
}

struct mq_inflight {
	struct hd_struct *part;
	unsigned int *inflight;
};

static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
				  struct request *rq, void *priv,
				  bool reserved)
{
	struct mq_inflight *mi = priv;

	/*
	 * index[0] counts the specific partition that was asked for. index[1]
	 * counts the ones that are active on the whole device, so increment
	 * that if mi->part is indeed a partition, and not a whole device.
	 */
	if (rq->part == mi->part)
		mi->inflight[0]++;
	if (mi->part->partno)
		mi->inflight[1]++;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
}

static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
				     struct request *rq, void *priv,
				     bool reserved)
{
	struct mq_inflight *mi = priv;

	if (rq->part == mi->part)
		mi->inflight[rq_data_dir(rq)]++;
}

void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2])
{
	struct mq_inflight mi = { .part = part, .inflight = inflight, };

	inflight[0] = inflight[1] = 0;
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
}

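/*
 * Illustrative sketch (not part of the original file): how the two counters
 * filled in by blk_mq_in_flight() are typically consumed. inflight[0] is the
 * count for the partition that was asked about and inflight[1] the count for
 * the whole device; the diskstats/partition accounting code uses this pair.
 */
#if 0	/* example only; "part" and "q" are assumed to exist in the caller */
{
	unsigned int inflight[2];

	blk_mq_in_flight(q, part, inflight);
	pr_info("partition: %u in flight, whole device: %u in flight\n",
		inflight[0], inflight[1]);
}
#endif
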
void blk_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->q_usage_counter);
		if (q->mq_ops)
			blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);

void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);

int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
				     unsigned long timeout)
{
	return wait_event_timeout(q->mq_freeze_wq,
				  percpu_ref_is_zero(&q->q_usage_counter),
				  timeout);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait_timeout);

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_freeze_queue(struct request_queue *q)
{
	/*
	 * In the !blk_mq case we are only calling this to kill the
	 * q_usage_counter, otherwise this increases the freeze depth
	 * and waits for it to return to zero. For this reason there is
	 * no blk_unfreeze_queue(), and blk_freeze_queue() is not
	 * exported to drivers as the only user for unfreeze is blk_mq.
	 */
	blk_freeze_queue_start(q);
	if (!q->mq_ops)
		blk_drain_queue(q);
	blk_mq_freeze_queue_wait(q);
}

void blk_mq_freeze_queue(struct request_queue *q)
{
	/*
	 * ...just an alias to keep freeze and unfreeze actions balanced
	 * in the blk_mq_* namespace
	 */
	blk_freeze_queue(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->q_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

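/*
 * Illustrative sketch (not part of the original file): the usual way a driver
 * pairs the freeze/unfreeze helpers above when it must change queue state
 * that cannot tolerate in-flight requests. The helper name below is
 * hypothetical.
 */
#if 0	/* example only */
static void example_update_queue_limits(struct request_queue *q,
					unsigned int new_max_sectors)
{
	blk_mq_freeze_queue(q);		/* drain and block all requests */
	blk_queue_max_hw_sectors(q, new_max_sectors);
	blk_mq_unfreeze_queue(q);	/* resume normal operation */
}
#endif
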
/*
 * FIXME: replace the scsi_internal_device_*block_nowait() calls in the
 * mpt3sas driver such that this function can be removed.
 */
void blk_mq_quiesce_queue_nowait(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

/**
 * blk_mq_quiesce_queue() - wait until all ongoing dispatches have finished
 * @q: request queue.
 *
 * Note: this function does not prevent the struct request end_io()
 * callback from being invoked. Once this function has returned, no
 * dispatch can happen until the queue is unquiesced via
 * blk_mq_unquiesce_queue().
 */
void blk_mq_quiesce_queue(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	bool rcu = false;

	blk_mq_quiesce_queue_nowait(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->flags & BLK_MQ_F_BLOCKING)
			synchronize_srcu(hctx->srcu);
		else
			rcu = true;
	}
	if (rcu)
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);

/*
 * blk_mq_unquiesce_queue() - counterpart of blk_mq_quiesce_queue()
 * @q: request queue.
 *
 * This function restores the queue to the state it was in before
 * blk_mq_quiesce_queue() was called.
 */
void blk_mq_unquiesce_queue(struct request_queue *q)
{
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);

	/* dispatch requests which are inserted during quiescing */
	blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);

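/*
 * Illustrative sketch (not part of the original file): quiescing differs from
 * freezing in that it only stops future dispatch to ->queue_rq(); it does not
 * wait for outstanding requests to complete. A driver that needs to update
 * dispatch-side state, for example before resetting a controller, can do:
 */
#if 0	/* example only; "example_reconfigure_hw" and "dev" are hypothetical */
{
	blk_mq_quiesce_queue(q);	/* no new ->queue_rq() calls after this */
	example_reconfigure_hw(dev);
	blk_mq_unquiesce_queue(q);	/* dispatch queued requests again */
}
#endif
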
void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, unsigned int op)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct request *rq = tags->static_rqs[tag];
	req_flags_t rq_flags = 0;

	if (data->flags & BLK_MQ_REQ_INTERNAL) {
		rq->tag = -1;
		rq->internal_tag = tag;
	} else {
		if (blk_mq_tag_busy(data->hctx)) {
			rq_flags = RQF_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}
		rq->tag = tag;
		rq->internal_tag = -1;
		data->hctx->tags->rqs[rq->tag] = rq;
	}

	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = data->q;
	rq->mq_ctx = data->ctx;
	rq->rq_flags = rq_flags;
	rq->cpu = -1;
	rq->cmd_flags = op;
	if (data->flags & BLK_MQ_REQ_PREEMPT)
		rq->rq_flags |= RQF_PREEMPT;
	if (blk_queue_io_stat(data->q))
		rq->rq_flags |= RQF_IO_STAT;
	INIT_LIST_HEAD(&rq->queuelist);
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
	rq->io_start_time_ns = 0;
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->extra_len = 0;
	rq->__deadline = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->cgroup_io_start_time_ns = 0;
#endif

	data->ctx->rq_dispatched[op_is_sync(op)]++;
	return rq;
}

static struct request *blk_mq_get_request(struct request_queue *q,
		struct bio *bio, unsigned int op,
		struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = q->elevator;
	struct request *rq;
	unsigned int tag;
	bool put_ctx_on_error = false;

	blk_queue_enter_live(q);
	data->q = q;
	if (likely(!data->ctx)) {
		data->ctx = blk_mq_get_ctx(q);
		put_ctx_on_error = true;
	}
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
	if (op & REQ_NOWAIT)
		data->flags |= BLK_MQ_REQ_NOWAIT;

	if (e) {
		data->flags |= BLK_MQ_REQ_INTERNAL;

		/*
		 * Flush requests are special and go directly to the
		 * dispatch list.
		 */
		if (!op_is_flush(op) && e->type->ops.mq.limit_depth)
			e->type->ops.mq.limit_depth(op, data);
	}

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL) {
		if (put_ctx_on_error) {
			blk_mq_put_ctx(data->ctx);
			data->ctx = NULL;
		}
		blk_queue_exit(q);
		return NULL;
	}

	rq = blk_mq_rq_ctx_init(data, tag, op);
	if (!op_is_flush(op)) {
		rq->elv.icq = NULL;
		if (e && e->type->ops.mq.prepare_request) {
			if (e->type->icq_cache && rq_ioc(bio))
				blk_mq_sched_assign_ioc(rq, bio);

			e->type->ops.mq.prepare_request(rq, bio);
			rq->rq_flags |= RQF_ELVPRIV;
		}
	}
	data->hctx->queued++;
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
		blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	int ret;

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	blk_mq_put_ctx(alloc_data.ctx);

	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

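/*
 * Illustrative sketch (not part of the original file): blk_mq_alloc_request()
 * is the entry point drivers use for passthrough/internal commands. A typical
 * caller allocates a request, fills in its payload, executes it, and frees
 * it; the NVMe and SCSI passthrough paths follow this pattern.
 */
#if 0	/* example only */
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	rq->timeout = 30 * HZ;
	blk_execute_rq(q, NULL, rq, 0);		/* issue and wait */
	blk_mq_free_request(rq);
}
#endif
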
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
	struct blk_mq_alloc_data alloc_data = { .flags = flags };
	struct request *rq;
	unsigned int cpu;
	int ret;

	/*
	 * If the tag allocator sleeps we could get an allocation for a
	 * different hardware context.  No need to complicate the low level
	 * allocator for this for the rare use case of a command tied to
	 * a specific queue.
	 */
	if (WARN_ON_ONCE(!(flags & BLK_MQ_REQ_NOWAIT)))
		return ERR_PTR(-EINVAL);

	if (hctx_idx >= q->nr_hw_queues)
		return ERR_PTR(-EIO);

	ret = blk_queue_enter(q, flags);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * Check if the hardware context is actually mapped to anything.
	 * If not tell the caller that it should skip this queue.
	 */
	alloc_data.hctx = q->queue_hw_ctx[hctx_idx];
	if (!blk_mq_hw_queue_mapped(alloc_data.hctx)) {
		blk_queue_exit(q);
		return ERR_PTR(-EXDEV);
	}
	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);

	rq = blk_mq_get_request(q, NULL, op, &alloc_data);
	blk_queue_exit(q);

	if (!rq)
		return ERR_PTR(-EWOULDBLOCK);

	return rq;
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);

void blk_mq_free_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	const int sched_tag = rq->internal_tag;

	if (rq->rq_flags & RQF_ELVPRIV) {
		if (e && e->type->ops.mq.finish_request)
			e->type->ops.mq.finish_request(rq);
		if (rq->elv.icq) {
			put_io_context(rq->elv.icq->ioc);
			rq->elv.icq = NULL;
		}
	}

	ctx->rq_completed[rq_is_sync(rq)]++;
	if (rq->rq_flags & RQF_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);

	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
		laptop_io_completion(q->backing_dev_info);

	wbt_done(q->rq_wb, rq);

	if (blk_rq_rl(rq))
		blk_put_rl(blk_rq_rl(rq));

	blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
	if (rq->tag != -1)
		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
	if (sched_tag != -1)
		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
	blk_queue_exit(q);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		wbt_done(rq->q->rq_wb, rq);
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

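/*
 * Illustrative sketch (not part of the original file): a driver's interrupt
 * handler normally calls blk_mq_complete_request() (defined further below),
 * which funnels into the softirq/IPI machinery and ends up in the driver's
 * ->complete() hook (q->softirq_done_fn); blk_mq_end_request() is what
 * finally updates byte counts and frees the tag. All names below are
 * hypothetical.
 */
#if 0	/* example only */
static irqreturn_t example_irq_handler(int irq, void *data)
{
	struct example_dev *dev = data;
	struct request *rq = example_pop_completed(dev);	/* hypothetical */

	if (rq)
		blk_mq_complete_request(rq);
	return IRQ_HANDLED;
}

/* wired up as the blk_mq_ops ->complete() callback */
static void example_complete_rq(struct request *rq)
{
	blk_mq_end_request(rq, BLK_STS_OK);
}
#endif
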
static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void __blk_mq_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT);
	blk_mq_rq_update_state(rq, MQ_RQ_COMPLETE);

	if (rq->internal_tag != -1)
		blk_mq_sched_completed_request(rq);
	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq);
	}

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
	__releases(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
		rcu_read_unlock();
	else
		srcu_read_unlock(hctx->srcu, srcu_idx);
}

static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
	__acquires(hctx->srcu)
{
	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
		/* shut up gcc false positive */
		*srcu_idx = 0;
		rcu_read_lock();
	} else
		*srcu_idx = srcu_read_lock(hctx->srcu);
}

static void blk_mq_rq_update_aborted_gstate(struct request *rq, u64 gstate)
{
	unsigned long flags;

	/*
	 * blk_mq_rq_aborted_gstate() is used from the completion path and
	 * can thus be called from irq context.  u64_stats_fetch in the
	 * middle of update on the same CPU leads to lockup.  Disable irq
	 * while updating.
	 */
	local_irq_save(flags);
	u64_stats_update_begin(&rq->aborted_gstate_sync);
	rq->aborted_gstate = gstate;
	u64_stats_update_end(&rq->aborted_gstate_sync);
	local_irq_restore(flags);
}

static u64 blk_mq_rq_aborted_gstate(struct request *rq)
{
	unsigned int start;
	u64 aborted_gstate;

	do {
		start = u64_stats_fetch_begin(&rq->aborted_gstate_sync);
		aborted_gstate = rq->aborted_gstate;
	} while (u64_stats_fetch_retry(&rq->aborted_gstate_sync, start));

	return aborted_gstate;
}

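/*
 * Note added for illustration (not part of the original file): on 32-bit
 * kernels a 64-bit store is not atomic, so ->aborted_gstate is published
 * through a u64_stats seqcount. The reader above retries until it observes
 * a consistent value; the writer brackets the update and disables interrupts
 * so a completion running in irq context on the same CPU cannot spin on a
 * half-finished update. On 64-bit kernels both helpers reduce to a plain
 * load and store.
 */
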
/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
	int srcu_idx;

	if (unlikely(blk_should_fake_timeout(q)))
		return;

	/*
	 * If @rq->aborted_gstate equals the current instance, timeout is
	 * claiming @rq and we lost. This is synchronized through
	 * hctx_lock(). See blk_mq_timeout_work() for details.
	 *
	 * Completion path never blocks and we can directly use RCU here
	 * instead of hctx_lock() which can be either RCU or SRCU.
	 * However, that would complicate paths which want to synchronize
	 * against us. Let's stay in sync with the issue path so that
	 * hctx_lock() covers both issue and completion paths.
	 */
	hctx_lock(hctx, &srcu_idx);
	if (blk_mq_rq_aborted_gstate(rq) != rq->gstate)
		__blk_mq_complete_request(rq);
	hctx_unlock(hctx, srcu_idx);
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_sched_started_request(rq);

	trace_block_rq_issue(q, rq);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
		rq->io_start_time_ns = ktime_get_ns();
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
		rq->throtl_size = blk_rq_sectors(rq);
#endif
		rq->rq_flags |= RQF_STATS;
		wbt_issue(q->rq_wb, rq);
	}

	WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE);

	/*
	 * Mark @rq in-flight which also advances the generation number,
	 * and register for timeout.  Protect with a seqcount to allow the
	 * timeout path to read both @rq->gstate and @rq->deadline
	 * coherently.
	 *
	 * This is the only place where a request is marked in-flight. If
	 * the timeout path reads an in-flight @rq->gstate, the
	 * @rq->deadline it reads together under @rq->gstate_seq is
	 * guaranteed to be the matching one.
	 */
	preempt_disable();
	write_seqcount_begin(&rq->gstate_seq);

	blk_mq_rq_update_state(rq, MQ_RQ_IN_FLIGHT);
	blk_add_timer(rq);

	write_seqcount_end(&rq->gstate_seq);
	preempt_enable();

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears.  We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

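/*
 * Illustrative sketch (not part of the original file): every driver's
 * ->queue_rq() is expected to call blk_mq_start_request() before handing the
 * request to hardware, so the state/generation update and timeout arming
 * above happen exactly once per issue (and again after a requeue). The
 * helper below is hypothetical.
 */
#if 0	/* example only */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	if (!example_submit_to_hw(hctx->driver_data, rq))
		return BLK_STS_RESOURCE;	/* blk-mq requeues and retries */
	return BLK_STS_OK;
}
#endif
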
/*
 * When we reach here because queue is busy, it's safe to change the state
 * to IDLE without checking @rq->aborted_gstate because we should still be
 * holding the RCU read lock and thus protected against timeout.
 */
static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	blk_mq_put_driver_tag(rq);

	trace_block_rq_requeue(q, rq);
	wbt_requeue(q->rq_wb, rq);

	if (blk_mq_rq_state(rq) != MQ_RQ_IDLE) {
		blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
	__blk_mq_requeue_request(rq);

	/* this request will be re-inserted to io scheduler queue */
	blk_mq_sched_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work.work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	spin_lock_irq(&q->requeue_lock);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_sched_insert_request(rq, false, false, false);
	}

	blk_mq_run_hw_queues(q, false);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
				bool kick_requeue_list)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->rq_flags & RQF_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->rq_flags |= RQF_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	if (kick_requeue_list)
		blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_delay_kick_requeue_list(struct request_queue *q,
				    unsigned long msecs)
{
	kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work,
				    msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

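/*
 * Illustrative sketch (not part of the original file): the requeue list lets
 * error-handling or completion paths push a request back toward the
 * scheduler without sleeping; the actual re-insertion happens later from
 * blk_mq_requeue_work() above. A driver retrying after a transient device
 * error might do either of the following:
 */
#if 0	/* example only */
{
	/* re-insert at the head and kick the requeue worker immediately */
	blk_mq_requeue_request(rq, true);

	/* or back off: queue it now, kick the list after roughly 100ms */
	blk_mq_requeue_request(rq, false);
	blk_mq_delay_kick_requeue_list(rq->q, 100);
}
#endif
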
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	if (tag < tags->nr_tags) {
		prefetch(tags->rqs[tag]);
		return tags->rqs[tag];
	}

	return NULL;
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
	unsigned int nr_expired;
};

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	const struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	req->rq_flags |= RQF_MQ_TIMEOUT_EXPIRED;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		/*
		 * As nothing prevents a completion from happening while
		 * ->aborted_gstate is set, this may lead to ignored
		 * completions and further spurious timeouts.
		 */
		blk_mq_rq_update_aborted_gstate(req, 0);
		blk_add_timer(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;
	unsigned long gstate, deadline;
	int start;

	might_sleep();

	if (rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED)
		return;

	/* read coherent snapshots of @rq->gstate and @rq->deadline */
	while (true) {
		start = read_seqcount_begin(&rq->gstate_seq);
		gstate = READ_ONCE(rq->gstate);
		deadline = blk_rq_deadline(rq);
		if (!read_seqcount_retry(&rq->gstate_seq, start))
			break;
		cond_resched();
	}

	/* if in-flight && overdue, mark for abortion */
	if ((gstate & MQ_RQ_STATE_MASK) == MQ_RQ_IN_FLIGHT &&
	    time_after_eq(jiffies, deadline)) {
		blk_mq_rq_update_aborted_gstate(rq, gstate);
		data->nr_expired++;
		hctx->nr_expired++;
	} else if (!data->next_set || time_after(data->next, deadline)) {
		data->next = deadline;
		data->next_set = 1;
	}
}

static void blk_mq_terminate_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	/*
	 * We marked @rq->aborted_gstate and waited for RCU.  If there were
	 * completions that we lost to, they would have finished and
	 * updated @rq->gstate by now; otherwise, the completion path is
	 * now guaranteed to see @rq->aborted_gstate and yield.  If
	 * @rq->aborted_gstate still matches @rq->gstate, @rq is ours.
	 */
	if (!(rq->rq_flags & RQF_MQ_TIMEOUT_EXPIRED) &&
	    READ_ONCE(rq->gstate) == rq->aborted_gstate)
		blk_mq_rq_timed_out(rq, reserved);
}

static void blk_mq_timeout_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, timeout_work);
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
		.nr_expired	= 0,
	};
	struct blk_mq_hw_ctx *hctx;
	int i;

	/* A deadlock might occur if a request is stuck requiring a
	 * timeout at the same time a queue freeze is waiting for
	 * completion, since the timeout code would not be able to
	 * acquire the queue reference here.
	 *
	 * That's why we don't use blk_queue_enter here; instead, we use
	 * percpu_ref_tryget directly, because we need to be able to
	 * obtain a reference even in the short window between the queue
	 * starting to freeze, by dropping the first reference in
	 * blk_freeze_queue_start, and the moment the last request is
	 * consumed, marked by the instant q_usage_counter reaches
	 * zero.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	/* scan for the expired ones and set their ->aborted_gstate */
	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.nr_expired) {
		bool has_rcu = false;

		/*
		 * Wait till everyone sees ->aborted_gstate.  The
		 * sequential waits for SRCUs aren't ideal.  If this ever
		 * becomes a problem, we can add per-hw_ctx rcu_head and
		 * wait in parallel.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			if (!hctx->nr_expired)
				continue;

			if (!(hctx->flags & BLK_MQ_F_BLOCKING))
				has_rcu = true;
			else
				synchronize_srcu(hctx->srcu);

			hctx->nr_expired = 0;
		}
		if (has_rcu)
			synchronize_rcu();

		/* terminate the ones we won */
		blk_mq_queue_tag_busy_iter(q, blk_mq_terminate_expired, NULL);
	}

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		/*
		 * Request timeouts are handled as a forward rolling timer. If
		 * we end up here it means that no requests are pending and
		 * also that no request has been pending for a while. Mark
		 * each hctx as idle.
		 */
		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
	blk_queue_exit(q);
}

struct flush_busy_ctx_data {
	struct blk_mq_hw_ctx *hctx;
	struct list_head *list;
};

static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
	struct flush_busy_ctx_data *flush_data = data;
	struct blk_mq_hw_ctx *hctx = flush_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	spin_lock(&ctx->lock);
	list_splice_tail_init(&ctx->rq_list, flush_data->list);
	sbitmap_clear_bit(sb, bitnr);
	spin_unlock(&ctx->lock);
	return true;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct flush_busy_ctx_data data = {
		.hctx = hctx,
		.list = list,
	};

	sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
}
EXPORT_SYMBOL_GPL(blk_mq_flush_busy_ctxs);

struct dispatch_rq_data {
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
};

static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
		void *data)
{
	struct dispatch_rq_data *dispatch_data = data;
	struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
	struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];

	spin_lock(&ctx->lock);
	if (unlikely(!list_empty(&ctx->rq_list))) {
		dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
		list_del_init(&dispatch_data->rq->queuelist);
		if (list_empty(&ctx->rq_list))
			sbitmap_clear_bit(sb, bitnr);
	}
	spin_unlock(&ctx->lock);

	return !dispatch_data->rq;
}

struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *start)
{
	unsigned off = start ? start->index_hw : 0;
	struct dispatch_rq_data data = {
		.hctx = hctx,
		.rq   = NULL,
	};

	__sbitmap_for_each_set(&hctx->ctx_map, off,
			       dispatch_rq_from_ctx, &data);

	return data.rq;
}

static inline unsigned int queued_to_index(unsigned int queued)
{
	if (!queued)
		return 0;

	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
}

bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait)
{
	struct blk_mq_alloc_data data = {
		.q = rq->q,
		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
	};

	might_sleep_if(wait);

	if (rq->tag != -1)
		goto done;

	if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
		data.flags |= BLK_MQ_REQ_RESERVED;

	rq->tag = blk_mq_get_tag(&data);
	if (rq->tag >= 0) {
		if (blk_mq_tag_busy(data.hctx)) {
			rq->rq_flags |= RQF_MQ_INFLIGHT;
			atomic_inc(&data.hctx->nr_active);
		}
		data.hctx->tags->rqs[rq->tag] = rq;
	}

done:
	if (hctx)
		*hctx = data.hctx;
	return rq->tag != -1;
}

static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
				int flags, void *key)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del_init(&wait->entry);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

/*
 * Mark us waiting for a tag. For shared tags, this involves hooking us into
 * the tag wakeups. For non-shared tags, we can simply mark us needing a
 * restart. For both cases, take care to check the condition again after
 * marking us as waiting.
 */
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
				 struct request *rq)
{
	struct blk_mq_hw_ctx *this_hctx = *hctx;
	struct sbq_wait_state *ws;
	wait_queue_entry_t *wait;
	bool ret;

	if (!(this_hctx->flags & BLK_MQ_F_TAG_SHARED)) {
		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state))
			set_bit(BLK_MQ_S_SCHED_RESTART, &this_hctx->state);

		/*
		 * It's possible that a tag was freed in the window between the
		 * allocation failure and adding the hardware queue to the wait
		 * queue.
		 *
		 * Don't clear RESTART here, someone else could have set it.
		 * At most this will cost an extra queue run.
		 */
		return blk_mq_get_driver_tag(rq, hctx, false);
	}

	wait = &this_hctx->dispatch_wait;
	if (!list_empty_careful(&wait->entry))
		return false;

	spin_lock(&this_hctx->lock);
	if (!list_empty(&wait->entry)) {
		spin_unlock(&this_hctx->lock);
		return false;
	}

	ws = bt_wait_ptr(&this_hctx->tags->bitmap_tags, this_hctx);
	add_wait_queue(&ws->wait, wait);

	/*
	 * It's possible that a tag was freed in the window between the
	 * allocation failure and adding the hardware queue to the wait
	 * queue.
	 */
	ret = blk_mq_get_driver_tag(rq, hctx, false);
	if (!ret) {
		spin_unlock(&this_hctx->lock);
		return false;
	}

	/*
	 * We got a tag, remove ourselves from the wait queue to ensure
	 * someone else gets the wakeup.
	 */
	spin_lock_irq(&ws->wait.lock);
	list_del_init(&wait->entry);
	spin_unlock_irq(&ws->wait.lock);
	spin_unlock(&this_hctx->lock);

	return true;
}

#define BLK_MQ_RESOURCE_DELAY	3		/* ms units */

bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
			     bool got_budget)
{
	struct blk_mq_hw_ctx *hctx;
	struct request *rq, *nxt;
	bool no_tag = false;
	int errors, queued;
	blk_status_t ret = BLK_STS_OK;

	if (list_empty(list))
		return false;

	WARN_ON(!list_is_singular(list) && got_budget);

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	errors = queued = 0;
	do {
		struct blk_mq_queue_data bd;

		rq = list_first_entry(list, struct request, queuelist);

		hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
			break;

		if (!blk_mq_get_driver_tag(rq, NULL, false)) {
			/*
			 * The initial allocation attempt failed, so we need to
			 * rerun the hardware queue when a tag is freed. The
			 * waitqueue takes care of that. If the queue is run
			 * before we add this entry back on the dispatch list,
			 * we'll re-run it below.
			 */
			if (!blk_mq_mark_tag_wait(&hctx, rq)) {
				blk_mq_put_dispatch_budget(hctx);
				/*
				 * For non-shared tags, the RESTART check
				 * will suffice.
				 */
				if (hctx->flags & BLK_MQ_F_TAG_SHARED)
					no_tag = true;
				break;
			}
		}

		list_del_init(&rq->queuelist);

		bd.rq = rq;

		/*
		 * Flag last if we have no more requests, or if we have more
		 * but can't assign a driver tag to it.
		 */
		if (list_empty(list))
			bd.last = true;
		else {
			nxt = list_first_entry(list, struct request, queuelist);
			bd.last = !blk_mq_get_driver_tag(nxt, NULL, false);
		}

		ret = q->mq_ops->queue_rq(hctx, &bd);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			/*
			 * If an I/O scheduler has been configured and we got a
			 * driver tag for the next request already, free it
			 * again.
			 */
			if (!list_empty(list)) {
				nxt = list_first_entry(list, struct request, queuelist);
				blk_mq_put_driver_tag(nxt);
			}
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			break;
		}

		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			continue;
		}

		queued++;
	} while (!list_empty(list));

	hctx->dispatched[queued_to_index(queued)]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(list)) {
		bool needs_restart;

		spin_lock(&hctx->lock);
		list_splice_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);

		/*
		 * If SCHED_RESTART was set by the caller of this function and
		 * it is no longer set that means that it was cleared by another
		 * thread and hence that a queue rerun is needed.
		 *
		 * If 'no_tag' is set, that means that we failed getting
		 * a driver tag with an I/O scheduler attached. If our dispatch
		 * waitqueue is no longer active, ensure that we run the queue
		 * AFTER adding our entries back to the list.
		 *
		 * If no I/O scheduler has been configured it is possible that
		 * the hardware queue got stopped and restarted before requests
		 * were pushed back onto the dispatch list. Rerun the queue to
		 * avoid starvation. Notes:
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *
		 * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
		 * bit is set, run queue after a delay to avoid IO stalls
		 * that could otherwise occur if the queue is idle.
		 */
		needs_restart = blk_mq_sched_needs_restart(hctx);
		if (!needs_restart ||
		    (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
			blk_mq_run_hw_queue(hctx, true);
		else if (needs_restart && (ret == BLK_STS_RESOURCE))
			blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
	}

	return (queued + errors) != 0;
}

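/*
 * Note added for illustration (not part of the original file): the contract
 * with ->queue_rq() visible in the loop above is that BLK_STS_RESOURCE and
 * BLK_STS_DEV_RESOURCE both mean "could not dispatch now, try again later";
 * the request is put back on the dispatch list and the queue is re-run via
 * SCHED_RESTART, the dispatch waitqueue, or the BLK_MQ_RESOURCE_DELAY
 * fallback. Any other non-OK status fails the request with an I/O error.
 * BLK_STS_DEV_RESOURCE additionally promises that the driver itself will
 * rerun the queue once the resource becomes available, which is why the
 * delayed rerun above is only armed for plain BLK_STS_RESOURCE.
 */
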
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	int srcu_idx;

	/*
	 * We should be running this queue from one of the CPUs that
	 * are mapped to it.
	 *
	 * There are at least two related races now between setting
	 * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
	 * __blk_mq_run_hw_queue():
	 *
	 * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
	 *   but later it becomes online; in that case this warning is
	 *   completely harmless
	 *
	 * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
	 *   but later it becomes offline; then the warning can't be
	 *   triggered, and we depend on the blk-mq timeout handler to
	 *   handle requests dispatched to this hctx
	 */
	if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
		cpu_online(hctx->next_cpu)) {
		printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
			raw_smp_processor_id(),
			cpumask_empty(hctx->cpumask) ? "inactive": "active");
		dump_stack();
	}

	/*
	 * We can't run the queue inline with ints disabled. Ensure that
	 * we catch bad users of this early.
	 */
	WARN_ON_ONCE(in_interrupt());

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	hctx_lock(hctx, &srcu_idx);
	blk_mq_sched_dispatch_requests(hctx);
	hctx_unlock(hctx, srcu_idx);
}

static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
{
	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);
	return cpu;
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	bool tried = false;
	int next_cpu = hctx->next_cpu;

	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
select_cpu:
		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
				cpu_online_mask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = blk_mq_first_mapped_cpu(hctx);
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
	}

	/*
	 * Do an unbound schedule if we can't find an online CPU for this
	 * hctx, which should only happen in the path of handling CPU DEAD.
	 */
	if (!cpu_online(next_cpu)) {
		if (!tried) {
			tried = true;
			goto select_cpu;
		}

		/*
		 * Make sure to re-select CPU next time once after CPUs
		 * in hctx->cpumask become online again.
		 */
		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = 1;
		return WORK_CPU_UNBOUND;
	}

	hctx->next_cpu = next_cpu;
	return next_cpu;
}

static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
				    msecs_to_jiffies(msecs));
}

void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);

bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	int srcu_idx;
	bool need_run;

	/*
	 * When the queue is quiesced we may be switching the io scheduler,
	 * updating nr_hw_queues, or similar, and the queue can't be run
	 * any more; even __blk_mq_hctx_has_pending() can't be called safely.
	 *
	 * The queue will be rerun in blk_mq_unquiesce_queue() if it is
	 * quiesced.
	 */
	hctx_lock(hctx, &srcu_idx);
	need_run = !blk_queue_quiesced(hctx->queue) &&
		blk_mq_hctx_has_pending(hctx);
	hctx_unlock(hctx, srcu_idx);

	if (need_run) {
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

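/*
 * Illustrative sketch (not part of the original file): blk_mq_run_hw_queues()
 * is the standard "something changed, try dispatching again" hook. Drivers
 * typically call it after freeing device resources that earlier forced a
 * BLK_STS_RESOURCE return, for example from a completion path:
 */
#if 0	/* example only */
{
	/* async=true: defer the run to kblockd instead of dispatching inline */
	blk_mq_run_hw_queues(q, true);
}
#endif
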
/**
 * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
 * @q: request queue.
 *
 * The caller is responsible for serializing this function against
 * blk_mq_{start,stop}_hw_queue().
 */
bool blk_mq_queue_stopped(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hctx_stopped(hctx))
			return true;

	return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
 * This function is often used by drivers to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, in which case
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queue() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);

	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

/*
 * This function is often used by drivers to pause .queue_rq() when there
 * aren't enough resources or some condition isn't satisfied, in which case
 * BLK_STS_RESOURCE is usually returned.
 *
 * We do not guarantee that dispatch can be drained or blocked
 * after blk_mq_stop_hw_queues() returns. Please use
 * blk_mq_quiesce_queue() for that requirement.
 */
void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

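/*
 * Illustrative sketch (not part of the original file): because stopped queues
 * are not drained, the stop/start helpers here suit "pause dispatch until my
 * completion handler makes room" schemes. A hypothetical driver pattern:
 */
#if 0	/* example only, fragments shown out of their functions */
	/* in ->queue_rq(), when the device ring is full: */
	blk_mq_stop_hw_queue(hctx);
	return BLK_STS_RESOURCE;

	/* later, in the completion handler, once ring entries were freed: */
	blk_mq_start_stopped_hw_queues(q, true);
#endif
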
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (!blk_mq_hctx_stopped(hctx))
		return;

	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
	blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	/*
	 * If we are stopped, don't run the queue.
	 */
	if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	__blk_mq_run_hw_queue(hctx);
}

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
					    struct request *rq,
					    bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	lockdep_assert_held(&ctx->lock);

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}

/*
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 */
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

	spin_lock(&hctx->lock);
	list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, false);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)

{
	/*
	 * preemption doesn't flush plug list, so it's possible ctx->cpu is
	 * offline now
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		BUG_ON(rq->mq_ctx != ctx);
		list_del_init(&rq->queuelist);
		__blk_mq_insert_req_list(hctx, rq, false);
	}
	blk_mq_hctx_mark_pending(hctx, ctx);
	spin_unlock(&ctx->lock);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				trace_block_unplug(this_q, depth, from_schedule);
				blk_mq_sched_insert_requests(this_q, this_ctx,
								&ctx_list,
								from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		trace_block_unplug(this_q, depth, from_schedule);
		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
						from_schedule);
	}
}

static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	blk_init_request_from_bio(rq, bio);

	blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));

	blk_account_io_start(rq, true);
}

static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
				   struct blk_mq_ctx *ctx,
				   struct request *rq)
{
	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, false);
	spin_unlock(&ctx->lock);
}

fd2d3326
JA
1732static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
1733{
bd166ef1
JA
1734 if (rq->tag != -1)
1735 return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
1736
1737 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
fd2d3326
JA
1738}
1739
0f95549c
MS
1740static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
1741 struct request *rq,
1742 blk_qc_t *cookie)
f984df1f 1743{
f984df1f 1744 struct request_queue *q = rq->q;
f984df1f
SL
1745 struct blk_mq_queue_data bd = {
1746 .rq = rq,
d945a365 1747 .last = true,
f984df1f 1748 };
bd166ef1 1749 blk_qc_t new_cookie;
f06345ad 1750 blk_status_t ret;
0f95549c
MS
1751
1752 new_cookie = request_to_qc_t(hctx, rq);
1753
1754 /*
1755	 * For an OK return we are done. For a hard error, the caller may kill it.
1756	 * For any other return (busy), requeue the request here so the caller
1757	 * can add it back to a list, as we previously would have done.
1758 */
1759 ret = q->mq_ops->queue_rq(hctx, &bd);
1760 switch (ret) {
1761 case BLK_STS_OK:
1762 *cookie = new_cookie;
1763 break;
1764 case BLK_STS_RESOURCE:
86ff7c2a 1765 case BLK_STS_DEV_RESOURCE:
0f95549c
MS
1766 __blk_mq_requeue_request(rq);
1767 break;
1768 default:
1769 *cookie = BLK_QC_T_NONE;
1770 break;
1771 }
1772
1773 return ret;
1774}
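
The return value contract here mirrors the switch above: BLK_STS_OK means the driver accepted the request and *cookie is valid; BLK_STS_RESOURCE and BLK_STS_DEV_RESOURCE mean the request was un-prepared again via __blk_mq_requeue_request() and the caller is expected to insert it for later dispatch; any other status is a hard error that the caller completes with blk_mq_end_request().
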
1775
0f95549c
MS
1776static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1777 struct request *rq,
396eaf21
ML
1778 blk_qc_t *cookie,
1779 bool bypass_insert)
0f95549c
MS
1780{
1781 struct request_queue *q = rq->q;
d964f04a
ML
1782 bool run_queue = true;
1783
23d4ee19
ML
1784 /*
1785 * RCU or SRCU read lock is needed before checking quiesced flag.
1786 *
1787 * When queue is stopped or quiesced, ignore 'bypass_insert' from
c77ff7fd 1788 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
23d4ee19
ML
1789	 * and avoid the driver trying to dispatch it again.
1790 */
f4560ffe 1791 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
d964f04a 1792 run_queue = false;
23d4ee19 1793 bypass_insert = false;
d964f04a
ML
1794 goto insert;
1795 }
f984df1f 1796
396eaf21 1797 if (q->elevator && !bypass_insert)
2253efc8
BVA
1798 goto insert;
1799
0bca799b 1800 if (!blk_mq_get_dispatch_budget(hctx))
bd166ef1
JA
1801 goto insert;
1802
0bca799b
ML
1803 if (!blk_mq_get_driver_tag(rq, NULL, false)) {
1804 blk_mq_put_dispatch_budget(hctx);
de148297 1805 goto insert;
88022d72 1806 }
de148297 1807
0f95549c 1808 return __blk_mq_issue_directly(hctx, rq, cookie);
2253efc8 1809insert:
396eaf21
ML
1810 if (bypass_insert)
1811 return BLK_STS_RESOURCE;
0f95549c 1812
23d4ee19 1813 blk_mq_sched_insert_request(rq, false, run_queue, false);
0f95549c 1814 return BLK_STS_OK;
f984df1f
SL
1815}
1816
5eb6126e
CH
1817static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
1818 struct request *rq, blk_qc_t *cookie)
1819{
0f95549c 1820 blk_status_t ret;
04ced159 1821 int srcu_idx;
bf4907c0 1822
04ced159 1823 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
bf4907c0 1824
04ced159 1825 hctx_lock(hctx, &srcu_idx);
0f95549c 1826
396eaf21 1827 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
86ff7c2a 1828 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
23d4ee19 1829 blk_mq_sched_insert_request(rq, false, true, false);
0f95549c
MS
1830 else if (ret != BLK_STS_OK)
1831 blk_mq_end_request(rq, ret);
1832
04ced159 1833 hctx_unlock(hctx, srcu_idx);
5eb6126e
CH
1834}
1835
c77ff7fd 1836blk_status_t blk_mq_request_issue_directly(struct request *rq)
396eaf21
ML
1837{
1838 blk_status_t ret;
1839 int srcu_idx;
1840 blk_qc_t unused_cookie;
1841 struct blk_mq_ctx *ctx = rq->mq_ctx;
1842 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
1843
1844 hctx_lock(hctx, &srcu_idx);
1845 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
1846 hctx_unlock(hctx, srcu_idx);
1847
1848 return ret;
5eb6126e
CH
1849}
1850
dece1635 1851static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
07068d5b 1852{
ef295ecf 1853 const int is_sync = op_is_sync(bio->bi_opf);
f73f44eb 1854 const int is_flush_fua = op_is_flush(bio->bi_opf);
5a797e00 1855 struct blk_mq_alloc_data data = { .flags = 0 };
07068d5b 1856 struct request *rq;
5eb6126e 1857 unsigned int request_count = 0;
f984df1f 1858 struct blk_plug *plug;
5b3f341f 1859 struct request *same_queue_rq = NULL;
7b371636 1860 blk_qc_t cookie;
87760e5e 1861 unsigned int wb_acct;
07068d5b
JA
1862
1863 blk_queue_bounce(q, &bio);
1864
af67c31f 1865 blk_queue_split(q, &bio);
f36ea50c 1866
e23947bd 1867 if (!bio_integrity_prep(bio))
dece1635 1868 return BLK_QC_T_NONE;
07068d5b 1869
87c279e6
OS
1870 if (!is_flush_fua && !blk_queue_nomerges(q) &&
1871 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
1872 return BLK_QC_T_NONE;
f984df1f 1873
bd166ef1
JA
1874 if (blk_mq_sched_bio_merge(q, bio))
1875 return BLK_QC_T_NONE;
1876
87760e5e
JA
1877 wb_acct = wbt_wait(q->rq_wb, bio, NULL);
1878
bd166ef1
JA
1879 trace_block_getrq(q, bio, bio->bi_opf);
1880
d2c0d383 1881 rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
87760e5e
JA
1882 if (unlikely(!rq)) {
1883 __wbt_done(q->rq_wb, wb_acct);
03a07c92
GR
1884 if (bio->bi_opf & REQ_NOWAIT)
1885 bio_wouldblock_error(bio);
dece1635 1886 return BLK_QC_T_NONE;
87760e5e
JA
1887 }
1888
a8a45941 1889 wbt_track(rq, wb_acct);
07068d5b 1890
fd2d3326 1891 cookie = request_to_qc_t(data.hctx, rq);
07068d5b 1892
f984df1f 1893 plug = current->plug;
07068d5b 1894 if (unlikely(is_flush_fua)) {
f984df1f 1895 blk_mq_put_ctx(data.ctx);
07068d5b 1896 blk_mq_bio_to_request(rq, bio);
923218f6
ML
1897
1898 /* bypass scheduler for flush rq */
1899 blk_insert_flush(rq);
1900 blk_mq_run_hw_queue(data.hctx, true);
a4d907b6 1901 } else if (plug && q->nr_hw_queues == 1) {
600271d9
SL
1902 struct request *last = NULL;
1903
b00c53e8 1904 blk_mq_put_ctx(data.ctx);
e6c4438b 1905 blk_mq_bio_to_request(rq, bio);
0a6219a9
ML
1906
1907 /*
1908	 * @request_count may become stale because we may have been
1909	 * scheduled out, so check the list again.
1910 */
1911 if (list_empty(&plug->mq_list))
1912 request_count = 0;
254d259d
CH
1913 else if (blk_queue_nomerges(q))
1914 request_count = blk_plug_queued_count(q);
1915
676d0607 1916 if (!request_count)
e6c4438b 1917 trace_block_plug(q);
600271d9
SL
1918 else
1919 last = list_entry_rq(plug->mq_list.prev);
b094f89c 1920
600271d9
SL
1921 if (request_count >= BLK_MAX_REQUEST_COUNT || (last &&
1922 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
e6c4438b
JM
1923 blk_flush_plug_list(plug, false);
1924 trace_block_plug(q);
320ae51f 1925 }
b094f89c 1926
e6c4438b 1927 list_add_tail(&rq->queuelist, &plug->mq_list);
2299722c 1928 } else if (plug && !blk_queue_nomerges(q)) {
bd166ef1 1929 blk_mq_bio_to_request(rq, bio);
07068d5b 1930
07068d5b 1931 /*
6a83e74d 1932 * We do limited plugging. If the bio can be merged, do that.
f984df1f
SL
1933 * Otherwise the existing request in the plug list will be
1934	 * issued, so the plug list will have at most one request.
2299722c
CH
1935 * The plug list might get flushed before this. If that happens,
1936 * the plug list is empty, and same_queue_rq is invalid.
07068d5b 1937 */
2299722c
CH
1938 if (list_empty(&plug->mq_list))
1939 same_queue_rq = NULL;
1940 if (same_queue_rq)
1941 list_del_init(&same_queue_rq->queuelist);
1942 list_add_tail(&rq->queuelist, &plug->mq_list);
1943
bf4907c0
JA
1944 blk_mq_put_ctx(data.ctx);
1945
dad7a3be
ML
1946 if (same_queue_rq) {
1947 data.hctx = blk_mq_map_queue(q,
1948 same_queue_rq->mq_ctx->cpu);
2299722c
CH
1949 blk_mq_try_issue_directly(data.hctx, same_queue_rq,
1950 &cookie);
dad7a3be 1951 }
a4d907b6 1952 } else if (q->nr_hw_queues > 1 && is_sync) {
bf4907c0 1953 blk_mq_put_ctx(data.ctx);
2299722c 1954 blk_mq_bio_to_request(rq, bio);
2299722c 1955 blk_mq_try_issue_directly(data.hctx, rq, &cookie);
a4d907b6 1956 } else if (q->elevator) {
b00c53e8 1957 blk_mq_put_ctx(data.ctx);
bd166ef1 1958 blk_mq_bio_to_request(rq, bio);
9e97d295 1959 blk_mq_sched_insert_request(rq, false, true, true);
ab42f35d 1960 } else {
b00c53e8 1961 blk_mq_put_ctx(data.ctx);
ab42f35d
ML
1962 blk_mq_bio_to_request(rq, bio);
1963 blk_mq_queue_io(data.hctx, data.ctx, rq);
a4d907b6 1964 blk_mq_run_hw_queue(data.hctx, true);
ab42f35d 1965 }
320ae51f 1966
7b371636 1967 return cookie;
320ae51f
JA
1968}
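
To summarize the submission path above: flush/FUA requests bypass the scheduler through blk_insert_flush(); on a single hardware queue with a plug active, requests are batched on the plug list, which is flushed once it grows past BLK_MAX_REQUEST_COUNT or the last request reaches BLK_PLUG_FLUSH_SIZE; with a plug and merging enabled, at most one request per queue sits on the plug list while the previous one is issued directly; sync requests on multi-queue devices are issued directly; otherwise the request goes through the I/O scheduler if one is attached, or straight into the software queue followed by a queue run.
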
1969
cc71a6f4
JA
1970void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
1971 unsigned int hctx_idx)
95363efd 1972{
e9b267d9 1973 struct page *page;
320ae51f 1974
24d2f903 1975 if (tags->rqs && set->ops->exit_request) {
e9b267d9 1976 int i;
320ae51f 1977
24d2f903 1978 for (i = 0; i < tags->nr_tags; i++) {
2af8cbe3
JA
1979 struct request *rq = tags->static_rqs[i];
1980
1981 if (!rq)
e9b267d9 1982 continue;
d6296d39 1983 set->ops->exit_request(set, rq, hctx_idx);
2af8cbe3 1984 tags->static_rqs[i] = NULL;
e9b267d9 1985 }
320ae51f 1986 }
320ae51f 1987
24d2f903
CH
1988 while (!list_empty(&tags->page_list)) {
1989 page = list_first_entry(&tags->page_list, struct page, lru);
6753471c 1990 list_del_init(&page->lru);
f75782e4
CM
1991 /*
1992 * Remove kmemleak object previously allocated in
1993 * blk_mq_init_rq_map().
1994 */
1995 kmemleak_free(page_address(page));
320ae51f
JA
1996 __free_pages(page, page->private);
1997 }
cc71a6f4 1998}
320ae51f 1999
cc71a6f4
JA
2000void blk_mq_free_rq_map(struct blk_mq_tags *tags)
2001{
24d2f903 2002 kfree(tags->rqs);
cc71a6f4 2003 tags->rqs = NULL;
2af8cbe3
JA
2004 kfree(tags->static_rqs);
2005 tags->static_rqs = NULL;
320ae51f 2006
24d2f903 2007 blk_mq_free_tags(tags);
320ae51f
JA
2008}
2009
cc71a6f4
JA
2010struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
2011 unsigned int hctx_idx,
2012 unsigned int nr_tags,
2013 unsigned int reserved_tags)
320ae51f 2014{
24d2f903 2015 struct blk_mq_tags *tags;
59f082e4 2016 int node;
320ae51f 2017
59f082e4
SL
2018 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
2019 if (node == NUMA_NO_NODE)
2020 node = set->numa_node;
2021
2022 tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
24391c0d 2023 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
24d2f903
CH
2024 if (!tags)
2025 return NULL;
320ae51f 2026
cc71a6f4 2027 tags->rqs = kzalloc_node(nr_tags * sizeof(struct request *),
36e1f3d1 2028 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
59f082e4 2029 node);
24d2f903
CH
2030 if (!tags->rqs) {
2031 blk_mq_free_tags(tags);
2032 return NULL;
2033 }
320ae51f 2034
2af8cbe3
JA
2035 tags->static_rqs = kzalloc_node(nr_tags * sizeof(struct request *),
2036 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
59f082e4 2037 node);
2af8cbe3
JA
2038 if (!tags->static_rqs) {
2039 kfree(tags->rqs);
2040 blk_mq_free_tags(tags);
2041 return NULL;
2042 }
2043
cc71a6f4
JA
2044 return tags;
2045}
2046
2047static size_t order_to_size(unsigned int order)
2048{
2049 return (size_t)PAGE_SIZE << order;
2050}
2051
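As a quick sanity check of the helper above (assuming the common 4 KiB page size): order_to_size(0) is 4 KiB, and order_to_size(4), the max_order used by blk_mq_alloc_rqs() below, is 64 KiB per allocated chunk.
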
1d9bd516
TH
2052static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
2053 unsigned int hctx_idx, int node)
2054{
2055 int ret;
2056
2057 if (set->ops->init_request) {
2058 ret = set->ops->init_request(set, rq, hctx_idx, node);
2059 if (ret)
2060 return ret;
2061 }
2062
2063 seqcount_init(&rq->gstate_seq);
2064 u64_stats_init(&rq->aborted_gstate_sync);
f4560231
JW
2065 /*
2066	 * Start gstate with gen 1 instead of 0, otherwise it will be equal
2067	 * to aborted_gstate and be identified as timed out by
2068 * blk_mq_terminate_expired.
2069 */
2070 WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
2071
1d9bd516
TH
2072 return 0;
2073}
2074
cc71a6f4
JA
2075int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
2076 unsigned int hctx_idx, unsigned int depth)
2077{
2078 unsigned int i, j, entries_per_page, max_order = 4;
2079 size_t rq_size, left;
59f082e4
SL
2080 int node;
2081
2082 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
2083 if (node == NUMA_NO_NODE)
2084 node = set->numa_node;
cc71a6f4
JA
2085
2086 INIT_LIST_HEAD(&tags->page_list);
2087
320ae51f
JA
2088 /*
2089 * rq_size is the size of the request plus driver payload, rounded
2090 * to the cacheline size
2091 */
24d2f903 2092 rq_size = round_up(sizeof(struct request) + set->cmd_size,
320ae51f 2093 cache_line_size());
cc71a6f4 2094 left = rq_size * depth;
320ae51f 2095
cc71a6f4 2096 for (i = 0; i < depth; ) {
320ae51f
JA
2097 int this_order = max_order;
2098 struct page *page;
2099 int to_do;
2100 void *p;
2101
b3a834b1 2102 while (this_order && left < order_to_size(this_order - 1))
320ae51f
JA
2103 this_order--;
2104
2105 do {
59f082e4 2106 page = alloc_pages_node(node,
36e1f3d1 2107 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
a5164405 2108 this_order);
320ae51f
JA
2109 if (page)
2110 break;
2111 if (!this_order--)
2112 break;
2113 if (order_to_size(this_order) < rq_size)
2114 break;
2115 } while (1);
2116
2117 if (!page)
24d2f903 2118 goto fail;
320ae51f
JA
2119
2120 page->private = this_order;
24d2f903 2121 list_add_tail(&page->lru, &tags->page_list);
320ae51f
JA
2122
2123 p = page_address(page);
f75782e4
CM
2124 /*
2125 * Allow kmemleak to scan these pages as they contain pointers
2126	 * to additional allocations made via ops->init_request().
2127 */
36e1f3d1 2128 kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
320ae51f 2129 entries_per_page = order_to_size(this_order) / rq_size;
cc71a6f4 2130 to_do = min(entries_per_page, depth - i);
320ae51f
JA
2131 left -= to_do * rq_size;
2132 for (j = 0; j < to_do; j++) {
2af8cbe3
JA
2133 struct request *rq = p;
2134
2135 tags->static_rqs[i] = rq;
1d9bd516
TH
2136 if (blk_mq_init_request(set, rq, hctx_idx, node)) {
2137 tags->static_rqs[i] = NULL;
2138 goto fail;
e9b267d9
CH
2139 }
2140
320ae51f
JA
2141 p += rq_size;
2142 i++;
2143 }
2144 }
cc71a6f4 2145 return 0;
320ae51f 2146
24d2f903 2147fail:
cc71a6f4
JA
2148 blk_mq_free_rqs(set, tags, hctx_idx);
2149 return -ENOMEM;
320ae51f
JA
2150}
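
A worked example of the sizing logic above, with illustrative numbers (the real sizeof(struct request) depends on the kernel config): with a 384-byte struct request, set->cmd_size of 64 and 64-byte cache lines, rq_size rounds up to 448 bytes; an order-4 chunk (64 KiB with 4 KiB pages) then yields entries_per_page = 65536 / 448 = 146 requests before the loop moves on to the next chunk.
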
2151
e57690fe
JA
2152/*
2153 * 'cpu' is going away. splice any existing rq_list entries from this
2154 * software queue to the hw queue dispatch list, and ensure that it
2155 * gets run.
2156 */
9467f859 2157static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
484b4061 2158{
9467f859 2159 struct blk_mq_hw_ctx *hctx;
484b4061
JA
2160 struct blk_mq_ctx *ctx;
2161 LIST_HEAD(tmp);
2162
9467f859 2163 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
e57690fe 2164 ctx = __blk_mq_get_ctx(hctx->queue, cpu);
484b4061
JA
2165
2166 spin_lock(&ctx->lock);
2167 if (!list_empty(&ctx->rq_list)) {
2168 list_splice_init(&ctx->rq_list, &tmp);
2169 blk_mq_hctx_clear_pending(hctx, ctx);
2170 }
2171 spin_unlock(&ctx->lock);
2172
2173 if (list_empty(&tmp))
9467f859 2174 return 0;
484b4061 2175
e57690fe
JA
2176 spin_lock(&hctx->lock);
2177 list_splice_tail_init(&tmp, &hctx->dispatch);
2178 spin_unlock(&hctx->lock);
484b4061
JA
2179
2180 blk_mq_run_hw_queue(hctx, true);
9467f859 2181 return 0;
484b4061
JA
2182}
2183
9467f859 2184static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
484b4061 2185{
9467f859
TG
2186 cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
2187 &hctx->cpuhp_dead);
484b4061
JA
2188}
2189
c3b4afca 2190/* hctx->ctxs will be freed in queue's release handler */
08e98fc6
ML
2191static void blk_mq_exit_hctx(struct request_queue *q,
2192 struct blk_mq_tag_set *set,
2193 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
2194{
9c1051aa
OS
2195 blk_mq_debugfs_unregister_hctx(hctx);
2196
8ab0b7dc
ML
2197 if (blk_mq_hw_queue_mapped(hctx))
2198 blk_mq_tag_idle(hctx);
08e98fc6 2199
f70ced09 2200 if (set->ops->exit_request)
d6296d39 2201 set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
f70ced09 2202
93252632
OS
2203 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
2204
08e98fc6
ML
2205 if (set->ops->exit_hctx)
2206 set->ops->exit_hctx(hctx, hctx_idx);
2207
6a83e74d 2208 if (hctx->flags & BLK_MQ_F_BLOCKING)
05707b64 2209 cleanup_srcu_struct(hctx->srcu);
6a83e74d 2210
9467f859 2211 blk_mq_remove_cpuhp(hctx);
f70ced09 2212 blk_free_flush_queue(hctx->fq);
88459642 2213 sbitmap_free(&hctx->ctx_map);
08e98fc6
ML
2214}
2215
624dbe47
ML
2216static void blk_mq_exit_hw_queues(struct request_queue *q,
2217 struct blk_mq_tag_set *set, int nr_queue)
2218{
2219 struct blk_mq_hw_ctx *hctx;
2220 unsigned int i;
2221
2222 queue_for_each_hw_ctx(q, hctx, i) {
2223 if (i == nr_queue)
2224 break;
08e98fc6 2225 blk_mq_exit_hctx(q, set, hctx, i);
624dbe47 2226 }
624dbe47
ML
2227}
2228
08e98fc6
ML
2229static int blk_mq_init_hctx(struct request_queue *q,
2230 struct blk_mq_tag_set *set,
2231 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
320ae51f 2232{
08e98fc6
ML
2233 int node;
2234
2235 node = hctx->numa_node;
2236 if (node == NUMA_NO_NODE)
2237 node = hctx->numa_node = set->numa_node;
2238
9f993737 2239 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
08e98fc6
ML
2240 spin_lock_init(&hctx->lock);
2241 INIT_LIST_HEAD(&hctx->dispatch);
2242 hctx->queue = q;
2404e607 2243 hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
08e98fc6 2244
9467f859 2245 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
08e98fc6
ML
2246
2247 hctx->tags = set->tags[hctx_idx];
320ae51f
JA
2248
2249 /*
08e98fc6
ML
2250 * Allocate space for all possible cpus to avoid allocation at
2251 * runtime
320ae51f 2252 */
d904bfa7 2253 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
08e98fc6
ML
2254 GFP_KERNEL, node);
2255 if (!hctx->ctxs)
2256 goto unregister_cpu_notifier;
320ae51f 2257
88459642
OS
2258 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
2259 node))
08e98fc6 2260 goto free_ctxs;
320ae51f 2261
08e98fc6 2262 hctx->nr_ctx = 0;
320ae51f 2263
eb619fdb
JA
2264 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
2265 INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
2266
08e98fc6
ML
2267 if (set->ops->init_hctx &&
2268 set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
2269 goto free_bitmap;
320ae51f 2270
93252632
OS
2271 if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
2272 goto exit_hctx;
2273
f70ced09
ML
2274 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
2275 if (!hctx->fq)
93252632 2276 goto sched_exit_hctx;
320ae51f 2277
1d9bd516 2278 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
f70ced09 2279 goto free_fq;
320ae51f 2280
6a83e74d 2281 if (hctx->flags & BLK_MQ_F_BLOCKING)
05707b64 2282 init_srcu_struct(hctx->srcu);
6a83e74d 2283
9c1051aa
OS
2284 blk_mq_debugfs_register_hctx(q, hctx);
2285
08e98fc6 2286 return 0;
320ae51f 2287
f70ced09
ML
2288 free_fq:
2289 kfree(hctx->fq);
93252632
OS
2290 sched_exit_hctx:
2291 blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
f70ced09
ML
2292 exit_hctx:
2293 if (set->ops->exit_hctx)
2294 set->ops->exit_hctx(hctx, hctx_idx);
08e98fc6 2295 free_bitmap:
88459642 2296 sbitmap_free(&hctx->ctx_map);
08e98fc6
ML
2297 free_ctxs:
2298 kfree(hctx->ctxs);
2299 unregister_cpu_notifier:
9467f859 2300 blk_mq_remove_cpuhp(hctx);
08e98fc6
ML
2301 return -1;
2302}
320ae51f 2303
320ae51f
JA
2304static void blk_mq_init_cpu_queues(struct request_queue *q,
2305 unsigned int nr_hw_queues)
2306{
2307 unsigned int i;
2308
2309 for_each_possible_cpu(i) {
2310 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
2311 struct blk_mq_hw_ctx *hctx;
2312
320ae51f
JA
2313 __ctx->cpu = i;
2314 spin_lock_init(&__ctx->lock);
2315 INIT_LIST_HEAD(&__ctx->rq_list);
2316 __ctx->queue = q;
2317
320ae51f
JA
2318 /*
2319 * Set local node, IFF we have more than one hw queue. If
2320 * not, we remain on the home node of the device
2321 */
20e4d813 2322 hctx = blk_mq_map_queue(q, i);
320ae51f 2323 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
bffed457 2324 hctx->numa_node = local_memory_node(cpu_to_node(i));
320ae51f
JA
2325 }
2326}
2327
cc71a6f4
JA
2328static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
2329{
2330 int ret = 0;
2331
2332 set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
2333 set->queue_depth, set->reserved_tags);
2334 if (!set->tags[hctx_idx])
2335 return false;
2336
2337 ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
2338 set->queue_depth);
2339 if (!ret)
2340 return true;
2341
2342 blk_mq_free_rq_map(set->tags[hctx_idx]);
2343 set->tags[hctx_idx] = NULL;
2344 return false;
2345}
2346
2347static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
2348 unsigned int hctx_idx)
2349{
bd166ef1
JA
2350 if (set->tags[hctx_idx]) {
2351 blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
2352 blk_mq_free_rq_map(set->tags[hctx_idx]);
2353 set->tags[hctx_idx] = NULL;
2354 }
cc71a6f4
JA
2355}
2356
4b855ad3 2357static void blk_mq_map_swqueue(struct request_queue *q)
320ae51f 2358{
4412efec 2359 unsigned int i, hctx_idx;
320ae51f
JA
2360 struct blk_mq_hw_ctx *hctx;
2361 struct blk_mq_ctx *ctx;
2a34c087 2362 struct blk_mq_tag_set *set = q->tag_set;
320ae51f 2363
60de074b
AM
2364 /*
2365	 * Avoid others reading an incomplete hctx->cpumask through sysfs
2366 */
2367 mutex_lock(&q->sysfs_lock);
2368
320ae51f 2369 queue_for_each_hw_ctx(q, hctx, i) {
e4043dcf 2370 cpumask_clear(hctx->cpumask);
320ae51f
JA
2371 hctx->nr_ctx = 0;
2372 }
2373
2374 /*
4b855ad3 2375 * Map software to hardware queues.
4412efec
ML
2376 *
2377	 * If the cpu isn't present, the cpu is mapped to the first hctx.
320ae51f 2378 */
20e4d813 2379 for_each_possible_cpu(i) {
4412efec
ML
2380 hctx_idx = q->mq_map[i];
2381 /* unmapped hw queue can be remapped after CPU topo changed */
2382 if (!set->tags[hctx_idx] &&
2383 !__blk_mq_alloc_rq_map(set, hctx_idx)) {
2384 /*
2385 * If tags initialization fail for some hctx,
2386 * that hctx won't be brought online. In this
2387 * case, remap the current ctx to hctx[0] which
2388 * is guaranteed to always have tags allocated
2389 */
2390 q->mq_map[i] = 0;
2391 }
2392
897bb0c7 2393 ctx = per_cpu_ptr(q->queue_ctx, i);
7d7e0f90 2394 hctx = blk_mq_map_queue(q, i);
868f2f0b 2395
e4043dcf 2396 cpumask_set_cpu(i, hctx->cpumask);
320ae51f
JA
2397 ctx->index_hw = hctx->nr_ctx;
2398 hctx->ctxs[hctx->nr_ctx++] = ctx;
2399 }
506e931f 2400
60de074b
AM
2401 mutex_unlock(&q->sysfs_lock);
2402
506e931f 2403 queue_for_each_hw_ctx(q, hctx, i) {
4412efec
ML
2404 /*
2405 * If no software queues are mapped to this hardware queue,
2406 * disable it and free the request entries.
2407 */
2408 if (!hctx->nr_ctx) {
2409 /* Never unmap queue 0. We need it as a
2410	 * fallback in case a new remap fails its
2411	 * allocation.
2412 */
2413 if (i && set->tags[i])
2414 blk_mq_free_map_and_requests(set, i);
2415
2416 hctx->tags = NULL;
2417 continue;
2418 }
484b4061 2419
2a34c087
ML
2420 hctx->tags = set->tags[i];
2421 WARN_ON(!hctx->tags);
2422
889fa31f
CY
2423 /*
2424 * Set the map size to the number of mapped software queues.
2425 * This is more accurate and more efficient than looping
2426 * over all possibly mapped software queues.
2427 */
88459642 2428 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx);
889fa31f 2429
484b4061
JA
2430 /*
2431 * Initialize batch roundrobin counts
2432 */
f82ddf19 2433 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
506e931f
JA
2434 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
2435 }
320ae51f
JA
2436}
2437
8e8320c9
JA
2438/*
2439 * Caller needs to ensure that we're either frozen/quiesced, or that
2440 * the queue isn't live yet.
2441 */
2404e607 2442static void queue_set_hctx_shared(struct request_queue *q, bool shared)
0d2602ca
JA
2443{
2444 struct blk_mq_hw_ctx *hctx;
0d2602ca
JA
2445 int i;
2446
2404e607 2447 queue_for_each_hw_ctx(q, hctx, i) {
8e8320c9
JA
2448 if (shared) {
2449 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2450 atomic_inc(&q->shared_hctx_restart);
2404e607 2451 hctx->flags |= BLK_MQ_F_TAG_SHARED;
8e8320c9
JA
2452 } else {
2453 if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
2454 atomic_dec(&q->shared_hctx_restart);
2404e607 2455 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
8e8320c9 2456 }
2404e607
JM
2457 }
2458}
2459
8e8320c9
JA
2460static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
2461 bool shared)
2404e607
JM
2462{
2463 struct request_queue *q;
0d2602ca 2464
705cda97
BVA
2465 lockdep_assert_held(&set->tag_list_lock);
2466
0d2602ca
JA
2467 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2468 blk_mq_freeze_queue(q);
2404e607 2469 queue_set_hctx_shared(q, shared);
0d2602ca
JA
2470 blk_mq_unfreeze_queue(q);
2471 }
2472}
2473
2474static void blk_mq_del_queue_tag_set(struct request_queue *q)
2475{
2476 struct blk_mq_tag_set *set = q->tag_set;
2477
0d2602ca 2478 mutex_lock(&set->tag_list_lock);
705cda97
BVA
2479 list_del_rcu(&q->tag_set_list);
2480 INIT_LIST_HEAD(&q->tag_set_list);
2404e607
JM
2481 if (list_is_singular(&set->tag_list)) {
2482 /* just transitioned to unshared */
2483 set->flags &= ~BLK_MQ_F_TAG_SHARED;
2484 /* update existing queue */
2485 blk_mq_update_tag_set_depth(set, false);
2486 }
0d2602ca 2487 mutex_unlock(&set->tag_list_lock);
705cda97
BVA
2488
2489 synchronize_rcu();
0d2602ca
JA
2490}
2491
2492static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
2493 struct request_queue *q)
2494{
2495 q->tag_set = set;
2496
2497 mutex_lock(&set->tag_list_lock);
2404e607 2498
ff821d27
JA
2499 /*
2500 * Check to see if we're transitioning to shared (from 1 to 2 queues).
2501 */
2502 if (!list_empty(&set->tag_list) &&
2503 !(set->flags & BLK_MQ_F_TAG_SHARED)) {
2404e607
JM
2504 set->flags |= BLK_MQ_F_TAG_SHARED;
2505 /* update existing queue */
2506 blk_mq_update_tag_set_depth(set, true);
2507 }
2508 if (set->flags & BLK_MQ_F_TAG_SHARED)
2509 queue_set_hctx_shared(q, true);
705cda97 2510 list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
2404e607 2511
0d2602ca
JA
2512 mutex_unlock(&set->tag_list_lock);
2513}
2514
e09aae7e
ML
2515/*
2516 * It is the actual release handler for mq, but we do it from
2517	 * the request queue's release handler to avoid use-after-free
2518	 * and headaches, because q->mq_kobj shouldn't have been introduced,
2519 * but we can't group ctx/kctx kobj without it.
2520 */
2521void blk_mq_release(struct request_queue *q)
2522{
2523 struct blk_mq_hw_ctx *hctx;
2524 unsigned int i;
2525
2526 /* hctx kobj stays in hctx */
c3b4afca
ML
2527 queue_for_each_hw_ctx(q, hctx, i) {
2528 if (!hctx)
2529 continue;
6c8b232e 2530 kobject_put(&hctx->kobj);
c3b4afca 2531 }
e09aae7e 2532
a723bab3
AM
2533 q->mq_map = NULL;
2534
e09aae7e
ML
2535 kfree(q->queue_hw_ctx);
2536
7ea5fe31
ML
2537 /*
2538	 * Release .mq_kobj and the sw queues' kobjects now, because
2539	 * both share their lifetime with the request queue.
2540 */
2541 blk_mq_sysfs_deinit(q);
2542
e09aae7e
ML
2543 free_percpu(q->queue_ctx);
2544}
2545
24d2f903 2546struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
b62c21b7
MS
2547{
2548 struct request_queue *uninit_q, *q;
2549
5ee0524b 2550 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
b62c21b7
MS
2551 if (!uninit_q)
2552 return ERR_PTR(-ENOMEM);
2553
2554 q = blk_mq_init_allocated_queue(set, uninit_q);
2555 if (IS_ERR(q))
2556 blk_cleanup_queue(uninit_q);
2557
2558 return q;
2559}
2560EXPORT_SYMBOL(blk_mq_init_queue);
2561
07319678
BVA
2562static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
2563{
2564 int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
2565
05707b64 2566 BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
07319678
BVA
2567 __alignof__(struct blk_mq_hw_ctx)) !=
2568 sizeof(struct blk_mq_hw_ctx));
2569
2570 if (tag_set->flags & BLK_MQ_F_BLOCKING)
2571 hw_ctx_size += sizeof(struct srcu_struct);
2572
2573 return hw_ctx_size;
2574}
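
Put differently: the BUILD_BUG_ON above asserts that the srcu member effectively sits at the very end of struct blk_mq_hw_ctx, so for BLK_MQ_F_BLOCKING tag sets the extra sizeof(struct srcu_struct) added here lands directly behind the structure, which is the storage that init_srcu_struct(hctx->srcu) later initializes.
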
2575
868f2f0b
KB
2576static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
2577 struct request_queue *q)
320ae51f 2578{
868f2f0b
KB
2579 int i, j;
2580 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
f14bbe77 2581
868f2f0b 2582 blk_mq_sysfs_unregister(q);
fb350e0a
ML
2583
2584 /* protect against switching io scheduler */
2585 mutex_lock(&q->sysfs_lock);
24d2f903 2586 for (i = 0; i < set->nr_hw_queues; i++) {
868f2f0b 2587 int node;
f14bbe77 2588
868f2f0b
KB
2589 if (hctxs[i])
2590 continue;
2591
2592 node = blk_mq_hw_queue_to_node(q->mq_map, i);
07319678 2593 hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
cdef54dd 2594 GFP_KERNEL, node);
320ae51f 2595 if (!hctxs[i])
868f2f0b 2596 break;
320ae51f 2597
a86073e4 2598 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
868f2f0b
KB
2599 node)) {
2600 kfree(hctxs[i]);
2601 hctxs[i] = NULL;
2602 break;
2603 }
e4043dcf 2604
0d2602ca 2605 atomic_set(&hctxs[i]->nr_active, 0);
f14bbe77 2606 hctxs[i]->numa_node = node;
320ae51f 2607 hctxs[i]->queue_num = i;
868f2f0b
KB
2608
2609 if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
2610 free_cpumask_var(hctxs[i]->cpumask);
2611 kfree(hctxs[i]);
2612 hctxs[i] = NULL;
2613 break;
2614 }
2615 blk_mq_hctx_kobj_init(hctxs[i]);
320ae51f 2616 }
868f2f0b
KB
2617 for (j = i; j < q->nr_hw_queues; j++) {
2618 struct blk_mq_hw_ctx *hctx = hctxs[j];
2619
2620 if (hctx) {
cc71a6f4
JA
2621 if (hctx->tags)
2622 blk_mq_free_map_and_requests(set, j);
868f2f0b 2623 blk_mq_exit_hctx(q, set, hctx, j);
868f2f0b 2624 kobject_put(&hctx->kobj);
868f2f0b
KB
2625 hctxs[j] = NULL;
2626
2627 }
2628 }
2629 q->nr_hw_queues = i;
fb350e0a 2630 mutex_unlock(&q->sysfs_lock);
868f2f0b
KB
2631 blk_mq_sysfs_register(q);
2632}
2633
2634struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2635 struct request_queue *q)
2636{
66841672
ML
2637 /* mark the queue as mq asap */
2638 q->mq_ops = set->ops;
2639
34dbad5d 2640 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
720b8ccc
SB
2641 blk_mq_poll_stats_bkt,
2642 BLK_MQ_POLL_STATS_BKTS, q);
34dbad5d
OS
2643 if (!q->poll_cb)
2644 goto err_exit;
2645
868f2f0b
KB
2646 q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
2647 if (!q->queue_ctx)
c7de5726 2648 goto err_exit;
868f2f0b 2649
737f98cf
ML
2650 /* init q->mq_kobj and sw queues' kobjects */
2651 blk_mq_sysfs_init(q);
2652
868f2f0b
KB
2653 q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
2654 GFP_KERNEL, set->numa_node);
2655 if (!q->queue_hw_ctx)
2656 goto err_percpu;
2657
bdd17e75 2658 q->mq_map = set->mq_map;
868f2f0b
KB
2659
2660 blk_mq_realloc_hw_ctxs(set, q);
2661 if (!q->nr_hw_queues)
2662 goto err_hctxs;
320ae51f 2663
287922eb 2664 INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
e56f698b 2665 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
320ae51f
JA
2666
2667 q->nr_queues = nr_cpu_ids;
320ae51f 2668
94eddfbe 2669 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
320ae51f 2670
05f1dd53 2671 if (!(set->flags & BLK_MQ_F_SG_MERGE))
f78bac2c 2672 queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
05f1dd53 2673
1be036e9
CH
2674 q->sg_reserved_size = INT_MAX;
2675
2849450a 2676 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
6fca6a61
CH
2677 INIT_LIST_HEAD(&q->requeue_list);
2678 spin_lock_init(&q->requeue_lock);
2679
254d259d 2680 blk_queue_make_request(q, blk_mq_make_request);
ea435e1b
CH
2681 if (q->mq_ops->poll)
2682 q->poll_fn = blk_mq_poll;
07068d5b 2683
eba71768
JA
2684 /*
2685 * Do this after blk_queue_make_request() overrides it...
2686 */
2687 q->nr_requests = set->queue_depth;
2688
64f1c21e
JA
2689 /*
2690 * Default to classic polling
2691 */
2692 q->poll_nsec = -1;
2693
24d2f903
CH
2694 if (set->ops->complete)
2695 blk_queue_softirq_done(q, set->ops->complete);
30a91cb4 2696
24d2f903 2697 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
0d2602ca 2698 blk_mq_add_queue_tag_set(set, q);
4b855ad3 2699 blk_mq_map_swqueue(q);
4593fdbe 2700
d3484991
JA
2701 if (!(set->flags & BLK_MQ_F_NO_SCHED)) {
2702 int ret;
2703
2704 ret = blk_mq_sched_init(q);
2705 if (ret)
2706 return ERR_PTR(ret);
2707 }
2708
320ae51f 2709 return q;
18741986 2710
320ae51f 2711err_hctxs:
868f2f0b 2712 kfree(q->queue_hw_ctx);
320ae51f 2713err_percpu:
868f2f0b 2714 free_percpu(q->queue_ctx);
c7de5726
ML
2715err_exit:
2716 q->mq_ops = NULL;
320ae51f
JA
2717 return ERR_PTR(-ENOMEM);
2718}
b62c21b7 2719EXPORT_SYMBOL(blk_mq_init_allocated_queue);
320ae51f
JA
2720
2721void blk_mq_free_queue(struct request_queue *q)
2722{
624dbe47 2723 struct blk_mq_tag_set *set = q->tag_set;
320ae51f 2724
0d2602ca 2725 blk_mq_del_queue_tag_set(q);
624dbe47 2726 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
320ae51f 2727}
320ae51f
JA
2728
2729/* Basically redo blk_mq_init_queue with queue frozen */
4b855ad3 2730static void blk_mq_queue_reinit(struct request_queue *q)
320ae51f 2731{
4ecd4fef 2732 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
320ae51f 2733
9c1051aa 2734 blk_mq_debugfs_unregister_hctxs(q);
67aec14c
JA
2735 blk_mq_sysfs_unregister(q);
2736
320ae51f
JA
2737 /*
2738 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
ff821d27
JA
2739 * we should change hctx numa_node according to the new topology (this
2740 * involves freeing and re-allocating memory, worth doing?)
320ae51f 2741 */
4b855ad3 2742 blk_mq_map_swqueue(q);
320ae51f 2743
67aec14c 2744 blk_mq_sysfs_register(q);
9c1051aa 2745 blk_mq_debugfs_register_hctxs(q);
320ae51f
JA
2746}
2747
a5164405
JA
2748static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2749{
2750 int i;
2751
cc71a6f4
JA
2752 for (i = 0; i < set->nr_hw_queues; i++)
2753 if (!__blk_mq_alloc_rq_map(set, i))
a5164405 2754 goto out_unwind;
a5164405
JA
2755
2756 return 0;
2757
2758out_unwind:
2759 while (--i >= 0)
cc71a6f4 2760 blk_mq_free_rq_map(set->tags[i]);
a5164405 2761
a5164405
JA
2762 return -ENOMEM;
2763}
2764
2765/*
2766 * Allocate the request maps associated with this tag_set. Note that this
2767 * may reduce the depth asked for, if memory is tight. set->queue_depth
2768 * will be updated to reflect the allocated depth.
2769 */
2770static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2771{
2772 unsigned int depth;
2773 int err;
2774
2775 depth = set->queue_depth;
2776 do {
2777 err = __blk_mq_alloc_rq_maps(set);
2778 if (!err)
2779 break;
2780
2781 set->queue_depth >>= 1;
2782 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2783 err = -ENOMEM;
2784 break;
2785 }
2786 } while (set->queue_depth);
2787
2788 if (!set->queue_depth || err) {
2789 pr_err("blk-mq: failed to allocate request map\n");
2790 return -ENOMEM;
2791 }
2792
2793 if (depth != set->queue_depth)
2794 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2795 depth, set->queue_depth);
2796
2797 return 0;
2798}
2799
ebe8bddb
OS
2800static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
2801{
7d4901a9
ML
2802 if (set->ops->map_queues) {
2803 int cpu;
2804 /*
2805 * transport .map_queues is usually done in the following
2806 * way:
2807 *
2808 * for (queue = 0; queue < set->nr_hw_queues; queue++) {
2809 * mask = get_cpu_mask(queue)
2810 * for_each_cpu(cpu, mask)
2811 * set->mq_map[cpu] = queue;
2812 * }
2813 *
2815	 * When we need to remap, the table has to be cleared to
2816	 * kill stale mappings, since one CPU may not be mapped
2817	 * to any hw queue.
2817 */
2818 for_each_possible_cpu(cpu)
2819 set->mq_map[cpu] = 0;
2820
ebe8bddb 2821 return set->ops->map_queues(set);
7d4901a9 2822 } else
ebe8bddb
OS
2823 return blk_mq_map_queues(set);
2824}
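
As a concrete illustration of the resulting table (the exact spread depends on the mapping callback and CPU topology): on a 4-CPU system with set->nr_hw_queues == 2, mq_map might end up as {0, 0, 1, 1}, i.e. CPUs 0-1 feed hardware queue 0 and CPUs 2-3 feed hardware queue 1.
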
2825
a4391c64
JA
2826/*
2827 * Alloc a tag set to be associated with one or more request queues.
2828 * May fail with EINVAL for various error conditions. May adjust the
2829 * requested depth down, if it is too large. In that case, the adjusted
2830 * value will be stored in set->queue_depth.
2831 */
24d2f903
CH
2832int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2833{
da695ba2
CH
2834 int ret;
2835
205fb5f5
BVA
2836 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2837
24d2f903
CH
2838 if (!set->nr_hw_queues)
2839 return -EINVAL;
a4391c64 2840 if (!set->queue_depth)
24d2f903
CH
2841 return -EINVAL;
2842 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2843 return -EINVAL;
2844
7d7e0f90 2845 if (!set->ops->queue_rq)
24d2f903
CH
2846 return -EINVAL;
2847
de148297
ML
2848 if (!set->ops->get_budget ^ !set->ops->put_budget)
2849 return -EINVAL;
2850
a4391c64
JA
2851 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2852 pr_info("blk-mq: reduced tag depth to %u\n",
2853 BLK_MQ_MAX_DEPTH);
2854 set->queue_depth = BLK_MQ_MAX_DEPTH;
2855 }
24d2f903 2856
6637fadf
SL
2857 /*
2858 * If a crashdump is active, then we are potentially in a very
2859 * memory constrained environment. Limit us to 1 queue and
2860 * 64 tags to prevent using too much memory.
2861 */
2862 if (is_kdump_kernel()) {
2863 set->nr_hw_queues = 1;
2864 set->queue_depth = min(64U, set->queue_depth);
2865 }
868f2f0b
KB
2866 /*
2867 * There is no use for more h/w queues than cpus.
2868 */
2869 if (set->nr_hw_queues > nr_cpu_ids)
2870 set->nr_hw_queues = nr_cpu_ids;
6637fadf 2871
868f2f0b 2872 set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
24d2f903
CH
2873 GFP_KERNEL, set->numa_node);
2874 if (!set->tags)
a5164405 2875 return -ENOMEM;
24d2f903 2876
da695ba2
CH
2877 ret = -ENOMEM;
2878 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2879 GFP_KERNEL, set->numa_node);
bdd17e75
CH
2880 if (!set->mq_map)
2881 goto out_free_tags;
2882
ebe8bddb 2883 ret = blk_mq_update_queue_map(set);
da695ba2
CH
2884 if (ret)
2885 goto out_free_mq_map;
2886
2887 ret = blk_mq_alloc_rq_maps(set);
2888 if (ret)
bdd17e75 2889 goto out_free_mq_map;
24d2f903 2890
0d2602ca
JA
2891 mutex_init(&set->tag_list_lock);
2892 INIT_LIST_HEAD(&set->tag_list);
2893
24d2f903 2894 return 0;
bdd17e75
CH
2895
2896out_free_mq_map:
2897 kfree(set->mq_map);
2898 set->mq_map = NULL;
2899out_free_tags:
5676e7b6
RE
2900 kfree(set->tags);
2901 set->tags = NULL;
da695ba2 2902 return ret;
24d2f903
CH
2903}
2904EXPORT_SYMBOL(blk_mq_alloc_tag_set);
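
For context, here is a minimal driver-side sketch (not part of blk-mq.c; my_dev, my_cmd and my_queue_rq are hypothetical placeholders) showing how a tag set is typically paired with blk_mq_init_queue() from earlier in this file:

	/* Hypothetical single-queue driver setup, for illustration only. */
	static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
					const struct blk_mq_queue_data *bd)
	{
		blk_mq_start_request(bd->rq);
		/* ... hand bd->rq to the hardware here ... */
		blk_mq_end_request(bd->rq, BLK_STS_OK);
		return BLK_STS_OK;
	}

	static const struct blk_mq_ops my_mq_ops = {
		.queue_rq	= my_queue_rq,
	};

	static int my_dev_init_queue(struct my_dev *dev)
	{
		int ret;

		memset(&dev->tag_set, 0, sizeof(dev->tag_set));
		dev->tag_set.ops		= &my_mq_ops;
		dev->tag_set.nr_hw_queues	= 1;
		dev->tag_set.queue_depth	= 64;
		dev->tag_set.numa_node		= NUMA_NO_NODE;
		dev->tag_set.cmd_size		= sizeof(struct my_cmd);
		dev->tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;

		ret = blk_mq_alloc_tag_set(&dev->tag_set);
		if (ret)
			return ret;

		dev->queue = blk_mq_init_queue(&dev->tag_set);
		if (IS_ERR(dev->queue)) {
			ret = PTR_ERR(dev->queue);
			blk_mq_free_tag_set(&dev->tag_set);
			return ret;
		}
		return 0;
	}

On teardown the driver reverses the two steps: blk_cleanup_queue() on the queue, then blk_mq_free_tag_set() below.
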
2905
2906void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2907{
2908 int i;
2909
cc71a6f4
JA
2910 for (i = 0; i < nr_cpu_ids; i++)
2911 blk_mq_free_map_and_requests(set, i);
484b4061 2912
bdd17e75
CH
2913 kfree(set->mq_map);
2914 set->mq_map = NULL;
2915
981bd189 2916 kfree(set->tags);
5676e7b6 2917 set->tags = NULL;
24d2f903
CH
2918}
2919EXPORT_SYMBOL(blk_mq_free_tag_set);
2920
e3a2b3f9
JA
2921int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2922{
2923 struct blk_mq_tag_set *set = q->tag_set;
2924 struct blk_mq_hw_ctx *hctx;
2925 int i, ret;
2926
bd166ef1 2927 if (!set)
e3a2b3f9
JA
2928 return -EINVAL;
2929
70f36b60 2930 blk_mq_freeze_queue(q);
24f5a90f 2931 blk_mq_quiesce_queue(q);
70f36b60 2932
e3a2b3f9
JA
2933 ret = 0;
2934 queue_for_each_hw_ctx(q, hctx, i) {
e9137d4b
KB
2935 if (!hctx->tags)
2936 continue;
bd166ef1
JA
2937 /*
2938 * If we're using an MQ scheduler, just update the scheduler
2939 * queue depth. This is similar to what the old code would do.
2940 */
70f36b60 2941 if (!hctx->sched_tags) {
c2e82a23 2942 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr,
70f36b60
JA
2943 false);
2944 } else {
2945 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
2946 nr, true);
2947 }
e3a2b3f9
JA
2948 if (ret)
2949 break;
2950 }
2951
2952 if (!ret)
2953 q->nr_requests = nr;
2954
24f5a90f 2955 blk_mq_unquiesce_queue(q);
70f36b60 2956 blk_mq_unfreeze_queue(q);
70f36b60 2957
e3a2b3f9
JA
2958 return ret;
2959}
2960
e4dc2b32
KB
2961static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
2962 int nr_hw_queues)
868f2f0b
KB
2963{
2964 struct request_queue *q;
2965
705cda97
BVA
2966 lockdep_assert_held(&set->tag_list_lock);
2967
868f2f0b
KB
2968 if (nr_hw_queues > nr_cpu_ids)
2969 nr_hw_queues = nr_cpu_ids;
2970 if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
2971 return;
2972
2973 list_for_each_entry(q, &set->tag_list, tag_set_list)
2974 blk_mq_freeze_queue(q);
2975
2976 set->nr_hw_queues = nr_hw_queues;
ebe8bddb 2977 blk_mq_update_queue_map(set);
868f2f0b
KB
2978 list_for_each_entry(q, &set->tag_list, tag_set_list) {
2979 blk_mq_realloc_hw_ctxs(set, q);
4b855ad3 2980 blk_mq_queue_reinit(q);
868f2f0b
KB
2981 }
2982
2983 list_for_each_entry(q, &set->tag_list, tag_set_list)
2984 blk_mq_unfreeze_queue(q);
2985}
e4dc2b32
KB
2986
2987void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
2988{
2989 mutex_lock(&set->tag_list_lock);
2990 __blk_mq_update_nr_hw_queues(set, nr_hw_queues);
2991 mutex_unlock(&set->tag_list_lock);
2992}
868f2f0b
KB
2993EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
2994
34dbad5d
OS
2995/* Enable polling stats and return whether they were already enabled. */
2996static bool blk_poll_stats_enable(struct request_queue *q)
2997{
2998 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
7dfdbc73 2999 blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
34dbad5d
OS
3000 return true;
3001 blk_stat_add_callback(q, q->poll_cb);
3002 return false;
3003}
3004
3005static void blk_mq_poll_stats_start(struct request_queue *q)
3006{
3007 /*
3008 * We don't arm the callback if polling stats are not enabled or the
3009 * callback is already active.
3010 */
3011 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
3012 blk_stat_is_active(q->poll_cb))
3013 return;
3014
3015 blk_stat_activate_msecs(q->poll_cb, 100);
3016}
3017
3018static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
3019{
3020 struct request_queue *q = cb->data;
720b8ccc 3021 int bucket;
34dbad5d 3022
720b8ccc
SB
3023 for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS; bucket++) {
3024 if (cb->stat[bucket].nr_samples)
3025 q->poll_stat[bucket] = cb->stat[bucket];
3026 }
34dbad5d
OS
3027}
3028
64f1c21e
JA
3029static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
3030 struct blk_mq_hw_ctx *hctx,
3031 struct request *rq)
3032{
64f1c21e 3033 unsigned long ret = 0;
720b8ccc 3034 int bucket;
64f1c21e
JA
3035
3036 /*
3037 * If stats collection isn't on, don't sleep but turn it on for
3038 * future users
3039 */
34dbad5d 3040 if (!blk_poll_stats_enable(q))
64f1c21e
JA
3041 return 0;
3042
64f1c21e
JA
3043 /*
3044 * As an optimistic guess, use half of the mean service time
3045 * for this type of request. We can (and should) make this smarter.
3046 * For instance, if the completion latencies are tight, we can
3047 * get closer than just half the mean. This is especially
3048 * important on devices where the completion latencies are longer
720b8ccc
SB
3049 * than ~10 usec. We do use the stats for the relevant IO size
3050	 * if available, which does lead to better estimates.
64f1c21e 3051 */
720b8ccc
SB
3052 bucket = blk_mq_poll_stats_bkt(rq);
3053 if (bucket < 0)
3054 return ret;
3055
3056 if (q->poll_stat[bucket].nr_samples)
3057 ret = (q->poll_stat[bucket].mean + 1) / 2;
64f1c21e
JA
3058
3059 return ret;
3060}
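
For example, if the tracked mean completion time for a request's bucket is 20,000 ns, blk_mq_poll_hybrid_sleep() below will be told to sleep for roughly 10,000 ns before switching to busy polling.
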
3061
06426adf 3062static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
64f1c21e 3063 struct blk_mq_hw_ctx *hctx,
06426adf
JA
3064 struct request *rq)
3065{
3066 struct hrtimer_sleeper hs;
3067 enum hrtimer_mode mode;
64f1c21e 3068 unsigned int nsecs;
06426adf
JA
3069 ktime_t kt;
3070
76a86f9d 3071 if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
64f1c21e
JA
3072 return false;
3073
3074 /*
3075 * poll_nsec can be:
3076 *
3077 * -1: don't ever hybrid sleep
3078 * 0: use half of prev avg
3079 * >0: use this specific value
3080 */
3081 if (q->poll_nsec == -1)
3082 return false;
3083 else if (q->poll_nsec > 0)
3084 nsecs = q->poll_nsec;
3085 else
3086 nsecs = blk_mq_poll_nsecs(q, hctx, rq);
3087
3088 if (!nsecs)
06426adf
JA
3089 return false;
3090
76a86f9d 3091 rq->rq_flags |= RQF_MQ_POLL_SLEPT;
06426adf
JA
3092
3093 /*
3094 * This will be replaced with the stats tracking code, using
3095 * 'avg_completion_time / 2' as the pre-sleep target.
3096 */
8b0e1953 3097 kt = nsecs;
06426adf
JA
3098
3099 mode = HRTIMER_MODE_REL;
3100 hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
3101 hrtimer_set_expires(&hs.timer, kt);
3102
3103 hrtimer_init_sleeper(&hs, current);
3104 do {
5a61c363 3105 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE)
06426adf
JA
3106 break;
3107 set_current_state(TASK_UNINTERRUPTIBLE);
3108 hrtimer_start_expires(&hs.timer, mode);
3109 if (hs.task)
3110 io_schedule();
3111 hrtimer_cancel(&hs.timer);
3112 mode = HRTIMER_MODE_ABS;
3113 } while (hs.task && !signal_pending(current));
3114
3115 __set_current_state(TASK_RUNNING);
3116 destroy_hrtimer_on_stack(&hs.timer);
3117 return true;
3118}
3119
bbd7bb70
JA
3120static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
3121{
3122 struct request_queue *q = hctx->queue;
3123 long state;
3124
06426adf
JA
3125 /*
3126 * If we sleep, have the caller restart the poll loop to reset
3127 * the state. Like for the other success return cases, the
3128 * caller is responsible for checking if the IO completed. If
3129 * the IO isn't complete, we'll get called again and will go
3130 * straight to the busy poll loop.
3131 */
64f1c21e 3132 if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
06426adf
JA
3133 return true;
3134
bbd7bb70
JA
3135 hctx->poll_considered++;
3136
3137 state = current->state;
3138 while (!need_resched()) {
3139 int ret;
3140
3141 hctx->poll_invoked++;
3142
3143 ret = q->mq_ops->poll(hctx, rq->tag);
3144 if (ret > 0) {
3145 hctx->poll_success++;
3146 set_current_state(TASK_RUNNING);
3147 return true;
3148 }
3149
3150 if (signal_pending_state(state, current))
3151 set_current_state(TASK_RUNNING);
3152
3153 if (current->state == TASK_RUNNING)
3154 return true;
3155 if (ret < 0)
3156 break;
3157 cpu_relax();
3158 }
3159
67b4110f 3160 __set_current_state(TASK_RUNNING);
bbd7bb70
JA
3161 return false;
3162}
3163
ea435e1b 3164static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
bbd7bb70
JA
3165{
3166 struct blk_mq_hw_ctx *hctx;
bbd7bb70
JA
3167 struct request *rq;
3168
ea435e1b 3169 if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
bbd7bb70
JA
3170 return false;
3171
bbd7bb70 3172 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
bd166ef1
JA
3173 if (!blk_qc_t_is_internal(cookie))
3174 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
3a07bb1d 3175 else {
bd166ef1 3176 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
3a07bb1d
JA
3177 /*
3178 * With scheduling, if the request has completed, we'll
3179 * get a NULL return here, as we clear the sched tag when
3180 * that happens. The request still remains valid, like always,
3181 * so we should be safe with just the NULL check.
3182 */
3183 if (!rq)
3184 return false;
3185 }
bbd7bb70
JA
3186
3187 return __blk_mq_poll(hctx, rq);
3188}
bbd7bb70 3189
320ae51f
JA
3190static int __init blk_mq_init(void)
3191{
9467f859
TG
3192 cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
3193 blk_mq_hctx_notify_dead);
320ae51f
JA
3194 return 0;
3195}
3196subsys_initcall(blk_mq_init);