block: fix plug list flushing for nomerge queues
[linux-2.6-block.git] / block / blk-mq.c
/*
 * Block multiqueue core code
 *
 * Copyright (C) 2013-2014 Jens Axboe
 * Copyright (C) 2013-2014 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/llist.h>
#include <linux/list_sort.h>
#include <linux/cpu.h>
#include <linux/cache.h>
#include <linux/sched/sysctl.h>
#include <linux/delay.h>
#include <linux/crash_dump.h>

#include <trace/events/block.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static DEFINE_MUTEX(all_q_mutex);
static LIST_HEAD(all_q_list);

static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);

/*
 * Check if any of the ctx's have pending work in this hardware queue
 */
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
	unsigned int i;

	for (i = 0; i < hctx->ctx_map.size; i++)
		if (hctx->ctx_map.map[i].word)
			return true;

	return false;
}

static inline struct blk_align_bitmap *get_bm(struct blk_mq_hw_ctx *hctx,
					      struct blk_mq_ctx *ctx)
{
	return &hctx->ctx_map.map[ctx->index_hw / hctx->ctx_map.bits_per_word];
}

#define CTX_TO_BIT(hctx, ctx)	\
	((ctx)->index_hw & ((hctx)->ctx_map.bits_per_word - 1))

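/*
 * Illustrative note (annotation added for clarity, not in the original
 * source): with the bits_per_word of 8 set up in blk_mq_alloc_bitmap()
 * below, a ctx with index_hw == 11 lands in ctx_map.map[11 / 8] == map[1],
 * and CTX_TO_BIT() selects bit 11 & 7 == 3 within that word.
 */
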
/*
 * Mark this ctx as having pending work in this hardware queue
 */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
				     struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	if (!test_bit(CTX_TO_BIT(hctx, ctx), &bm->word))
		set_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
				      struct blk_mq_ctx *ctx)
{
	struct blk_align_bitmap *bm = get_bm(hctx, ctx);

	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
}

static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
{
	while (true) {
		int ret;

		if (percpu_ref_tryget_live(&q->mq_usage_counter))
			return 0;

		if (!(gfp & __GFP_WAIT))
			return -EBUSY;

		ret = wait_event_interruptible(q->mq_freeze_wq,
				!atomic_read(&q->mq_freeze_depth) ||
				blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
		if (ret)
			return ret;
	}
}

static void blk_mq_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->mq_usage_counter);
}

static void blk_mq_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, mq_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

void blk_mq_freeze_queue_start(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->mq_usage_counter);
		blk_mq_run_hw_queues(q, false);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);

static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_mq_freeze_queue(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

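/*
 * Illustrative note (annotation, not in the original source): callers
 * typically bracket queue-topology or tag changes with the freeze pair,
 * e.g.
 *
 *	blk_mq_freeze_queue(q);
 *	... modify queue data structures ...
 *	blk_mq_unfreeze_queue(q);
 *
 * as blk_mq_update_tag_set_depth() does further down in this file.
 */
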
void blk_mq_unfreeze_queue(struct request_queue *q)
{
	int freeze_depth;

	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->mq_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);

void blk_mq_wake_waiters(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		if (blk_mq_hw_queue_mapped(hctx))
			blk_mq_tag_wakeup_all(hctx->tags, true);

	/*
	 * If we are called because the queue has now been marked as
	 * dying, we need to ensure that processes currently waiting on
	 * the queue are notified as well.
	 */
	wake_up_all(&q->mq_freeze_wq);
}

bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
	return blk_mq_has_free_tags(hctx->tags);
}
EXPORT_SYMBOL(blk_mq_can_queue);

static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
			       struct request *rq, unsigned int rw_flags)
{
	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;

	INIT_LIST_HEAD(&rq->queuelist);
	/* csd/requeue_work/fifo_time is initialized before use */
	rq->q = q;
	rq->mq_ctx = ctx;
	rq->cmd_flags |= rw_flags;
	/* do not touch atomic flags, it needs atomic ops against the timer */
	rq->cpu = -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->rq_disk = NULL;
	rq->part = NULL;
	rq->start_time = jiffies;
#ifdef CONFIG_BLK_CGROUP
	rq->rl = NULL;
	set_start_time_ns(rq);
	rq->io_start_time_ns = 0;
#endif
	rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	rq->nr_integrity_segments = 0;
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;

	rq->cmd = rq->__cmd;

	rq->extra_len = 0;
	rq->sense_len = 0;
	rq->resid_len = 0;
	rq->sense = NULL;

	INIT_LIST_HEAD(&rq->timeout_list);
	rq->timeout = 0;

	rq->end_io = NULL;
	rq->end_io_data = NULL;
	rq->next_rq = NULL;

	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}

static struct request *
__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
{
	struct request *rq;
	unsigned int tag;

	tag = blk_mq_get_tag(data);
	if (tag != BLK_MQ_TAG_FAIL) {
		rq = data->hctx->tags->rqs[tag];

		if (blk_mq_tag_busy(data->hctx)) {
			rq->cmd_flags = REQ_MQ_INFLIGHT;
			atomic_inc(&data->hctx->nr_active);
		}

		rq->tag = tag;
		blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
		return rq;
	}

	return NULL;
}

struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
		bool reserved)
{
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	struct blk_mq_alloc_data alloc_data;
	int ret;

	ret = blk_mq_queue_enter(q, gfp);
	if (ret)
		return ERR_PTR(ret);

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,
			reserved, ctx, hctx);

	rq = __blk_mq_alloc_request(&alloc_data, rw);
	if (!rq && (gfp & __GFP_WAIT)) {
		__blk_mq_run_hw_queue(hctx);
		blk_mq_put_ctx(ctx);

		ctx = blk_mq_get_ctx(q);
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
		blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx,
				hctx);
		rq = __blk_mq_alloc_request(&alloc_data, rw);
		ctx = alloc_data.ctx;
	}
	blk_mq_put_ctx(ctx);
	if (!rq) {
		blk_mq_queue_exit(q);
		return ERR_PTR(-EWOULDBLOCK);
	}
	return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);

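/*
 * Illustrative note (annotation, not in the original source): a driver
 * wanting a request outside the normal bio path would do something like
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 *
 * With __GFP_WAIT the call sleeps for a free tag; without it, -EBUSY or
 * -EWOULDBLOCK is returned, as seen above.
 */
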
static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx, struct request *rq)
{
	const int tag = rq->tag;
	struct request_queue *q = rq->q;

	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);
	rq->cmd_flags = 0;

	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	blk_mq_put_tag(hctx, tag, &ctx->last_tag);
	blk_mq_queue_exit(q);
}

void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	ctx->rq_completed[rq_is_sync(rq)]++;
	__blk_mq_free_request(hctx, ctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);

void blk_mq_free_request(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q = rq->q;

	hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
	blk_mq_free_hctx_request(hctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);

inline void __blk_mq_end_request(struct request *rq, int error)
{
	blk_account_io_done(rq);

	if (rq->end_io) {
		rq->end_io(rq, error);
	} else {
		if (unlikely(blk_bidi_rq(rq)))
			blk_mq_free_request(rq->next_rq);
		blk_mq_free_request(rq);
	}
}
EXPORT_SYMBOL(__blk_mq_end_request);

void blk_mq_end_request(struct request *rq, int error)
{
	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
		BUG();
	__blk_mq_end_request(rq, error);
}
EXPORT_SYMBOL(blk_mq_end_request);

static void __blk_mq_complete_request_remote(void *data)
{
	struct request *rq = data;

	rq->q->softirq_done_fn(rq);
}

static void blk_mq_ipi_complete_request(struct request *rq)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool shared = false;
	int cpu;

	if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
		rq->q->softirq_done_fn(rq);
		return;
	}

	cpu = get_cpu();
	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
		shared = cpus_share_cache(cpu, ctx->cpu);

	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
		rq->csd.func = __blk_mq_complete_request_remote;
		rq->csd.info = rq;
		rq->csd.flags = 0;
		smp_call_function_single_async(ctx->cpu, &rq->csd);
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu();
}

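/*
 * Illustrative note (annotation, not in the original source): the decision
 * above reads as follows -- if QUEUE_FLAG_SAME_COMP is not set, complete
 * locally; otherwise complete on the submitting ctx->cpu via IPI, unless
 * the completing CPU already shares a cache with it (and SAME_FORCE is not
 * set), in which case a local softirq_done_fn() call is good enough.
 */
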
void __blk_mq_complete_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (!q->softirq_done_fn)
		blk_mq_end_request(rq, rq->errors);
	else
		blk_mq_ipi_complete_request(rq);
}

/**
 * blk_mq_complete_request - end I/O on a request
 * @rq:		the request being processed
 * @error:	completion error code
 *
 * Description:
 *	Ends all I/O on a request. It does not handle partial completions.
 *	The actual completion happens out-of-order, through an IPI handler.
 **/
void blk_mq_complete_request(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	if (unlikely(blk_should_fake_timeout(q)))
		return;
	if (!blk_mark_rq_complete(rq)) {
		rq->errors = error;
		__blk_mq_complete_request(rq);
	}
}
EXPORT_SYMBOL(blk_mq_complete_request);

int blk_mq_request_started(struct request *rq)
{
	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
}
EXPORT_SYMBOL_GPL(blk_mq_request_started);

void blk_mq_start_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_issue(q, rq);

	rq->resid_len = blk_rq_bytes(rq);
	if (unlikely(blk_bidi_rq(rq)))
		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

	blk_add_timer(rq);

	/*
	 * Ensure that ->deadline is visible before we set the started
	 * flag and clear the completed flag.
	 */
	smp_mb__before_atomic();

	/*
	 * Mark us as started and clear complete. Complete might have been
	 * set if requeue raced with timeout, which then marked it as
	 * complete. So be sure to clear complete again when we start
	 * the request, otherwise we'll ignore the completion event.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
		set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
	if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
		clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

	if (q->dma_drain_size && blk_rq_bytes(rq)) {
		/*
		 * Make sure space for the drain appears. We know we can do
		 * this because max_hw_segments has been adjusted to be one
		 * fewer than the device can handle.
		 */
		rq->nr_phys_segments++;
	}
}
EXPORT_SYMBOL(blk_mq_start_request);

static void __blk_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	trace_block_rq_requeue(q, rq);

	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		if (q->dma_drain_size && blk_rq_bytes(rq))
			rq->nr_phys_segments--;
	}
}

void blk_mq_requeue_request(struct request *rq)
{
	__blk_mq_requeue_request(rq);

	BUG_ON(blk_queued_rq(rq));
	blk_mq_add_to_requeue_list(rq, true);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
			continue;

		rq->cmd_flags &= ~REQ_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, true, false, false);
	}

	while (!list_empty(&rq_list)) {
		rq = list_entry(rq_list.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, false, false, false);
	}

	/*
	 * Use the start variant of queue running here, so that running
	 * the requeue work will kick stopped queues.
	 */
	blk_mq_start_hw_queues(q);
}

void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
	 */
	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head) {
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->requeue_list);
	} else {
		list_add_tail(&rq->queuelist, &q->requeue_list);
	}
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);

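/*
 * Illustrative note (annotation, not in the original source): the usual
 * driver pattern is to defer a request and then schedule the requeue work,
 * e.g.
 *
 *	blk_mq_requeue_request(rq);
 *	blk_mq_kick_requeue_list(q);
 *
 * blk_mq_requeue_request() only parks the request on q->requeue_list; the
 * kick runs blk_mq_requeue_work() to actually reinsert and restart queues.
 */
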
void blk_mq_cancel_requeue_work(struct request_queue *q)
{
	cancel_work_sync(&q->requeue_work);
}
EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);

void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_work(&q->requeue_work);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);

void blk_mq_abort_requeue_list(struct request_queue *q)
{
	unsigned long flags;
	LIST_HEAD(rq_list);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	while (!list_empty(&rq_list)) {
		struct request *rq;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
	}
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);

struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	return tags->rqs[tag];
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);

struct blk_mq_timeout_data {
	unsigned long next;
	unsigned int next_set;
};

void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
	struct blk_mq_ops *ops = req->q->mq_ops;
	enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;

	/*
	 * We know that complete is set at this point. If STARTED isn't set
	 * anymore, then the request isn't active and the "timeout" should
	 * just be ignored. This can happen due to the bitflag ordering.
	 * Timeout first checks if STARTED is set, and if it is, assumes
	 * the request is active. But if we race with completion, then
	 * both flags will get cleared. So check here again, and ignore
	 * a timeout event with a request that isn't active.
	 */
	if (!test_bit(REQ_ATOM_STARTED, &req->atomic_flags))
		return;

	if (ops->timeout)
		ret = ops->timeout(req, reserved);

	switch (ret) {
	case BLK_EH_HANDLED:
		__blk_mq_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
		blk_add_timer(req);
		blk_clear_rq_complete(req);
		break;
	case BLK_EH_NOT_HANDLED:
		break;
	default:
		printk(KERN_ERR "block: bad eh return: %d\n", ret);
		break;
	}
}

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	struct blk_mq_timeout_data *data = priv;

	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
		/*
		 * If a request wasn't started before the queue was
		 * marked dying, kill it here or it'll go unnoticed.
		 */
		if (unlikely(blk_queue_dying(rq->q)))
			blk_mq_complete_request(rq, -EIO);
		return;
	}
	if (rq->cmd_flags & REQ_NO_TIMEOUT)
		return;

	if (time_after_eq(jiffies, rq->deadline)) {
		if (!blk_mark_rq_complete(rq))
			blk_mq_rq_timed_out(rq, reserved);
	} else if (!data->next_set || time_after(data->next, rq->deadline)) {
		data->next = rq->deadline;
		data->next_set = 1;
	}
}

static void blk_mq_rq_timer(unsigned long priv)
{
	struct request_queue *q = (struct request_queue *)priv;
	struct blk_mq_timeout_data data = {
		.next		= 0,
		.next_set	= 0,
	};
	int i;

	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);

	if (data.next_set) {
		data.next = blk_rq_timeout(round_jiffies_up(data.next));
		mod_timer(&q->timeout, data.next);
	} else {
		struct blk_mq_hw_ctx *hctx;

		queue_for_each_hw_ctx(q, hctx, i) {
			/* the hctx may be unmapped, so check it here */
			if (blk_mq_hw_queue_mapped(hctx))
				blk_mq_tag_idle(hctx);
		}
	}
}

/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		int el_ret;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		el_ret = blk_try_merge(rq, bio);
		if (el_ret == ELEVATOR_BACK_MERGE) {
			if (bio_attempt_back_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
			if (bio_attempt_front_merge(q, rq, bio)) {
				ctx->rq_merged++;
				return true;
			}
			break;
		}
	}

	return false;
}

/*
 * Process software queues that have been marked busy, splicing them
 * to the for-dispatch list.
 */
static void flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list)
{
	struct blk_mq_ctx *ctx;
	int i;

	for (i = 0; i < hctx->ctx_map.size; i++) {
		struct blk_align_bitmap *bm = &hctx->ctx_map.map[i];
		unsigned int off, bit;

		if (!bm->word)
			continue;

		bit = 0;
		off = i * hctx->ctx_map.bits_per_word;
		do {
			bit = find_next_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			ctx = hctx->ctxs[bit + off];
			clear_bit(bit, &bm->word);
			spin_lock(&ctx->lock);
			list_splice_tail_init(&ctx->rq_list, list);
			spin_unlock(&ctx->lock);

			bit++;
		} while (1);
	}
}

/*
 * Run this hardware queue, pulling any software queues mapped to it in.
 * Note that this function currently has various problems around ordering
 * of IO. In particular, we'd like FIFO behaviour on handling existing
 * items on the hctx->dispatch list. Ignore that for now.
 */
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct request *rq;
	LIST_HEAD(rq_list);
	LIST_HEAD(driver_list);
	struct list_head *dptr;
	int queued;

	WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));

	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
		return;

	hctx->run++;

	/*
	 * Touch any software queue that has pending entries.
	 */
	flush_busy_ctxs(hctx, &rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them
	 * and stuff them at the front for more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Start off with dptr being NULL, so we start the first request
	 * immediately, even if we have more pending.
	 */
	dptr = NULL;

	/*
	 * Now process all the entries, sending them to the driver.
	 */
	queued = 0;
	while (!list_empty(&rq_list)) {
		struct blk_mq_queue_data bd;
		int ret;

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);

		bd.rq = rq;
		bd.list = dptr;
		bd.last = list_empty(&rq_list);

		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			continue;
		case BLK_MQ_RQ_QUEUE_BUSY:
			list_add(&rq->queuelist, &rq_list);
			__blk_mq_requeue_request(rq);
			break;
		default:
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
			break;
		}

		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
			break;

		/*
		 * We've done the first request. If we have more than 1
		 * left in the list, set dptr to defer issue.
		 */
		if (!dptr && rq_list.next != rq_list.prev)
			dptr = &driver_list;
	}

	if (!queued)
		hctx->dispatched[0]++;
	else if (queued < (1 << (BLK_MQ_MAX_DISPATCH_ORDER - 1)))
		hctx->dispatched[ilog2(queued) + 1]++;

	/*
	 * Any items that need requeuing? Stuff them into hctx->dispatch,
	 * that is where we will continue on next queue run.
	 */
	if (!list_empty(&rq_list)) {
		spin_lock(&hctx->lock);
		list_splice(&rq_list, &hctx->dispatch);
		spin_unlock(&hctx->lock);
		/*
		 * The queue is expected to be stopped on BLK_MQ_RQ_QUEUE_BUSY,
		 * but it's possible the queue is stopped and restarted again
		 * before this. Queue restart will dispatch requests. And since
		 * requests in rq_list aren't added into hctx->dispatch yet,
		 * the requests in rq_list might get lost.
		 *
		 * blk_mq_run_hw_queue() already checks the STOPPED bit.
		 */
		blk_mq_run_hw_queue(hctx, true);
	}
}

/*
 * It'd be great if the workqueue API had a way to pass
 * in a mask and had some smarts for more clever placement.
 * For now we just round-robin here, switching for every
 * BLK_MQ_CPU_WORK_BATCH queued items.
 */
static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->queue->nr_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	if (--hctx->next_cpu_batch <= 0) {
		int cpu = hctx->next_cpu, next_cpu;

		next_cpu = cpumask_next(hctx->next_cpu, hctx->cpumask);
		if (next_cpu >= nr_cpu_ids)
			next_cpu = cpumask_first(hctx->cpumask);

		hctx->next_cpu = next_cpu;
		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;

		return cpu;
	}

	return hctx->next_cpu;
}

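/*
 * Illustrative note (annotation, not in the original source): if
 * BLK_MQ_CPU_WORK_BATCH were 8 and the hctx cpumask were {2, 5, 9}, the
 * first eight scheduled runs would land on CPU 2, the next eight on CPU 5,
 * then CPU 9, with the rotation wrapping back to CPU 2 via cpumask_first().
 */
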
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state) ||
	    !blk_mq_hw_queue_mapped(hctx)))
		return;

	if (!async) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
			__blk_mq_run_hw_queue(hctx);
			put_cpu();
			return;
		}

		put_cpu();
	}

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
			&hctx->run_work, 0);
}

void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if ((!blk_mq_hctx_has_pending(hctx) &&
		    list_empty_careful(&hctx->dispatch)) ||
		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_run_hw_queues);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	cancel_delayed_work(&hctx->run_work);
	cancel_delayed_work(&hctx->delay_work);
	set_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queue);

void blk_mq_stop_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_stop_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_stop_hw_queues);

void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);

	blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);

void blk_mq_start_hw_queues(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_start_hw_queue(hctx);
}
EXPORT_SYMBOL(blk_mq_start_hw_queues);

void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!test_bit(BLK_MQ_S_STOPPED, &hctx->state))
			continue;

		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
		blk_mq_run_hw_queue(hctx, async);
	}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);

static void blk_mq_run_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);

	__blk_mq_run_hw_queue(hctx);
}

static void blk_mq_delay_work_fn(struct work_struct *work)
{
	struct blk_mq_hw_ctx *hctx;

	hctx = container_of(work, struct blk_mq_hw_ctx, delay_work.work);

	if (test_and_clear_bit(BLK_MQ_S_STOPPED, &hctx->state))
		__blk_mq_run_hw_queue(hctx);
}

void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
{
	if (unlikely(!blk_mq_hw_queue_mapped(hctx)))
		return;

	kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
			&hctx->delay_work, msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL(blk_mq_delay_queue);

static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
				    struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	trace_block_rq_insert(hctx->queue, rq);

	if (at_head)
		list_add(&rq->queuelist, &ctx->rq_list);
	else
		list_add_tail(&rq->queuelist, &ctx->rq_list);

	blk_mq_hctx_mark_pending(hctx, ctx);
}

void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
		bool async)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;

	current_ctx = blk_mq_get_ctx(q);
	if (!cpu_online(ctx->cpu))
		rq->mq_ctx = ctx = current_ctx;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, at_head);
	spin_unlock(&ctx->lock);

	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);

	blk_mq_put_ctx(current_ctx);
}

static void blk_mq_insert_requests(struct request_queue *q,
				     struct blk_mq_ctx *ctx,
				     struct list_head *list,
				     int depth,
				     bool from_schedule)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *current_ctx;

	trace_block_unplug(q, depth, !from_schedule);

	current_ctx = blk_mq_get_ctx(q);

	if (!cpu_online(ctx->cpu))
		ctx = current_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * Preemption doesn't flush the plug list, so it's possible that
	 * ctx->cpu is offline by now.
	 */
	spin_lock(&ctx->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->mq_ctx = ctx;
		__blk_mq_insert_request(hctx, rq, false);
	}
	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, from_schedule);
	blk_mq_put_ctx(current_ctx);
}

static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->mq_ctx < rqb->mq_ctx ||
		 (rqa->mq_ctx == rqb->mq_ctx &&
		  blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	struct blk_mq_ctx *this_ctx;
	struct request_queue *this_q;
	struct request *rq;
	LIST_HEAD(list);
	LIST_HEAD(ctx_list);
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);

	list_sort(NULL, &list, plug_ctx_cmp);

	this_q = NULL;
	this_ctx = NULL;
	depth = 0;

	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
		list_del_init(&rq->queuelist);
		BUG_ON(!rq->q);
		if (rq->mq_ctx != this_ctx) {
			if (this_ctx) {
				blk_mq_insert_requests(this_q, this_ctx,
							&ctx_list, depth,
							from_schedule);
			}

			this_ctx = rq->mq_ctx;
			this_q = rq->q;
			depth = 0;
		}

		depth++;
		list_add_tail(&rq->queuelist, &ctx_list);
	}

	/*
	 * If 'this_ctx' is set, we know we have entries to complete
	 * on 'ctx_list'. Do those.
	 */
	if (this_ctx) {
		blk_mq_insert_requests(this_q, this_ctx, &ctx_list, depth,
				       from_schedule);
	}
}

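/*
 * Illustrative note (annotation, not in the original source): after the
 * list_sort() above, requests sharing an mq_ctx are adjacent, so a plug list
 * such as [ctxB:pos9, ctxA:pos4, ctxB:pos2, ctxA:pos7] is walked as
 * ctxA:{pos4, pos7} followed by ctxB:{pos2, pos9}, and each group is handed
 * to blk_mq_insert_requests() in a single batch.
 */
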
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
	init_request_from_bio(rq, bio);

	if (blk_do_io_stat(rq))
		blk_account_io_start(rq, 1);
}

static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
{
	return (hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
		!blk_queue_nomerges(hctx->queue);
}

static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
					 struct blk_mq_ctx *ctx,
					 struct request *rq, struct bio *bio)
{
	if (!hctx_allow_merges(hctx)) {
		blk_mq_bio_to_request(rq, bio);
		spin_lock(&ctx->lock);
insert_rq:
		__blk_mq_insert_request(hctx, rq, false);
		spin_unlock(&ctx->lock);
		return false;
	} else {
		struct request_queue *q = hctx->queue;

		spin_lock(&ctx->lock);
		if (!blk_mq_attempt_merge(q, ctx, bio)) {
			blk_mq_bio_to_request(rq, bio);
			goto insert_rq;
		}

		spin_unlock(&ctx->lock);
		__blk_mq_free_request(hctx, ctx, rq);
		return true;
	}
}

struct blk_map_ctx {
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
};

static struct request *blk_mq_map_request(struct request_queue *q,
					  struct bio *bio,
					  struct blk_map_ctx *data)
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
	int rw = bio_data_dir(bio);
	struct blk_mq_alloc_data alloc_data;

	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
		bio_io_error(bio);
		return NULL;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	if (rw_is_sync(bio->bi_rw))
		rw |= REQ_SYNC;

	trace_block_getrq(q, bio, rw);
	blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx,
			hctx);
	rq = __blk_mq_alloc_request(&alloc_data, rw);
	if (unlikely(!rq)) {
		__blk_mq_run_hw_queue(hctx);
		blk_mq_put_ctx(ctx);
		trace_block_sleeprq(q, bio, rw);

		ctx = blk_mq_get_ctx(q);
		hctx = q->mq_ops->map_queue(q, ctx->cpu);
		blk_mq_set_alloc_data(&alloc_data, q,
				__GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);
		rq = __blk_mq_alloc_request(&alloc_data, rw);
		ctx = alloc_data.ctx;
		hctx = alloc_data.hctx;
	}

	hctx->queued++;
	data->hctx = hctx;
	data->ctx = ctx;
	return rq;
}

static int blk_mq_direct_issue_request(struct request *rq)
{
	int ret;
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
			rq->mq_ctx->cpu);
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.list = NULL,
		.last = 1
	};

	/*
	 * For an OK return we are done. For an error, kill the request. Any
	 * other return (busy) means the caller adds the request to its list,
	 * as it previously would have done.
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	if (ret == BLK_MQ_RQ_QUEUE_OK)
		return 0;
	else {
		__blk_mq_requeue_request(rq);

		if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
			return 0;
		}
		return -1;
	}
}

/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
 * hardware for SYNC IO.
 */
static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio->bi_rw);
	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
	struct blk_map_ctx data;
	struct request *rq;
	unsigned int request_count = 0;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_io_error(bio);
		return;
	}

	blk_queue_split(q, &bio, q->bio_split);

	if (!is_flush_fua && !blk_queue_nomerges(q)) {
		if (blk_attempt_plug_merge(q, bio, &request_count,
					   &same_queue_rq))
			return;
	} else
		request_count = blk_plug_queued_count(q);

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return;

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	plug = current->plug;
	/*
	 * If the driver supports defer issue based on 'last', then
	 * queue it up like normal since we can potentially save some
	 * CPU this way.
	 */
	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
		struct request *old_rq = NULL;

		blk_mq_bio_to_request(rq, bio);

		/*
		 * We do limited plugging. If the bio can be merged, do that.
		 * Otherwise the existing request in the plug list will be
		 * issued, so the plug list will hold one request at most.
		 */
		if (plug) {
			/*
			 * The plug list might get flushed before this. If that
			 * happens, same_queue_rq is invalid and the plug list
			 * is empty.
			 */
			if (same_queue_rq && !list_empty(&plug->mq_list)) {
				old_rq = same_queue_rq;
				list_del_init(&old_rq->queuelist);
			}
			list_add_tail(&rq->queuelist, &plug->mq_list);
		} else /* is_sync */
			old_rq = rq;
		blk_mq_put_ctx(data.ctx);
		if (!old_rq)
			return;
		if (!blk_mq_direct_issue_request(old_rq))
			return;
		blk_mq_insert_request(old_rq, false, true, true);
		return;
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}
	blk_mq_put_ctx(data.ctx);
}

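/*
 * Illustrative note (annotation, not in the original source): this is the
 * path touched by the "fix plug list flushing for nomerge queues" change in
 * the commit header above. When merging is disabled on the queue,
 * blk_attempt_plug_merge() is skipped and request_count is taken from
 * blk_plug_queued_count() instead, so the limited-plugging logic still sees
 * how many requests the plug already holds.
 */
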
/*
 * Single hardware queue variant. This will attempt to use any per-process
 * plug for merging and IO deferral.
 */
static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio->bi_rw);
	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
	struct blk_plug *plug;
	unsigned int request_count = 0;
	struct blk_map_ctx data;
	struct request *rq;

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio_io_error(bio);
		return;
	}

	blk_queue_split(q, &bio, q->bio_split);

	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
		return;

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return;

	if (unlikely(is_flush_fua)) {
		blk_mq_bio_to_request(rq, bio);
		blk_insert_flush(rq);
		goto run_queue;
	}

	/*
	 * A task plug currently exists. Since this is completely lockless,
	 * utilize that to temporarily store requests until the task is
	 * either done or scheduled away.
	 */
	plug = current->plug;
	if (plug) {
		blk_mq_bio_to_request(rq, bio);
		if (list_empty(&plug->mq_list))
			trace_block_plug(q);
		else if (request_count >= BLK_MAX_REQUEST_COUNT) {
			blk_flush_plug_list(plug, false);
			trace_block_plug(q);
		}
		list_add_tail(&rq->queuelist, &plug->mq_list);
		blk_mq_put_ctx(data.ctx);
		return;
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
		 * For a SYNC request, send it to the hardware immediately. For
		 * an ASYNC request, just ensure that we run it later on. The
		 * latter allows for merging opportunities and more efficient
		 * dispatching.
		 */
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}

	blk_mq_put_ctx(data.ctx);
}

/*
 * Default mapping to a software queue, since we use one per CPU.
 */
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
EXPORT_SYMBOL(blk_mq_map_queue);

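/*
 * Illustrative note (annotation, not in the original source): q->mq_map[]
 * is built in blk_mq_make_queue_map() and simply indexes hardware queues by
 * CPU; on a 4-CPU box with two hardware queues a plausible mapping is
 * mq_map = {0, 0, 1, 1}, so a bio submitted on CPU 3 is dispatched through
 * queue_hw_ctx[1].
 */
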
static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
		struct blk_mq_tags *tags, unsigned int hctx_idx)
{
	struct page *page;

	if (tags->rqs && set->ops->exit_request) {
		int i;

		for (i = 0; i < tags->nr_tags; i++) {
			if (!tags->rqs[i])
				continue;
			set->ops->exit_request(set->driver_data, tags->rqs[i],
						hctx_idx, i);
			tags->rqs[i] = NULL;
		}
	}

	while (!list_empty(&tags->page_list)) {
		page = list_first_entry(&tags->page_list, struct page, lru);
		list_del_init(&page->lru);
		/*
		 * Remove kmemleak object previously allocated in
		 * blk_mq_init_rq_map().
		 */
		kmemleak_free(page_address(page));
		__free_pages(page, page->private);
	}

	kfree(tags->rqs);

	blk_mq_free_tags(tags);
}

static size_t order_to_size(unsigned int order)
{
	return (size_t)PAGE_SIZE << order;
}

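/*
 * Illustrative note (annotation, not in the original source): with 4 KB
 * pages, order_to_size(4) == 4096 << 4 == 64 KB, which is the largest chunk
 * blk_mq_init_rq_map() below tries (max_order = 4) before falling back to
 * smaller allocations.
 */
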
static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
		unsigned int hctx_idx)
{
	struct blk_mq_tags *tags;
	unsigned int i, j, entries_per_page, max_order = 4;
	size_t rq_size, left;

	tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
				set->numa_node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	if (!tags)
		return NULL;

	INIT_LIST_HEAD(&tags->page_list);

	tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
				 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
				 set->numa_node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		return NULL;
	}

	/*
	 * rq_size is the size of the request plus driver payload, rounded
	 * to the cacheline size
	 */
	rq_size = round_up(sizeof(struct request) + set->cmd_size,
				cache_line_size());
	left = rq_size * set->queue_depth;

	for (i = 0; i < set->queue_depth; ) {
		int this_order = max_order;
		struct page *page;
		int to_do;
		void *p;

		while (left < order_to_size(this_order - 1) && this_order)
			this_order--;

		do {
			page = alloc_pages_node(set->numa_node,
				GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
				this_order);
			if (page)
				break;
			if (!this_order--)
				break;
			if (order_to_size(this_order) < rq_size)
				break;
		} while (1);

		if (!page)
			goto fail;

		page->private = this_order;
		list_add_tail(&page->lru, &tags->page_list);

		p = page_address(page);
		/*
		 * Allow kmemleak to scan these pages as they contain pointers
		 * to additional allocations like via ops->init_request().
		 */
		kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
		entries_per_page = order_to_size(this_order) / rq_size;
		to_do = min(entries_per_page, set->queue_depth - i);
		left -= to_do * rq_size;
		for (j = 0; j < to_do; j++) {
			tags->rqs[i] = p;
			if (set->ops->init_request) {
				if (set->ops->init_request(set->driver_data,
						tags->rqs[i], hctx_idx, i,
						set->numa_node)) {
					tags->rqs[i] = NULL;
					goto fail;
				}
			}

			p += rq_size;
			i++;
		}
	}
	return tags;

fail:
	blk_mq_free_rq_map(set, tags, hctx_idx);
	return NULL;
}

static void blk_mq_free_bitmap(struct blk_mq_ctxmap *bitmap)
{
	kfree(bitmap->map);
}

static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
{
	unsigned int bpw = 8, total, num_maps, i;

	bitmap->bits_per_word = bpw;

	num_maps = ALIGN(nr_cpu_ids, bpw) / bpw;
	bitmap->map = kzalloc_node(num_maps * sizeof(struct blk_align_bitmap),
					GFP_KERNEL, node);
	if (!bitmap->map)
		return -ENOMEM;

	total = nr_cpu_ids;
	for (i = 0; i < num_maps; i++) {
		bitmap->map[i].depth = min(total, bitmap->bits_per_word);
		total -= bitmap->map[i].depth;
	}

	return 0;
}

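/*
 * Illustrative note (annotation, not in the original source): with
 * nr_cpu_ids == 20 and bpw == 8, blk_mq_alloc_bitmap() creates three
 * blk_align_bitmap words with depths 8, 8 and 4, so every possible software
 * queue gets a dedicated pending bit while the last word simply tracks
 * fewer contexts.
 */
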
static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	LIST_HEAD(tmp);

	/*
	 * Move ctx entries to new CPU, if this one is going away.
	 */
	ctx = __blk_mq_get_ctx(q, cpu);

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->rq_list)) {
		list_splice_init(&ctx->rq_list, &tmp);
		blk_mq_hctx_clear_pending(hctx, ctx);
	}
	spin_unlock(&ctx->lock);

	if (list_empty(&tmp))
		return NOTIFY_OK;

	ctx = blk_mq_get_ctx(q);
	spin_lock(&ctx->lock);

	while (!list_empty(&tmp)) {
		struct request *rq;

		rq = list_first_entry(&tmp, struct request, queuelist);
		rq->mq_ctx = ctx;
		list_move_tail(&rq->queuelist, &ctx->rq_list);
	}

	hctx = q->mq_ops->map_queue(q, ctx->cpu);
	blk_mq_hctx_mark_pending(hctx, ctx);

	spin_unlock(&ctx->lock);

	blk_mq_run_hw_queue(hctx, true);
	blk_mq_put_ctx(ctx);
	return NOTIFY_OK;
}

static int blk_mq_hctx_notify(void *data, unsigned long action,
			      unsigned int cpu)
{
	struct blk_mq_hw_ctx *hctx = data;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		return blk_mq_hctx_cpu_offline(hctx, cpu);

	/*
	 * In case of CPU online, tags may be reallocated
	 * in blk_mq_map_swqueue() after mapping is updated.
	 */

	return NOTIFY_OK;
}

/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	unsigned flush_start_tag = set->queue_depth;

	blk_mq_tag_idle(hctx);

	if (set->ops->exit_request)
		set->ops->exit_request(set->driver_data,
				       hctx->fq->flush_rq, hctx_idx,
				       flush_start_tag + hctx_idx);

	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
	blk_free_flush_queue(hctx->fq);
	blk_mq_free_bitmap(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set, int nr_queue)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (i == nr_queue)
			break;
		blk_mq_exit_hctx(q, set, hctx, i);
	}
}

static void blk_mq_free_hw_queues(struct request_queue *q,
		struct blk_mq_tag_set *set)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i)
		free_cpumask_var(hctx->cpumask);
}

static int blk_mq_init_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
	int node;
	unsigned flush_start_tag = set->queue_depth;

	node = hctx->numa_node;
	if (node == NUMA_NO_NODE)
		node = hctx->numa_node = set->numa_node;

	INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
	INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
	spin_lock_init(&hctx->lock);
	INIT_LIST_HEAD(&hctx->dispatch);
	hctx->queue = q;
	hctx->queue_num = hctx_idx;
	hctx->flags = set->flags;

	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
					blk_mq_hctx_notify, hctx);
	blk_mq_register_cpu_notifier(&hctx->cpu_notifier);

	hctx->tags = set->tags[hctx_idx];

	/*
	 * Allocate space for all possible cpus to avoid allocation at
	 * runtime
	 */
	hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
					GFP_KERNEL, node);
	if (!hctx->ctxs)
		goto unregister_cpu_notifier;

	if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
		goto free_ctxs;

	hctx->nr_ctx = 0;

	if (set->ops->init_hctx &&
	    set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
		goto free_bitmap;

	hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
	if (!hctx->fq)
		goto exit_hctx;

	if (set->ops->init_request &&
	    set->ops->init_request(set->driver_data,
				   hctx->fq->flush_rq, hctx_idx,
				   flush_start_tag + hctx_idx, node))
		goto free_fq;

	return 0;

 free_fq:
	kfree(hctx->fq);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
 free_bitmap:
	blk_mq_free_bitmap(&hctx->ctx_map);
 free_ctxs:
	kfree(hctx->ctxs);
 unregister_cpu_notifier:
	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);

	return -1;
}

08e98fc6
ML
1743static int blk_mq_init_hw_queues(struct request_queue *q,
1744 struct blk_mq_tag_set *set)
1745{
1746 struct blk_mq_hw_ctx *hctx;
1747 unsigned int i;
320ae51f 1748
08e98fc6
ML
1749 /*
1750 * Initialize hardware queues
1751 */
1752 queue_for_each_hw_ctx(q, hctx, i) {
1753 if (blk_mq_init_hctx(q, set, hctx, i))
320ae51f
JA
1754 break;
1755 }
1756
1757 if (i == q->nr_hw_queues)
1758 return 0;
1759
1760 /*
1761 * Init failed
1762 */
624dbe47 1763 blk_mq_exit_hw_queues(q, set, i);
320ae51f
JA
1764
1765 return 1;
1766}
1767
1768static void blk_mq_init_cpu_queues(struct request_queue *q,
1769 unsigned int nr_hw_queues)
1770{
1771 unsigned int i;
1772
1773 for_each_possible_cpu(i) {
1774 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
1775 struct blk_mq_hw_ctx *hctx;
1776
1777 memset(__ctx, 0, sizeof(*__ctx));
1778 __ctx->cpu = i;
1779 spin_lock_init(&__ctx->lock);
1780 INIT_LIST_HEAD(&__ctx->rq_list);
1781 __ctx->queue = q;
1782
1783 /* If the cpu isn't online, the cpu is mapped to first hctx */
320ae51f
JA
1784 if (!cpu_online(i))
1785 continue;
1786
e4043dcf 1787 hctx = q->mq_ops->map_queue(q, i);
e4043dcf 1788
320ae51f
JA
1789 /*
1790 * Set local node, IFF we have more than one hw queue. If
1791 * not, we remain on the home node of the device
1792 */
1793 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
1794 hctx->numa_node = cpu_to_node(i);
1795 }
1796}
1797
5778322e
AM
1798static void blk_mq_map_swqueue(struct request_queue *q,
1799 const struct cpumask *online_mask)
320ae51f
JA
1800{
1801 unsigned int i;
1802 struct blk_mq_hw_ctx *hctx;
1803 struct blk_mq_ctx *ctx;
2a34c087 1804 struct blk_mq_tag_set *set = q->tag_set;
320ae51f 1805
60de074b
AM
1806 /*
1807 * Avoid others reading imcomplete hctx->cpumask through sysfs
1808 */
1809 mutex_lock(&q->sysfs_lock);
1810
320ae51f 1811 queue_for_each_hw_ctx(q, hctx, i) {
e4043dcf 1812 cpumask_clear(hctx->cpumask);
320ae51f
JA
1813 hctx->nr_ctx = 0;
1814 }
1815
1816 /*
1817 * Map software to hardware queues
1818 */
1819 queue_for_each_ctx(q, ctx, i) {
1820 /* If the cpu isn't online, the cpu is mapped to first hctx */
5778322e 1821 if (!cpumask_test_cpu(i, online_mask))
e4043dcf
JA
1822 continue;
1823
320ae51f 1824 hctx = q->mq_ops->map_queue(q, i);
e4043dcf 1825 cpumask_set_cpu(i, hctx->cpumask);
320ae51f
JA
1826 ctx->index_hw = hctx->nr_ctx;
1827 hctx->ctxs[hctx->nr_ctx++] = ctx;
1828 }
506e931f 1829
60de074b
AM
1830 mutex_unlock(&q->sysfs_lock);
1831
506e931f 1832 queue_for_each_hw_ctx(q, hctx, i) {
889fa31f
CY
1833 struct blk_mq_ctxmap *map = &hctx->ctx_map;
1834
484b4061 1835 /*
a68aafa5
JA
1836 * If no software queues are mapped to this hardware queue,
1837 * disable it and free the request entries.
484b4061
JA
1838 */
1839 if (!hctx->nr_ctx) {
484b4061
JA
1840 if (set->tags[i]) {
1841 blk_mq_free_rq_map(set, set->tags[i], i);
1842 set->tags[i] = NULL;
484b4061 1843 }
2a34c087 1844 hctx->tags = NULL;
484b4061
JA
1845 continue;
1846 }
1847
2a34c087
ML
1848 /* unmapped hw queue can be remapped after CPU topo changed */
1849 if (!set->tags[i])
1850 set->tags[i] = blk_mq_init_rq_map(set, i);
1851 hctx->tags = set->tags[i];
1852 WARN_ON(!hctx->tags);
1853
889fa31f
CY
1854 /*
1855 * Set the map size to the number of mapped software queues.
1856 * This is more accurate and more efficient than looping
1857 * over all possibly mapped software queues.
1858 */
569fd0ce 1859 map->size = DIV_ROUND_UP(hctx->nr_ctx, map->bits_per_word);
889fa31f 1860
484b4061
JA
1861 /*
1862 * Initialize batch roundrobin counts
1863 */
506e931f
JA
1864 hctx->next_cpu = cpumask_first(hctx->cpumask);
1865 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1866 }
1356aae0
AM
1867
1868 queue_for_each_ctx(q, ctx, i) {
5778322e 1869 if (!cpumask_test_cpu(i, online_mask))
1356aae0
AM
1870 continue;
1871
1872 hctx = q->mq_ops->map_queue(q, i);
1873 cpumask_set_cpu(i, hctx->tags->cpumask);
1874 }
320ae51f
JA
1875}
1876
0d2602ca
JA
1877static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1878{
1879 struct blk_mq_hw_ctx *hctx;
1880 struct request_queue *q;
1881 bool shared;
1882 int i;
1883
1884 if (set->tag_list.next == set->tag_list.prev)
1885 shared = false;
1886 else
1887 shared = true;
1888
1889 list_for_each_entry(q, &set->tag_list, tag_set_list) {
1890 blk_mq_freeze_queue(q);
1891
1892 queue_for_each_hw_ctx(q, hctx, i) {
1893 if (shared)
1894 hctx->flags |= BLK_MQ_F_TAG_SHARED;
1895 else
1896 hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
1897 }
1898 blk_mq_unfreeze_queue(q);
1899 }
1900}
1901
1902static void blk_mq_del_queue_tag_set(struct request_queue *q)
1903{
1904 struct blk_mq_tag_set *set = q->tag_set;
1905
0d2602ca
JA
1906 mutex_lock(&set->tag_list_lock);
1907 list_del_init(&q->tag_set_list);
1908 blk_mq_update_tag_set_depth(set);
1909 mutex_unlock(&set->tag_list_lock);
0d2602ca
JA
1910}
1911
1912static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
1913 struct request_queue *q)
1914{
1915 q->tag_set = set;
1916
1917 mutex_lock(&set->tag_list_lock);
1918 list_add_tail(&q->tag_set_list, &set->tag_list);
1919 blk_mq_update_tag_set_depth(set);
1920 mutex_unlock(&set->tag_list_lock);
1921}
1922
e09aae7e
ML
1923/*
 1924 * This is the actual release handler for mq, but we run it from the
 1925 * request queue's release handler to avoid use-after-free, and it is a
 1926 * headache: q->mq_kobj shouldn't have been introduced, but we can't
 1927 * group the ctx/hctx kobjs without it.
1928 */
1929void blk_mq_release(struct request_queue *q)
1930{
1931 struct blk_mq_hw_ctx *hctx;
1932 unsigned int i;
1933
1934 /* hctx kobj stays in hctx */
c3b4afca
ML
1935 queue_for_each_hw_ctx(q, hctx, i) {
1936 if (!hctx)
1937 continue;
1938 kfree(hctx->ctxs);
e09aae7e 1939 kfree(hctx);
c3b4afca 1940 }
e09aae7e 1941
a723bab3
AM
1942 kfree(q->mq_map);
1943 q->mq_map = NULL;
1944
e09aae7e
ML
1945 kfree(q->queue_hw_ctx);
1946
1947 /* ctx kobj stays in queue_ctx */
1948 free_percpu(q->queue_ctx);
1949}
1950
24d2f903 1951struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
b62c21b7
MS
1952{
1953 struct request_queue *uninit_q, *q;
1954
1955 uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
1956 if (!uninit_q)
1957 return ERR_PTR(-ENOMEM);
1958
1959 q = blk_mq_init_allocated_queue(set, uninit_q);
1960 if (IS_ERR(q))
1961 blk_cleanup_queue(uninit_q);
1962
1963 return q;
1964}
1965EXPORT_SYMBOL(blk_mq_init_queue);
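/*
 * A minimal, hypothetical sketch of how a driver typically consumes
 * this: blk_mq_alloc_tag_set() (defined further down in this file)
 * followed by blk_mq_init_queue(). The names my_queue_rq, my_mq_ops,
 * my_tag_set, my_queue and my_setup are illustrative only; real I/O
 * handling and most error paths are elided.
 */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *bd)
{
	/* pretend the request completes immediately */
	blk_mq_start_request(bd->rq);
	blk_mq_end_request(bd->rq, 0);
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* default cpu -> hctx mapping */
};

static struct blk_mq_tag_set my_tag_set;
static struct request_queue *my_queue;

static int my_setup(void)
{
	int ret;

	my_tag_set.ops		= &my_mq_ops;
	my_tag_set.nr_hw_queues	= 1;
	my_tag_set.queue_depth	= 64;
	my_tag_set.numa_node	= NUMA_NO_NODE;
	my_tag_set.cmd_size	= 0;
	my_tag_set.flags	= BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&my_tag_set);
	if (ret)
		return ret;

	my_queue = blk_mq_init_queue(&my_tag_set);
	if (IS_ERR(my_queue)) {
		blk_mq_free_tag_set(&my_tag_set);
		return PTR_ERR(my_queue);
	}
	return 0;
}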
1966
1967struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
1968 struct request_queue *q)
320ae51f
JA
1969{
1970 struct blk_mq_hw_ctx **hctxs;
e6cdb092 1971 struct blk_mq_ctx __percpu *ctx;
f14bbe77 1972 unsigned int *map;
320ae51f
JA
1973 int i;
1974
320ae51f
JA
1975 ctx = alloc_percpu(struct blk_mq_ctx);
1976 if (!ctx)
1977 return ERR_PTR(-ENOMEM);
1978
24d2f903
CH
1979 hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
1980 set->numa_node);
320ae51f
JA
1981
1982 if (!hctxs)
1983 goto err_percpu;
1984
f14bbe77
JA
1985 map = blk_mq_make_queue_map(set);
1986 if (!map)
1987 goto err_map;
1988
24d2f903 1989 for (i = 0; i < set->nr_hw_queues; i++) {
f14bbe77
JA
1990 int node = blk_mq_hw_queue_to_node(map, i);
1991
cdef54dd
CH
1992 hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
1993 GFP_KERNEL, node);
320ae51f
JA
1994 if (!hctxs[i])
1995 goto err_hctxs;
1996
a86073e4
JA
1997 if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
1998 node))
e4043dcf
JA
1999 goto err_hctxs;
2000
0d2602ca 2001 atomic_set(&hctxs[i]->nr_active, 0);
f14bbe77 2002 hctxs[i]->numa_node = node;
320ae51f
JA
2003 hctxs[i]->queue_num = i;
2004 }
2005
17497acb
TH
2006 /*
 2007	 * Init percpu_ref in atomic mode so that it's faster to shut down.
2008 * See blk_register_queue() for details.
2009 */
a34375ef 2010 if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
17497acb 2011 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
b62c21b7 2012 goto err_hctxs;
3d2936f4 2013
320ae51f 2014 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
e56f698b 2015 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
320ae51f
JA
2016
2017 q->nr_queues = nr_cpu_ids;
24d2f903 2018 q->nr_hw_queues = set->nr_hw_queues;
f14bbe77 2019 q->mq_map = map;
320ae51f
JA
2020
2021 q->queue_ctx = ctx;
2022 q->queue_hw_ctx = hctxs;
2023
24d2f903 2024 q->mq_ops = set->ops;
94eddfbe 2025 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
320ae51f 2026
05f1dd53
JA
2027 if (!(set->flags & BLK_MQ_F_SG_MERGE))
2028 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE;
2029
1be036e9
CH
2030 q->sg_reserved_size = INT_MAX;
2031
6fca6a61
CH
2032 INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
2033 INIT_LIST_HEAD(&q->requeue_list);
2034 spin_lock_init(&q->requeue_lock);
2035
07068d5b
JA
2036 if (q->nr_hw_queues > 1)
2037 blk_queue_make_request(q, blk_mq_make_request);
2038 else
2039 blk_queue_make_request(q, blk_sq_make_request);
2040
eba71768
JA
2041 /*
2042 * Do this after blk_queue_make_request() overrides it...
2043 */
2044 q->nr_requests = set->queue_depth;
2045
24d2f903
CH
2046 if (set->ops->complete)
2047 blk_queue_softirq_done(q, set->ops->complete);
30a91cb4 2048
24d2f903 2049 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
320ae51f 2050
24d2f903 2051 if (blk_mq_init_hw_queues(q, set))
b62c21b7 2052 goto err_hctxs;
18741986 2053
5778322e 2054 get_online_cpus();
320ae51f 2055 mutex_lock(&all_q_mutex);
320ae51f 2056
4593fdbe 2057 list_add_tail(&q->all_q_node, &all_q_list);
0d2602ca 2058 blk_mq_add_queue_tag_set(set, q);
5778322e 2059 blk_mq_map_swqueue(q, cpu_online_mask);
0d2602ca 2060
4593fdbe 2061 mutex_unlock(&all_q_mutex);
5778322e 2062 put_online_cpus();
484b4061 2063
320ae51f 2064 return q;
18741986 2065
320ae51f 2066err_hctxs:
f14bbe77 2067 kfree(map);
24d2f903 2068 for (i = 0; i < set->nr_hw_queues; i++) {
320ae51f
JA
2069 if (!hctxs[i])
2070 break;
e4043dcf 2071 free_cpumask_var(hctxs[i]->cpumask);
cdef54dd 2072 kfree(hctxs[i]);
320ae51f 2073 }
f14bbe77 2074err_map:
320ae51f
JA
2075 kfree(hctxs);
2076err_percpu:
2077 free_percpu(ctx);
2078 return ERR_PTR(-ENOMEM);
2079}
b62c21b7 2080EXPORT_SYMBOL(blk_mq_init_allocated_queue);
320ae51f
JA
2081
2082void blk_mq_free_queue(struct request_queue *q)
2083{
624dbe47 2084 struct blk_mq_tag_set *set = q->tag_set;
320ae51f 2085
0e626368
AM
2086 mutex_lock(&all_q_mutex);
2087 list_del_init(&q->all_q_node);
2088 mutex_unlock(&all_q_mutex);
2089
0d2602ca
JA
2090 blk_mq_del_queue_tag_set(q);
2091
624dbe47
ML
2092 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
2093 blk_mq_free_hw_queues(q, set);
320ae51f 2094
add703fd 2095 percpu_ref_exit(&q->mq_usage_counter);
320ae51f 2096}
320ae51f
JA
2097
2098/* Basically redo blk_mq_init_queue with queue frozen */
5778322e
AM
2099static void blk_mq_queue_reinit(struct request_queue *q,
2100 const struct cpumask *online_mask)
320ae51f 2101{
4ecd4fef 2102 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
320ae51f 2103
67aec14c
JA
2104 blk_mq_sysfs_unregister(q);
2105
5778322e 2106 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
320ae51f
JA
2107
2108 /*
 2109	 * Redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
 2110	 * we should change hctx->numa_node according to the new topology (this
 2111	 * involves freeing and re-allocating memory, is it worth doing?)
2112 */
2113
5778322e 2114 blk_mq_map_swqueue(q, online_mask);
320ae51f 2115
67aec14c 2116 blk_mq_sysfs_register(q);
320ae51f
JA
2117}
2118
f618ef7c
PG
2119static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
2120 unsigned long action, void *hcpu)
320ae51f
JA
2121{
2122 struct request_queue *q;
5778322e
AM
2123 int cpu = (unsigned long)hcpu;
2124 /*
2125 * New online cpumask which is going to be set in this hotplug event.
 2126	 * Declare this cpumask as static, because cpu-hotplug operations are
 2127	 * invoked one by one and dynamically allocating it could fail.
2128 */
2129 static struct cpumask online_new;
320ae51f
JA
2130
2131 /*
5778322e
AM
 2132	 * Before a hot-added CPU starts handling requests, new mappings must
 2133	 * be established. Otherwise, requests in its hw queue might never
 2134	 * be dispatched.
2135 *
2136 * For example, there is a single hw queue (hctx) and two CPU queues
2137 * (ctx0 for CPU0, and ctx1 for CPU1).
2138 *
 2139	 * Now CPU1 is just onlined and a request is inserted into
 2140	 * ctx1->rq_list; bit0 is set in the pending bitmap, because
 2141	 * ctx1->index_hw is still zero.
2142 *
2143 * And then while running hw queue, flush_busy_ctxs() finds bit0 is
2144 * set in pending bitmap and tries to retrieve requests in
 2145	 * hctx->ctxs[0]->rq_list. But hctx->ctxs[0] is a pointer to ctx0,
2146 * so the request in ctx1->rq_list is ignored.
320ae51f 2147 */
5778322e
AM
2148 switch (action & ~CPU_TASKS_FROZEN) {
2149 case CPU_DEAD:
2150 case CPU_UP_CANCELED:
2151 cpumask_copy(&online_new, cpu_online_mask);
2152 break;
2153 case CPU_UP_PREPARE:
2154 cpumask_copy(&online_new, cpu_online_mask);
2155 cpumask_set_cpu(cpu, &online_new);
2156 break;
2157 default:
320ae51f 2158 return NOTIFY_OK;
5778322e 2159 }
320ae51f
JA
2160
2161 mutex_lock(&all_q_mutex);
f3af020b
TH
2162
2163 /*
2164 * We need to freeze and reinit all existing queues. Freezing
 2165	 * involves a synchronous wait for an RCU grace period, and doing it
2166 * one by one may take a long time. Start freezing all queues in
2167 * one swoop and then wait for the completions so that freezing can
2168 * take place in parallel.
2169 */
2170 list_for_each_entry(q, &all_q_list, all_q_node)
2171 blk_mq_freeze_queue_start(q);
f054b56c 2172 list_for_each_entry(q, &all_q_list, all_q_node) {
f3af020b
TH
2173 blk_mq_freeze_queue_wait(q);
2174
f054b56c
ML
2175 /*
2176 * timeout handler can't touch hw queue during the
2177 * reinitialization
2178 */
2179 del_timer_sync(&q->timeout);
2180 }
2181
320ae51f 2182 list_for_each_entry(q, &all_q_list, all_q_node)
5778322e 2183 blk_mq_queue_reinit(q, &online_new);
f3af020b
TH
2184
2185 list_for_each_entry(q, &all_q_list, all_q_node)
2186 blk_mq_unfreeze_queue(q);
2187
320ae51f
JA
2188 mutex_unlock(&all_q_mutex);
2189 return NOTIFY_OK;
2190}
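/*
 * In short: for CPU_UP_PREPARE the new CPU is added to online_new so
 * its software-to-hardware queue mapping exists before it can submit
 * requests; for CPU_DEAD and CPU_UP_CANCELED the current
 * cpu_online_mask (which no longer contains that CPU) is used.
 */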
2191
a5164405
JA
2192static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2193{
2194 int i;
2195
2196 for (i = 0; i < set->nr_hw_queues; i++) {
2197 set->tags[i] = blk_mq_init_rq_map(set, i);
2198 if (!set->tags[i])
2199 goto out_unwind;
2200 }
2201
2202 return 0;
2203
2204out_unwind:
2205 while (--i >= 0)
2206 blk_mq_free_rq_map(set, set->tags[i], i);
2207
a5164405
JA
2208 return -ENOMEM;
2209}
2210
2211/*
2212 * Allocate the request maps associated with this tag_set. Note that this
2213 * may reduce the depth asked for, if memory is tight. set->queue_depth
2214 * will be updated to reflect the allocated depth.
2215 */
2216static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
2217{
2218 unsigned int depth;
2219 int err;
2220
2221 depth = set->queue_depth;
2222 do {
2223 err = __blk_mq_alloc_rq_maps(set);
2224 if (!err)
2225 break;
2226
2227 set->queue_depth >>= 1;
2228 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
2229 err = -ENOMEM;
2230 break;
2231 }
2232 } while (set->queue_depth);
2233
2234 if (!set->queue_depth || err) {
2235 pr_err("blk-mq: failed to allocate request map\n");
2236 return -ENOMEM;
2237 }
2238
2239 if (depth != set->queue_depth)
2240 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
2241 depth, set->queue_depth);
2242
2243 return 0;
2244}
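/*
 * Worked example for the backoff above (hypothetical numbers, assuming
 * BLK_MQ_TAG_MIN is 1 and set->reserved_tags is 0): a request map that
 * cannot be allocated at the requested depth of 256 is retried at 128,
 * 64, 32, ... and the function only gives up with -ENOMEM once the
 * depth would drop below reserved_tags + BLK_MQ_TAG_MIN.
 */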
2245
f26cdc85
KB
2246struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags)
2247{
2248 return tags->cpumask;
2249}
2250EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
2251
a4391c64
JA
2252/*
2253 * Alloc a tag set to be associated with one or more request queues.
2254 * May fail with EINVAL for various error conditions. May adjust the
 2255 * requested depth down, if it is too large. In that case, the adjusted
 2256 * value will be stored in set->queue_depth.
2257 */
24d2f903
CH
2258int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2259{
205fb5f5
BVA
2260 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2261
24d2f903
CH
2262 if (!set->nr_hw_queues)
2263 return -EINVAL;
a4391c64 2264 if (!set->queue_depth)
24d2f903
CH
2265 return -EINVAL;
2266 if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
2267 return -EINVAL;
2268
f9018ac9 2269 if (!set->ops->queue_rq || !set->ops->map_queue)
24d2f903
CH
2270 return -EINVAL;
2271
a4391c64
JA
2272 if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
2273 pr_info("blk-mq: reduced tag depth to %u\n",
2274 BLK_MQ_MAX_DEPTH);
2275 set->queue_depth = BLK_MQ_MAX_DEPTH;
2276 }
24d2f903 2277
6637fadf
SL
2278 /*
2279 * If a crashdump is active, then we are potentially in a very
2280 * memory constrained environment. Limit us to 1 queue and
2281 * 64 tags to prevent using too much memory.
2282 */
2283 if (is_kdump_kernel()) {
2284 set->nr_hw_queues = 1;
2285 set->queue_depth = min(64U, set->queue_depth);
2286 }
2287
48479005
ML
2288 set->tags = kmalloc_node(set->nr_hw_queues *
2289 sizeof(struct blk_mq_tags *),
24d2f903
CH
2290 GFP_KERNEL, set->numa_node);
2291 if (!set->tags)
a5164405 2292 return -ENOMEM;
24d2f903 2293
a5164405
JA
2294 if (blk_mq_alloc_rq_maps(set))
2295 goto enomem;
24d2f903 2296
0d2602ca
JA
2297 mutex_init(&set->tag_list_lock);
2298 INIT_LIST_HEAD(&set->tag_list);
2299
24d2f903 2300 return 0;
a5164405 2301enomem:
5676e7b6
RE
2302 kfree(set->tags);
2303 set->tags = NULL;
24d2f903
CH
2304 return -ENOMEM;
2305}
2306EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2307
2308void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2309{
2310 int i;
2311
484b4061 2312 for (i = 0; i < set->nr_hw_queues; i++) {
f26cdc85 2313 if (set->tags[i]) {
484b4061 2314 blk_mq_free_rq_map(set, set->tags[i], i);
f26cdc85
KB
2315 free_cpumask_var(set->tags[i]->cpumask);
2316 }
484b4061
JA
2317 }
2318
981bd189 2319 kfree(set->tags);
5676e7b6 2320 set->tags = NULL;
24d2f903
CH
2321}
2322EXPORT_SYMBOL(blk_mq_free_tag_set);
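/*
 * Hypothetical teardown counterpart to the setup sketch further up:
 * the queue is torn down through blk_cleanup_queue() (which ends up in
 * blk_mq_free_queue()) before the tag set itself is released.
 */
static void my_teardown(void)
{
	blk_cleanup_queue(my_queue);
	blk_mq_free_tag_set(&my_tag_set);
}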
2323
e3a2b3f9
JA
2324int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
2325{
2326 struct blk_mq_tag_set *set = q->tag_set;
2327 struct blk_mq_hw_ctx *hctx;
2328 int i, ret;
2329
2330 if (!set || nr > set->queue_depth)
2331 return -EINVAL;
2332
2333 ret = 0;
2334 queue_for_each_hw_ctx(q, hctx, i) {
2335 ret = blk_mq_tag_update_depth(hctx->tags, nr);
2336 if (ret)
2337 break;
2338 }
2339
2340 if (!ret)
2341 q->nr_requests = nr;
2342
2343 return ret;
2344}
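/*
 * The function above backs writes to /sys/block/<dev>/queue/nr_requests
 * for blk-mq devices: the sysfs store path ends up here, and a value
 * larger than the tag set's allocated depth is rejected with -EINVAL.
 */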
2345
676141e4
JA
2346void blk_mq_disable_hotplug(void)
2347{
2348 mutex_lock(&all_q_mutex);
2349}
2350
2351void blk_mq_enable_hotplug(void)
2352{
2353 mutex_unlock(&all_q_mutex);
2354}
2355
320ae51f
JA
2356static int __init blk_mq_init(void)
2357{
320ae51f
JA
2358 blk_mq_cpu_init();
2359
add703fd 2360 hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
320ae51f
JA
2361
2362 return 0;
2363}
2364subsys_initcall(blk_mq_init);