// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

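/*
 * Look up (or create) the icq linking the current task's io_context to this
 * request's queue, take a reference on the io_context and attach the icq to
 * the request, so the I/O scheduler can associate the request with the
 * submitting context.
 */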
void blk_mq_sched_assign_ioc(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct io_context *ioc;
        struct io_cq *icq;

        /*
         * May not have an IO context if it's a passthrough request
         */
        ioc = current->io_context;
        if (!ioc)
                return;

        spin_lock_irq(&q->queue_lock);
        icq = ioc_lookup_icq(ioc, q);
        spin_unlock_irq(&q->queue_lock);

        if (!icq) {
                icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
                if (!icq)
                        return;
        }
        get_io_context(icq->ioc);
        rq->elv.icq = icq;
}

/*
 * Mark a hardware queue as needing a restart. The flag is checked and
 * cleared in blk_mq_sched_restart() below before the queue is run again.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;

        set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

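/*
 * If a restart was marked on this hardware queue, clear the flag and kick an
 * asynchronous queue run so that pending requests get another chance to
 * dispatch.
 */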
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
        if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
                return;
        clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

        /*
         * Order clearing SCHED_RESTART against the list_empty_careful()
         * check of hctx->dispatch in blk_mq_run_hw_queue(). Its pair is the
         * barrier in blk_mq_dispatch_rq_list(). Without it, the dispatch
         * code might not see SCHED_RESTART while a request newly added to
         * hctx->dispatch is missed by the check in blk_mq_run_hw_queue().
         */
        smp_mb();

        blk_mq_run_hw_queue(hctx, true);
}

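/*
 * Comparator for list_sort(): order requests by the hardware queue they map
 * to, so that requests for the same hctx end up adjacent and can be
 * dispatched as one batch by blk_mq_dispatch_hctx_list() below.
 */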
static int sched_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct request *rqa = container_of(a, struct request, queuelist);
        struct request *rqb = container_of(b, struct request, queuelist);

        return rqa->mq_hctx > rqb->mq_hctx;
}

static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
        struct blk_mq_hw_ctx *hctx =
                list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
        struct request *rq;
        LIST_HEAD(hctx_list);
        unsigned int count = 0;

        list_for_each_entry(rq, rq_list, queuelist) {
                if (rq->mq_hctx != hctx) {
                        list_cut_before(&hctx_list, rq_list, &rq->queuelist);
                        goto dispatch;
                }
                count++;
        }
        list_splice_tail_init(rq_list, &hctx_list);

dispatch:
        return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY     3               /* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        bool multi_hctxs = false, run_queue = false;
        bool dispatched = false, busy = false;
        unsigned int max_dispatch;
        LIST_HEAD(rq_list);
        int count = 0;

        if (hctx->dispatch_busy)
                max_dispatch = 1;
        else
                max_dispatch = hctx->queue->nr_requests;

        do {
                struct request *rq;

                if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
                        break;

                if (!list_empty_careful(&hctx->dispatch)) {
                        busy = true;
                        break;
                }

                if (!blk_mq_get_dispatch_budget(q))
                        break;

                rq = e->type->ops.dispatch_request(hctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue. Kick it
                         * ourselves.
                         */
                        run_queue = true;
                        break;
                }

                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add_tail(&rq->queuelist, &rq_list);
                if (rq->mq_hctx != hctx)
                        multi_hctxs = true;
        } while (++count < max_dispatch);

        if (!count) {
                if (run_queue)
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
        } else if (multi_hctxs) {
                /*
                 * Requests from different hctxs may be dequeued from some
                 * schedulers, such as bfq and deadline.
                 *
                 * Sort the requests in the list by hctx, then dispatch them
                 * in batches of requests from the same hctx at a time.
                 */
                list_sort(NULL, &rq_list, sched_rq_cmp);
                do {
                        dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
                } while (!list_empty(&rq_list));
        } else {
                dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
        }

        if (busy)
                return -EAGAIN;
        return !!dispatched;
}

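/*
 * Keep calling __blk_mq_do_dispatch_sched() as long as it dispatched
 * requests; stop once nothing was dispatched or -EAGAIN was returned because
 * hctx->dispatch was found non-empty.
 */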
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
        int ret;

        do {
                ret = __blk_mq_do_dispatch_sched(hctx);
        } while (ret == 1);

        return ret;
}

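/*
 * Return the software queue that follows @ctx in this hardware queue's ctx
 * map, wrapping around at the end; used for round-robin dispatch from the
 * software queues.
 */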
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
                                          struct blk_mq_ctx *ctx)
{
        unsigned short idx = ctx->index_hw[hctx->type];

        if (++idx == hctx->nr_ctx)
                idx = 0;

        return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        LIST_HEAD(rq_list);
        struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
        int ret = 0;
        struct request *rq;

        do {
                if (!list_empty_careful(&hctx->dispatch)) {
                        ret = -EAGAIN;
                        break;
                }

                if (!sbitmap_any_bit_set(&hctx->ctx_map))
                        break;

                if (!blk_mq_get_dispatch_budget(q))
                        break;

                rq = blk_mq_dequeue_from_ctx(hctx, ctx);
                if (!rq) {
                        blk_mq_put_dispatch_budget(q);
                        /*
                         * We're releasing without dispatching. Holding the
                         * budget could have blocked any "hctx"s with the
                         * same queue and if we didn't dispatch then there's
                         * no guarantee anyone will kick the queue. Kick it
                         * ourselves.
                         */
                        blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
                        break;
                }

                /*
                 * Now this rq owns the budget which has to be released
                 * if this rq won't be queued to driver via .queue_rq()
                 * in blk_mq_dispatch_rq_list().
                 */
                list_add(&rq->queuelist, &rq_list);

                /* round robin for fair dispatch */
                ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

        } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

        WRITE_ONCE(hctx->dispatch_from, ctx);
        return ret;
}

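/*
 * Drain requests that were left on hctx->dispatch first, then pull new
 * requests from the I/O scheduler or the software queues. Returns -EAGAIN
 * when hctx->dispatch was found non-empty while dispatching, so the caller
 * can run the queue again instead of starving flushes.
 */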
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct elevator_queue *e = q->elevator;
        const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
        int ret = 0;
        LIST_HEAD(rq_list);

        /*
         * If we have previous entries on our dispatch list, grab them first
         * for more fair dispatch.
         */
        if (!list_empty_careful(&hctx->dispatch)) {
                spin_lock(&hctx->lock);
                if (!list_empty(&hctx->dispatch))
                        list_splice_init(&hctx->dispatch, &rq_list);
                spin_unlock(&hctx->lock);
        }

        /*
         * Only ask the scheduler for requests if we didn't have residual
         * requests from the dispatch list. This is to avoid the case where
         * we only ever dispatch a fraction of the requests available because
         * of low device queue depth. Once we pull requests out of the IO
         * scheduler, we can no longer merge or sort them. So it's best to
         * leave them there for as long as we can. Mark the hw queue as
         * needing a restart in that case.
         *
         * We want to dispatch from the scheduler if there was nothing
         * on the dispatch list or we were able to dispatch from the
         * dispatch list.
         */
        if (!list_empty(&rq_list)) {
                blk_mq_sched_mark_restart_hctx(hctx);
                if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
                        if (has_sched_dispatch)
                                ret = blk_mq_do_dispatch_sched(hctx);
                        else
                                ret = blk_mq_do_dispatch_ctx(hctx);
                }
        } else if (has_sched_dispatch) {
                ret = blk_mq_do_dispatch_sched(hctx);
        } else if (hctx->dispatch_busy) {
                /* dequeue request one by one from sw queue if queue is busy */
                ret = blk_mq_do_dispatch_ctx(hctx);
        } else {
                blk_mq_flush_busy_ctxs(hctx, &rq_list);
                blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
        }

        return ret;
}

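/*
 * Called to run a hardware queue: bail out if the queue is stopped or
 * quiesced, otherwise dispatch, retrying once (or rescheduling a queue run)
 * when -EAGAIN signals that hctx->dispatch still holds requests.
 */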
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;

        /* RCU or SRCU read lock is needed before checking quiesced flag */
        if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                return;

        hctx->run++;

        /*
         * A return of -EAGAIN is an indication that hctx->dispatch is not
         * empty and we must run again in order to avoid starving flushes.
         */
        if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
                if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
                        blk_mq_run_hw_queue(hctx, true);
        }
}

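/*
 * Try to merge @bio into an already queued request: hand it to the
 * elevator's ->bio_merge() if one is set, otherwise fall back to a bounded
 * reverse scan of the per-cpu software queue.
 */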
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
{
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        bool ret = false;
        enum hctx_type type;

        if (e && e->type->ops.bio_merge)
                return e->type->ops.bio_merge(hctx, bio, nr_segs);

        type = hctx->type;
        if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
            list_empty_careful(&ctx->rq_lists[type]))
                return false;

        /* default per sw-queue merge */
        spin_lock(&ctx->lock);
        /*
         * Reverse check our software queue for entries that we could
         * potentially merge with. Currently includes a hand-wavy stop
         * count of 8, to not spend too much time checking for merges.
         */
        if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
                ctx->rq_merged++;
                ret = true;
        }

        spin_unlock(&ctx->lock);

        return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
        return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
        trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);

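/*
 * Decide whether @rq must skip the I/O scheduler and go straight to
 * hctx->dispatch. Returns true for flush-sequence and passthrough requests;
 * everything else is handed to the scheduler (and marked RQF_SORTED when one
 * is attached).
 */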
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
                                       bool has_sched,
                                       struct request *rq)
{
        /*
         * Dispatch flush and passthrough requests directly.
         *
         * A passthrough request has to be added to hctx->dispatch directly:
         * the device may be in a state where it cannot handle FS requests,
         * so BLK_STS_RESOURCE is returned for them and the FS requests pile
         * up on hctx->dispatch, while a passthrough request may be exactly
         * what is needed to fix that state. If the passthrough request were
         * added to the scheduler queue, there would be no chance to dispatch
         * it, given that we prioritize requests on hctx->dispatch.
         */
        if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
                return true;

        if (has_sched)
                rq->rq_flags |= RQF_SORTED;

        return false;
}

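/*
 * Insert a single request, either into the I/O scheduler, the software
 * queue, or directly onto hctx->dispatch when it must bypass the scheduler,
 * and optionally run the hardware queue afterwards.
 */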
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

        if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
                /*
                 * Firstly, normal IO requests are inserted into the
                 * scheduler queue or a sw queue, while flush requests are
                 * added to the dispatch queue (hctx->dispatch) directly;
                 * there is at most one in-flight flush request per hw queue,
                 * so it does not matter whether a flush request is added to
                 * the head or the tail of the dispatch queue.
                 *
                 * Secondly, in case of NCQ, a flush request is a non-NCQ
                 * command, and queueing it fails while any normal IO request
                 * (an NCQ command) is in flight. Adding the flush rq to the
                 * front of hctx->dispatch makes it more likely (because of
                 * S_SCHED_RESTART) that some extra time is added to the
                 * flush rq's latency compared with adding it to the tail,
                 * which in turn increases the chance of flush merging, so
                 * fewer flush requests are issued to the controller. It was
                 * observed that ~10% of time is saved in blktests block/004
                 * on a disk attached via AHCI/NCQ when the flush rq is added
                 * to the front of hctx->dispatch.
                 *
                 * So simply queue the flush rq to the front of
                 * hctx->dispatch, so that intensive flush workloads can
                 * benefit in case of NCQ HW.
                 */
                at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
                blk_mq_request_bypass_insert(rq, at_head, false);
                goto run;
        }

        if (e && e->type->ops.insert_requests) {
                LIST_HEAD(list);

                list_add(&rq->queuelist, &list);
                e->type->ops.insert_requests(hctx, &list, at_head);
        } else {
                spin_lock(&ctx->lock);
                __blk_mq_insert_request(hctx, rq, at_head);
                spin_unlock(&ctx->lock);
        }

run:
        if (run_queue)
                blk_mq_run_hw_queue(hctx, async);
}

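/*
 * Insert a list of requests coming from a plug flush: hand the whole list to
 * the elevator if one is attached, otherwise try direct issue (or fall back
 * to the software queues), then run the hardware queue.
 */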
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async)
{
        struct elevator_queue *e;
        struct request_queue *q = hctx->queue;

        /*
         * blk_mq_sched_insert_requests() is called from flush plug
         * context only, and holds one usage counter to prevent the queue
         * from being released.
         */
        percpu_ref_get(&q->q_usage_counter);

        e = hctx->queue->elevator;
        if (e && e->type->ops.insert_requests)
                e->type->ops.insert_requests(hctx, list, false);
        else {
                /*
                 * Try to issue requests directly if the hw queue isn't
                 * busy in case of 'none' scheduler; this way may save
                 * us one extra enqueue & dequeue to the sw queue.
                 */
                if (!hctx->dispatch_busy && !e && !run_queue_async) {
                        blk_mq_try_issue_list_directly(hctx, list);
                        if (list_empty(list))
                                goto out;
                }
                blk_mq_insert_requests(hctx, ctx, list);
        }

        blk_mq_run_hw_queue(hctx, run_queue_async);
out:
        percpu_ref_put(&q->q_usage_counter);
}

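/*
 * Allocate and free the per-hctx scheduler tag maps. The HCTX_SHARED flag is
 * cleared here so each hctx's scheduler tags are set up and torn down
 * individually, even when the driver tags are shared across hardware queues.
 */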
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
{
        unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

        if (hctx->sched_tags) {
                blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
                blk_mq_free_rq_map(hctx->sched_tags, flags);
                hctx->sched_tags = NULL;
        }
}

static int blk_mq_sched_alloc_tags(struct request_queue *q,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
{
        struct blk_mq_tag_set *set = q->tag_set;
        /* Clear HCTX_SHARED so tags are init'ed */
        unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;
        int ret;

        hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
                                               set->reserved_tags, flags);
        if (!hctx->sched_tags)
                return -ENOMEM;

        ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
        if (ret)
                blk_mq_sched_free_tags(set, hctx, hctx_idx);

        return ret;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                /* Clear HCTX_SHARED so tags are freed */
                unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

                if (hctx->sched_tags) {
                        blk_mq_free_rq_map(hctx->sched_tags, flags);
                        hctx->sched_tags = NULL;
                }
        }
}

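/*
 * Attach the elevator @e to @q: size the per-hctx scheduler tag maps, call
 * the elevator's init_sched()/init_hctx() callbacks and register the debugfs
 * attributes. Passing a NULL elevator switches the queue to 'none'.
 */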
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct blk_mq_hw_ctx *hctx;
        struct elevator_queue *eq;
        unsigned int i;
        int ret;

        if (!e) {
                q->elevator = NULL;
                q->nr_requests = q->tag_set->queue_depth;
                return 0;
        }

        /*
         * Default to double of the smaller of the hw queue_depth and 128,
         * since we don't split into sync/async like the old code did.
         * Additionally, this is a per-hw-queue depth.
         */
        q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
                                   BLKDEV_MAX_RQ);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_sched_alloc_tags(q, hctx, i);
                if (ret)
                        goto err;
        }

        ret = e->ops.init_sched(q, e);
        if (ret)
                goto err;

        blk_mq_debugfs_register_sched(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                if (e->ops.init_hctx) {
                        ret = e->ops.init_hctx(hctx, i);
                        if (ret) {
                                eq = q->elevator;
                                blk_mq_sched_free_requests(q);
                                blk_mq_exit_sched(q, eq);
                                kobject_put(&eq->kobj);
                                return ret;
                        }
                }
                blk_mq_debugfs_register_sched_hctx(q, hctx);
        }

        return 0;

err:
        blk_mq_sched_free_requests(q);
        blk_mq_sched_tags_teardown(q);
        q->elevator = NULL;
        return ret;
}

/*
 * called in either blk_queue_cleanup or elevator_switch, tagset
 * is required for freeing requests
 */
void blk_mq_sched_free_requests(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->sched_tags)
                        blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
        }
}

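/*
 * Detach the elevator from @q: unregister debugfs attributes, invoke the
 * elevator's exit_hctx()/exit_sched() callbacks and tear down the per-hctx
 * scheduler tag maps.
 */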
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_debugfs_unregister_sched_hctx(hctx);
                if (e->type->ops.exit_hctx && hctx->sched_data) {
                        e->type->ops.exit_hctx(hctx, i);
                        hctx->sched_data = NULL;
                }
        }
        blk_mq_debugfs_unregister_sched(q);
        if (e->type->ops.exit_sched)
                e->type->ops.exit_sched(e);
        blk_mq_sched_tags_teardown(q);
        q->elevator = NULL;
}