// SPDX-License-Identifier: GPL-2.0
/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/list_sort.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"

/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);

void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	/*
	 * Order clearing SCHED_RESTART against the list_empty_careful()
	 * check of hctx->dispatch in blk_mq_run_hw_queue(). Its pair is
	 * the barrier in blk_mq_dispatch_rq_list(). Without it, the
	 * dispatch side might not see SCHED_RESTART while, at the same
	 * time, a new request added to hctx->dispatch is missed by the
	 * check in blk_mq_run_hw_queue().
	 */
	smp_mb();

	blk_mq_run_hw_queue(hctx, true);
}

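/*
 * list_sort() comparator: group requests that belong to the same hardware
 * queue next to each other so they can be dispatched in per-hctx batches.
 */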
static int sched_rq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return rqa->mq_hctx > rqb->mq_hctx;
}

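/*
 * Detach the leading run of requests that share the first request's hardware
 * queue from @rq_list and hand them to blk_mq_dispatch_rq_list() as one batch.
 */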
static bool blk_mq_dispatch_hctx_list(struct list_head *rq_list)
{
	struct blk_mq_hw_ctx *hctx =
		list_first_entry(rq_list, struct request, queuelist)->mq_hctx;
	struct request *rq;
	LIST_HEAD(hctx_list);
	unsigned int count = 0;

	list_for_each_entry(rq, rq_list, queuelist) {
		if (rq->mq_hctx != hctx) {
			list_cut_before(&hctx_list, rq_list, &rq->queuelist);
			goto dispatch;
		}
		count++;
	}
	list_splice_tail_init(rq_list, &hctx_list);

dispatch:
	return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);
}

#define BLK_MQ_BUDGET_DELAY	3	/* ms units */

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int __blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	bool multi_hctxs = false, run_queue = false;
	bool dispatched = false, busy = false;
	unsigned int max_dispatch;
	LIST_HEAD(rq_list);
	int count = 0;

	if (hctx->dispatch_busy)
		max_dispatch = 1;
	else
		max_dispatch = hctx->queue->nr_requests;

	do {
		struct request *rq;
		int budget_token;

		if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
			break;

		if (!list_empty_careful(&hctx->dispatch)) {
			busy = true;
			break;
		}

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = e->type->ops.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			run_queue = true;
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add_tail(&rq->queuelist, &rq_list);
		count++;
		if (rq->mq_hctx != hctx)
			multi_hctxs = true;

		/*
		 * If we cannot get a tag for the request, stop dequeueing
		 * requests from the IO scheduler. We are unlikely to be able
		 * to submit them anyway, and it creates a false impression
		 * for scheduling heuristics that the device can take more IO.
		 */
		if (!blk_mq_get_driver_tag(rq))
			break;
	} while (count < max_dispatch);

	if (!count) {
		if (run_queue)
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
	} else if (multi_hctxs) {
		/*
		 * Some schedulers, such as bfq and deadline, may dequeue
		 * requests that belong to different hctxs.
		 *
		 * Sort the list by hctx and dispatch the requests in
		 * batches, one hctx at a time.
		 */
		list_sort(NULL, &rq_list, sched_rq_cmp);
		do {
			dispatched |= blk_mq_dispatch_hctx_list(&rq_list);
		} while (!list_empty(&rq_list));
	} else {
		dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);
	}

	if (busy)
		return -EAGAIN;
	return !!dispatched;
}

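/*
 * Keep calling __blk_mq_do_dispatch_sched() for as long as it keeps making
 * progress (returns 1), but bound the time spent here: if we have been
 * dispatching for more than a second or a reschedule is pending, punt to an
 * async queue run instead of continuing inline.
 */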
static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	unsigned long end = jiffies + HZ;
	int ret;

	do {
		ret = __blk_mq_do_dispatch_sched(hctx);
		if (ret != 1)
			break;
		if (need_resched() || time_is_before_jiffies(end)) {
			blk_mq_delay_run_hw_queue(hctx, 0);
			break;
		}
	} while (1);

	return ret;
}

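/* Return the software queue that follows @ctx in @hctx, wrapping around. */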
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned short idx = ctx->index_hw[hctx->type];

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}

/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 *
 * Returns -EAGAIN if hctx->dispatch was found non-empty and run_work has to
 * be run again. This is necessary to avoid starving flushes.
 */
static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);
	int ret = 0;
	struct request *rq;

	do {
		int budget_token;

		if (!list_empty_careful(&hctx->dispatch)) {
			ret = -EAGAIN;
			break;
		}

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		budget_token = blk_mq_get_dispatch_budget(q);
		if (budget_token < 0)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(q, budget_token);
			/*
			 * We're releasing without dispatching. Holding the
			 * budget could have blocked any "hctx"s with the
			 * same queue and if we didn't dispatch then there's
			 * no guarantee anyone will kick the queue. Kick it
			 * ourselves.
			 */
			blk_mq_delay_run_hw_queues(q, BLK_MQ_BUDGET_DELAY);
			break;
		}

		blk_mq_set_rq_budget_token(rq, budget_token);

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);

	} while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, 1));

	WRITE_ONCE(hctx->dispatch_from, ctx);
	return ret;
}

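/*
 * Dispatch requests for one hardware queue. Requests left over on
 * hctx->dispatch are issued first; only when that list is empty, or was fully
 * dispatched, do we pull more work from the I/O scheduler or the software
 * queues.
 */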
static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	const bool has_sched = q->elevator;
	int ret = 0;
	LIST_HEAD(rq_list);

	/*
	 * If we have previous entries on our dispatch list, grab them first
	 * for fairer dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(hctx, &rq_list, 0)) {
			if (has_sched)
				ret = blk_mq_do_dispatch_sched(hctx);
			else
				ret = blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched) {
		ret = blk_mq_do_dispatch_sched(hctx);
	} else if (hctx->dispatch_busy) {
		/* dequeue request one by one from sw queue if queue is busy */
		ret = blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(hctx, &rq_list, 0);
	}

	return ret;
}

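/*
 * Entry point for the hardware queue run path. Bail out early if the queue is
 * stopped or quiesced; otherwise dispatch, retrying once (and finally punting
 * to an async run) when -EAGAIN reports leftover hctx->dispatch entries.
 */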
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * A return of -EAGAIN is an indication that hctx->dispatch is not
	 * empty and we must run again in order to avoid starving flushes.
	 */
	if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN) {
		if (__blk_mq_sched_dispatch_requests(hctx) == -EAGAIN)
			blk_mq_run_hw_queue(hctx, true);
	}
}

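/*
 * Try to merge @bio into an already queued request. The elevator's
 * ->bio_merge() hook is preferred; without one, fall back to a bounded
 * reverse scan of the software queue. Returns true if the bio was merged.
 */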
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx;
	struct blk_mq_hw_ctx *hctx;
	bool ret = false;
	enum hctx_type type;

	if (e && e->type->ops.bio_merge) {
		ret = e->type->ops.bio_merge(q, bio, nr_segs);
		goto out_put;
	}

	ctx = blk_mq_get_ctx(q);
	hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
	type = hctx->type;
	if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
	    list_empty_careful(&ctx->rq_lists[type]))
		goto out_put;

	/* default per sw-queue merge */
	spin_lock(&ctx->lock);
	/*
	 * Reverse check our software queue for entries that we could
	 * potentially merge with. Currently includes a hand-wavy stop
	 * count of 8, to not spend too much time checking for merges.
	 */
	if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))
		ret = true;

	spin_unlock(&ctx->lock);
out_put:
	return ret;
}

bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq, free);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	/*
	 * Dispatch flush and passthrough requests directly.
	 *
	 * A passthrough request has to be added to hctx->dispatch directly:
	 * a device may get into a state where it cannot handle FS requests,
	 * so STS_RESOURCE is returned repeatedly and those FS requests end
	 * up on hctx->dispatch, while a passthrough request may be exactly
	 * what is needed to recover. If the passthrough request were added
	 * to the scheduler queue, there would be no chance to dispatch it,
	 * since requests on hctx->dispatch are always preferred.
	 */
	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
		return true;

	return false;
}

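/*
 * Insert a single request, either into the elevator, the software queue, or
 * (for flush/passthrough requests) straight onto hctx->dispatch, and
 * optionally kick the hardware queue afterwards.
 */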
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

	if (blk_mq_sched_bypass_insert(hctx, rq)) {
		/*
		 * Firstly, a normal IO request is inserted into the scheduler
		 * queue or the sw queue, while a flush request is added to
		 * the dispatch queue (hctx->dispatch) directly. Since there
		 * is at most one in-flight flush request per hw queue, it
		 * doesn't matter whether the flush request goes to the tail
		 * or the front of the dispatch queue.
		 *
		 * Secondly, in the NCQ case a flush request is a non-NCQ
		 * command, and queueing it fails while any normal IO request
		 * (NCQ command) is in flight. Adding the flush rq to the
		 * front of hctx->dispatch tends to add extra latency to the
		 * flush rq because of S_SCHED_RESTART, compared with adding
		 * it to the tail; that in turn increases the chance of flush
		 * merging, so fewer flush requests are issued to the
		 * controller. Roughly 10% of the time is observed to be
		 * saved in blktests block/004 on a disk attached via
		 * AHCI/NCQ when the flush rq goes to the front of
		 * hctx->dispatch.
		 *
		 * So simply queue the flush rq to the front of hctx->dispatch
		 * so that flush-intensive workloads benefit on NCQ hardware.
		 */
		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
		blk_mq_request_bypass_insert(rq, at_head, false);
		goto run;
	}

	if (e) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}

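/*
 * Insert a list of requests coming from a plug flush. With an elevator the
 * whole list is handed to its ->insert_requests(); without one, try to issue
 * the requests directly while the hw queue isn't busy and the run is
 * synchronous, falling back to the software queues otherwise.
 */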
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct elevator_queue *e;
	struct request_queue *q = hctx->queue;

	/*
	 * blk_mq_sched_insert_requests() is called from flush plug
	 * context only, so hold one usage counter to prevent the queue
	 * from being released.
	 */
	percpu_ref_get(&q->q_usage_counter);

	e = hctx->queue->elevator;
	if (e) {
		e->type->ops.insert_requests(hctx, list, false);
	} else {
		/*
		 * Try to issue requests directly if the hw queue isn't
		 * busy in the 'none' scheduler case; this may save us an
		 * extra enqueue & dequeue to the sw queue.
		 */
		if (!hctx->dispatch_busy && !run_queue_async) {
			blk_mq_run_dispatch_ops(hctx->queue,
				blk_mq_try_issue_list_directly(hctx, list));
			if (list_empty(list))
				goto out;
		}
		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
 out:
	percpu_ref_put(&q->q_usage_counter);
}

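/*
 * Allocate (or, with shared tags, reuse) the scheduler tag map and static
 * requests for one hardware queue.
 */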
static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
					  struct blk_mq_hw_ctx *hctx,
					  unsigned int hctx_idx)
{
	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		hctx->sched_tags = q->sched_shared_tags;
		return 0;
	}

	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
						    q->nr_requests);

	if (!hctx->sched_tags)
		return -ENOMEM;
	return 0;
}

static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
{
	blk_mq_free_rq_map(queue->sched_shared_tags);
	queue->sched_shared_tags = NULL;
}

/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags) {
			if (!blk_mq_is_shared_tags(flags))
				blk_mq_free_rq_map(hctx->sched_tags);
			hctx->sched_tags = NULL;
		}
	}

	if (blk_mq_is_shared_tags(flags))
		blk_mq_exit_sched_shared_tags(q);
}

static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
{
	struct blk_mq_tag_set *set = queue->tag_set;

	/*
	 * Set initial depth at max so that we don't need to reallocate for
	 * updating nr_requests.
	 */
	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
						BLK_MQ_NO_HCTX_IDX,
						MAX_SCHED_RQ);
	if (!queue->sched_shared_tags)
		return -ENOMEM;

	blk_mq_tag_update_sched_shared_tags(queue);

	return 0;
}

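/*
 * Attach elevator @e to @q: size the scheduler tag maps from the hardware
 * queue depth, allocate them (shared or per-hctx), and run the elevator's
 * init hooks. With no elevator, just reset the queue to the driver tag depth.
 */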
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	unsigned int flags = q->tag_set->flags;
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned long i;
	int ret;

	if (!e) {
		blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
	}

	/*
	 * Default to twice the smaller of the hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw-queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_DEFAULT_RQ);

	if (blk_mq_is_shared_tags(flags)) {
		ret = blk_mq_init_sched_shared_tags(q);
		if (ret)
			return ret;
	}

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
		if (ret)
			goto err_free_map_and_rqs;
	}

	ret = e->ops.init_sched(q, e);
	if (ret)
		goto err_free_map_and_rqs;

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_register_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.init_hctx) {
			ret = e->ops.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_sched_free_rqs(q);
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_register_sched_hctx(q, hctx);
		mutex_unlock(&q->debugfs_mutex);
	}

	return 0;

err_free_map_and_rqs:
	blk_mq_sched_free_rqs(q);
	blk_mq_sched_tags_teardown(q, flags);

	q->elevator = NULL;
	return ret;
}

/*
 * Called from either blk_queue_cleanup() or elevator_switch(); the tagset
 * is required for freeing requests.
 */
void blk_mq_sched_free_rqs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
				BLK_MQ_NO_HCTX_IDX);
	} else {
		queue_for_each_hw_ctx(q, hctx, i) {
			if (hctx->sched_tags)
				blk_mq_free_rqs(q->tag_set,
						hctx->sched_tags, i);
		}
	}
}

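/*
 * Detach the elevator from @q: tear down per-hctx scheduler data and debugfs
 * entries, exit the elevator instance, and free the scheduler tag maps.
 */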
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;
	unsigned int flags = 0;

	queue_for_each_hw_ctx(q, hctx, i) {
		mutex_lock(&q->debugfs_mutex);
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		mutex_unlock(&q->debugfs_mutex);

		if (e->type->ops.exit_hctx && hctx->sched_data) {
			e->type->ops.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
		flags = hctx->flags;
	}

	mutex_lock(&q->debugfs_mutex);
	blk_mq_debugfs_unregister_sched(q);
	mutex_unlock(&q->debugfs_mutex);

	if (e->type->ops.exit_sched)
		e->type->ops.exit_sched(e);
	blk_mq_sched_tags_teardown(q, flags);
	q->elevator = NULL;
}